/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
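/*
 * nv_pcie_rreg()/nv_pcie_wreg() go through the NBIO PCIE index/data register
 * pair: the register offset is written to the index register and the value is
 * then read from or written to the data register.  The pcie_idx_lock spinlock
 * serializes the index/data sequence, and the dummy read-back of the index
 * register makes sure the index write has landed before the data access.
 * These helpers are hooked up as adev->pcie_rreg/adev->pcie_wreg in
 * nv_common_early_init(), e.g. (hypothetical offset, for illustration only):
 *
 *	u32 val = adev->pcie_rreg(adev, some_pcie_reg_offset);
 */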
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

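/*
 * nv_grbm_select - steer GC register access to one GFX instance
 *
 * Programs GRBM_GFX_CNTL so that subsequent GC register accesses are directed
 * at the selected micro engine (me), pipe, queue and VMID.  Callers normally
 * serialize selections and restore broadcast/defaults when done, along the
 * lines of (illustrative pattern, not taken from this file):
 *
 *	nv_grbm_select(adev, me, pipe, queue, 0);
 *	... access per-queue GC registers ...
 *	nv_grbm_select(adev, 0, 0, 0, 0);
 */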
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
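	/*
	 * se_num/sh_num == 0xffffffff means "broadcast", so no steering is
	 * needed.  Otherwise point the GRBM index at the requested shader
	 * engine / shader array before reading, and restore broadcast mode
	 * afterwards so later accesses are not silently redirected.
	 */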
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
#endif

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

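	/*
	 * Mode1 reset sequence: flag the engine as hung in the ATOM BIOS
	 * scratch registers, stop bus mastering and save PCI config space,
	 * ask the PSP to perform the mode1 reset, then restore config space
	 * and poll the NBIO memsize register until the ASIC reports a sane
	 * value again, i.e. it has come back out of reset.
	 */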
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = smu_baco_reset(smu);
	} else {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
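	/*
	 * PCIe link-speed reconfiguration is skipped when the device sits
	 * directly on a root bus, when it is disabled via the
	 * amdgpu_pcie_gen2 module parameter, or when the platform does not
	 * advertise Gen2/Gen3 link speeds; the actual reprogramming is still
	 * a todo below.
	 */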
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

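	/*
	 * Detect whether we are running under a hypervisor before adding IP
	 * blocks: under SR-IOV the NV mailbox virt ops are installed and
	 * several blocks below (SMU, display) are registered differently.
	 */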
	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force on the IPH & RC clocks */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any change
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

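	/*
	 * HDP MGCG is controlled through the per-client SOFT_OVERRIDE bits in
	 * HDP_CLK_CNTL: clearing an override lets that client's clock be
	 * gated dynamically, while setting it forces the clock on.  Enabling
	 * therefore clears all the overrides and disabling sets them.
	 */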
	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};