/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessors
 */
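/*
 * The PCIE and DIDT indirect spaces are reached through index/data
 * register pairs: the target offset is written to the index register
 * (with a read-back to post the write on the PCIE path) and the payload
 * then moves through the data register, all under the corresponding
 * idx_lock.
 */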
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
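/*
 * nv_grbm_select - program GRBM_GFX_CNTL so that subsequent GRBM
 * register accesses are routed to the given ME/pipe/queue/VMID.
 */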
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
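/*
 * nv_read_indexed_register - read a register for a specific shader
 * engine / shader array by programming the GRBM index first; broadcast
 * selection (0xffffffff) is restored before returning and the whole
 * sequence is serialized by grbm_idx_mutex.
 */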
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
#endif
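/*
 * Mode1 reset is requested through the PSP.  Bus mastering is turned
 * off and PCI config space saved across the reset; completion is
 * detected by polling the NBIO memsize register until it returns
 * something other than 0xffffffff.
 */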
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		ret = smu_baco_reset(smu);
	else
		ret = nv_asic_mode1_reset(adev);

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
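/*
 * PCIe gen2/3 link speed programming and ASPM setup are still todo;
 * the two helpers below only check the preconditions (root bus, module
 * parameters, advertised link speeds) and return.
 */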
static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "falling back to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
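/*
 * nv_set_ip_blocks - register the per-ASIC IP block list.  Ordering
 * matters: common and GMC first, then IH and PSP; the SMU block is
 * added before display when firmware is loaded through the PSP and
 * after SDMA for direct loading; GFX, SDMA, VCN and (optionally) MES
 * follow.
 */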
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
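/*
 * HDP read-cache invalidation is emitted on the ring when one is
 * available, so it is ordered with the submitted commands; otherwise
 * it falls back to a direct MMIO write.
 */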
static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
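/*
 * nv_init_doorbell_index - lay out the Navi1x doorbell aperture: KIQ
 * and the MEC compute rings first, then user queues, GFX rings, SDMA
 * engines, IH and the VCN ring pairs.  max_assignment is shifted left
 * by one to convert the 64-bit doorbell index into 32-bit units.
 */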
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};
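/*
 * nv_common_early_init - install the indirect register accessors and
 * asic callbacks, read back the revision id, and set the per-chip
 * clock gating (cg_flags) and power gating (pg_flags) feature masks.
 */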
static int nv_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_MMHUB |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = 0;
		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	return 0;
}

static int nv_common_sw_init(void *handle)
{
	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}
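/*
 * nv_common_hw_init - bring up the common block.  PCIe gen speed and
 * ASPM programming are still stubs, so in practice this initializes
 * the NBIO registers and enables the doorbell aperture (hw_fini
 * disables it again).
 */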
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
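/*
 * nv_update_hdp_mem_power_gating - switch the HDP IPH/RC memory power
 * mode.  HDP 5.0 cannot switch modes dynamically, so the memory clocks
 * are forced on and every LS/DS/SD enable cleared before exactly one
 * mode is (re)enabled according to cg_flags.
 */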
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force on the IPH & RC clocks */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shutdown mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
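/*
 * nv_update_hdp_clock_gating - toggle HDP medium-grain clock gating:
 * clearing the soft clock overrides in HDP_CLK_CNTL enables gating,
 * setting them forces the clocks on.
 */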
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
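/*
 * Common clock gating entry point: forwards the requested state to the
 * NBIO medium-grain gating helpers and the HDP helpers above; a no-op
 * under SR-IOV.
 */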
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};