/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
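/*
 * The PCIE register space is reached through an index/data register pair
 * provided by the NBIO block: the target offset is written to the index
 * register and read back so the posted write is known to have landed, then
 * the payload is accessed through the data register.  pcie_idx_lock
 * serializes users of the shared pair.
 */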
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
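/*
 * Steer subsequent GRBM register accesses at a specific micro engine (ME),
 * pipe, hardware queue and VMID by programming GRBM_GFX_CNTL.  Callers are
 * expected to serialize the selection themselves.
 */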
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
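/*
 * Read a GRBM-banked register.  se_num/sh_num of 0xffffffff mean
 * "broadcast"; for any narrower selection the requested shader engine /
 * shader array is selected first, and broadcast mode is restored before
 * grbm_idx_mutex is released.
 */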
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
#endif
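/*
 * ASIC reset is effectively a no-op for now: the legacy PCI config reset
 * above has not worked since vega10, so the whole path stays compiled out
 * until a functional reset (e.g. mode1, see nv_need_reset_on_init() below)
 * is wired up.
 */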
static int nv_asic_reset(struct amdgpu_device *adev)
{
	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	return 0;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
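/*
 * Register the IP blocks for the ASIC.  The order of the
 * amdgpu_device_ip_block_add() calls matters: it is the order the driver
 * core walks for init, so common, GMC and IH come first and their
 * consumers (GFX, SDMA, VCN, MES) follow.  Note the SMU block is added
 * before display/GFX when firmware is loaded through PSP, but after SDMA
 * for direct loading.
 */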
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
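/*
 * HDP (host data path) cache maintenance: the flush is delegated to the
 * NBIO block, while the read-cache invalidate is either written over MMIO
 * or, when a ring with emit_wreg support is provided, emitted on the ring
 * so that it is ordered with the surrounding GPU work.
 */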
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
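/*
 * Lay out the Navi10 doorbell aperture.  Each engine gets a fixed slot in
 * the doorbell BAR; the assignments come from the AMDGPU_NAVI10_DOORBELL_*
 * map and must stay consistent with the rings that consume them.
 */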
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};

static int nv_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	return 0;
}

static int nv_common_sw_init(void *handle)
{
	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}
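/*
 * Common hardware bring-up: bring the PCIe link up to speed and program
 * ASPM (both still stubs), set up the NBIO register defaults, then open
 * the doorbell aperture.  nv_common_hw_fini() only needs to undo the last
 * step.
 */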
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
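/*
 * HDP memory power gating supports three mutually exclusive modes: light
 * sleep (LS), deep sleep (DS) and shutdown (SD).  Because HDP 5.0 cannot
 * switch modes on the fly, the IPH and RC memory clocks are forced on and
 * every mode is disabled before the one selected in cg_flags is programmed.
 */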
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force the IPH & RC clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any change */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shutdown mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
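/*
 * HDP medium grain clock gating is controlled through the soft override
 * bits in HDP_CLK_CNTL: clearing an override lets that clock be gated
 * dynamically, while setting it forces the clock on and thus disables
 * gating.
 */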
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			 (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			  HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			  HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			  HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			  HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			  HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
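/*
 * Clock gating changes are skipped entirely under SR-IOV, where the host
 * side is expected to own these registers; otherwise NBIO MGCG/LS and the
 * HDP paths above are toggled together based on the requested gate state.
 */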
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};