xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/nv.c (revision 79bebabb)
1c6b6a421SHawking Zhang /*
2c6b6a421SHawking Zhang  * Copyright 2019 Advanced Micro Devices, Inc.
3c6b6a421SHawking Zhang  *
4c6b6a421SHawking Zhang  * Permission is hereby granted, free of charge, to any person obtaining a
5c6b6a421SHawking Zhang  * copy of this software and associated documentation files (the "Software"),
6c6b6a421SHawking Zhang  * to deal in the Software without restriction, including without limitation
7c6b6a421SHawking Zhang  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8c6b6a421SHawking Zhang  * and/or sell copies of the Software, and to permit persons to whom the
9c6b6a421SHawking Zhang  * Software is furnished to do so, subject to the following conditions:
10c6b6a421SHawking Zhang  *
11c6b6a421SHawking Zhang  * The above copyright notice and this permission notice shall be included in
12c6b6a421SHawking Zhang  * all copies or substantial portions of the Software.
13c6b6a421SHawking Zhang  *
14c6b6a421SHawking Zhang  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15c6b6a421SHawking Zhang  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16c6b6a421SHawking Zhang  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17c6b6a421SHawking Zhang  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18c6b6a421SHawking Zhang  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19c6b6a421SHawking Zhang  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20c6b6a421SHawking Zhang  * OTHER DEALINGS IN THE SOFTWARE.
21c6b6a421SHawking Zhang  *
22c6b6a421SHawking Zhang  */
23c6b6a421SHawking Zhang #include <linux/firmware.h>
24c6b6a421SHawking Zhang #include <linux/slab.h>
25c6b6a421SHawking Zhang #include <linux/module.h>
26e9eea902SAlex Deucher #include <linux/pci.h>
27e9eea902SAlex Deucher 
28c6b6a421SHawking Zhang #include "amdgpu.h"
29c6b6a421SHawking Zhang #include "amdgpu_atombios.h"
30c6b6a421SHawking Zhang #include "amdgpu_ih.h"
31c6b6a421SHawking Zhang #include "amdgpu_uvd.h"
32c6b6a421SHawking Zhang #include "amdgpu_vce.h"
33c6b6a421SHawking Zhang #include "amdgpu_ucode.h"
34c6b6a421SHawking Zhang #include "amdgpu_psp.h"
35767acabdSKevin Wang #include "amdgpu_smu.h"
36c6b6a421SHawking Zhang #include "atom.h"
37c6b6a421SHawking Zhang #include "amd_pcie.h"
38c6b6a421SHawking Zhang 
39c6b6a421SHawking Zhang #include "gc/gc_10_1_0_offset.h"
40c6b6a421SHawking Zhang #include "gc/gc_10_1_0_sh_mask.h"
41c6b6a421SHawking Zhang #include "hdp/hdp_5_0_0_offset.h"
42c6b6a421SHawking Zhang #include "hdp/hdp_5_0_0_sh_mask.h"
4329bc37b4SAlex Deucher #include "smuio/smuio_11_0_0_offset.h"
44c6b6a421SHawking Zhang 
45c6b6a421SHawking Zhang #include "soc15.h"
46c6b6a421SHawking Zhang #include "soc15_common.h"
47c6b6a421SHawking Zhang #include "gmc_v10_0.h"
48c6b6a421SHawking Zhang #include "gfxhub_v2_0.h"
49c6b6a421SHawking Zhang #include "mmhub_v2_0.h"
50bebc0762SHawking Zhang #include "nbio_v2_3.h"
51c6b6a421SHawking Zhang #include "nv.h"
52c6b6a421SHawking Zhang #include "navi10_ih.h"
53c6b6a421SHawking Zhang #include "gfx_v10_0.h"
54c6b6a421SHawking Zhang #include "sdma_v5_0.h"
55c6b6a421SHawking Zhang #include "vcn_v2_0.h"
565be45a26SLeo Liu #include "jpeg_v2_0.h"
57c6b6a421SHawking Zhang #include "dce_virtual.h"
58c6b6a421SHawking Zhang #include "mes_v10_1.h"
59b05b6903SJiange Zhao #include "mxgpu_nv.h"
60c6b6a421SHawking Zhang 
61c6b6a421SHawking Zhang static const struct amd_ip_funcs nv_common_ip_funcs;
62c6b6a421SHawking Zhang 
63c6b6a421SHawking Zhang /*
64c6b6a421SHawking Zhang  * Indirect register accessors
65c6b6a421SHawking Zhang  */
66c6b6a421SHawking Zhang static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
67c6b6a421SHawking Zhang {
68c6b6a421SHawking Zhang 	unsigned long flags, address, data;
69c6b6a421SHawking Zhang 	u32 r;
70bebc0762SHawking Zhang 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
71bebc0762SHawking Zhang 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
72c6b6a421SHawking Zhang 
73c6b6a421SHawking Zhang 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
74c6b6a421SHawking Zhang 	WREG32(address, reg);
75c6b6a421SHawking Zhang 	(void)RREG32(address);
76c6b6a421SHawking Zhang 	r = RREG32(data);
77c6b6a421SHawking Zhang 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
78c6b6a421SHawking Zhang 	return r;
79c6b6a421SHawking Zhang }
80c6b6a421SHawking Zhang 
81c6b6a421SHawking Zhang static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
82c6b6a421SHawking Zhang {
83c6b6a421SHawking Zhang 	unsigned long flags, address, data;
84c6b6a421SHawking Zhang 
85bebc0762SHawking Zhang 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
86bebc0762SHawking Zhang 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
87c6b6a421SHawking Zhang 
88c6b6a421SHawking Zhang 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
89c6b6a421SHawking Zhang 	WREG32(address, reg);
90c6b6a421SHawking Zhang 	(void)RREG32(address);
91c6b6a421SHawking Zhang 	WREG32(data, v);
92c6b6a421SHawking Zhang 	(void)RREG32(data);
93c6b6a421SHawking Zhang 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
94c6b6a421SHawking Zhang }
95c6b6a421SHawking Zhang 
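/*
 * DIDT indirect register accessors (via mmDIDT_IND_INDEX/mmDIDT_IND_DATA)
 */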
96c6b6a421SHawking Zhang static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
97c6b6a421SHawking Zhang {
98c6b6a421SHawking Zhang 	unsigned long flags, address, data;
99c6b6a421SHawking Zhang 	u32 r;
100c6b6a421SHawking Zhang 
101c6b6a421SHawking Zhang 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
102c6b6a421SHawking Zhang 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
103c6b6a421SHawking Zhang 
104c6b6a421SHawking Zhang 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
105c6b6a421SHawking Zhang 	WREG32(address, (reg));
106c6b6a421SHawking Zhang 	r = RREG32(data);
107c6b6a421SHawking Zhang 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
108c6b6a421SHawking Zhang 	return r;
109c6b6a421SHawking Zhang }
110c6b6a421SHawking Zhang 
111c6b6a421SHawking Zhang static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
112c6b6a421SHawking Zhang {
113c6b6a421SHawking Zhang 	unsigned long flags, address, data;
114c6b6a421SHawking Zhang 
115c6b6a421SHawking Zhang 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
116c6b6a421SHawking Zhang 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
117c6b6a421SHawking Zhang 
118c6b6a421SHawking Zhang 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
119c6b6a421SHawking Zhang 	WREG32(address, (reg));
120c6b6a421SHawking Zhang 	WREG32(data, (v));
121c6b6a421SHawking Zhang 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
122c6b6a421SHawking Zhang }
123c6b6a421SHawking Zhang 
124c6b6a421SHawking Zhang static u32 nv_get_config_memsize(struct amdgpu_device *adev)
125c6b6a421SHawking Zhang {
126bebc0762SHawking Zhang 	return adev->nbio.funcs->get_memsize(adev);
127c6b6a421SHawking Zhang }
128c6b6a421SHawking Zhang 
129c6b6a421SHawking Zhang static u32 nv_get_xclk(struct amdgpu_device *adev)
130c6b6a421SHawking Zhang {
131462a70d8STao Zhou 	return adev->clock.spll.reference_freq;
132c6b6a421SHawking Zhang }
133c6b6a421SHawking Zhang 
134c6b6a421SHawking Zhang 
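/* Select which ME/pipe/queue/VMID subsequent GRBM-indexed register accesses
 * will target by programming GRBM_GFX_CNTL.
 */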
135c6b6a421SHawking Zhang void nv_grbm_select(struct amdgpu_device *adev,
136c6b6a421SHawking Zhang 		     u32 me, u32 pipe, u32 queue, u32 vmid)
137c6b6a421SHawking Zhang {
138c6b6a421SHawking Zhang 	u32 grbm_gfx_cntl = 0;
139c6b6a421SHawking Zhang 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
140c6b6a421SHawking Zhang 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
141c6b6a421SHawking Zhang 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
142c6b6a421SHawking Zhang 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
143c6b6a421SHawking Zhang 
144c6b6a421SHawking Zhang 	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
145c6b6a421SHawking Zhang }
146c6b6a421SHawking Zhang 
147c6b6a421SHawking Zhang static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
148c6b6a421SHawking Zhang {
149c6b6a421SHawking Zhang 	/* todo */
150c6b6a421SHawking Zhang }
151c6b6a421SHawking Zhang 
152c6b6a421SHawking Zhang static bool nv_read_disabled_bios(struct amdgpu_device *adev)
153c6b6a421SHawking Zhang {
154c6b6a421SHawking Zhang 	/* todo */
155c6b6a421SHawking Zhang 	return false;
156c6b6a421SHawking Zhang }
157c6b6a421SHawking Zhang 
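/* Read the discrete GPU's VBIOS image from the ROM mirror exposed through the
 * SMUIO ROM_INDEX/ROM_DATA registers.  Not supported on APUs, whose VBIOS is
 * embedded in the system BIOS image; returns true on success.
 */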
158c6b6a421SHawking Zhang static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
159c6b6a421SHawking Zhang 				  u8 *bios, u32 length_bytes)
160c6b6a421SHawking Zhang {
16129bc37b4SAlex Deucher 	u32 *dw_ptr;
16229bc37b4SAlex Deucher 	u32 i, length_dw;
16329bc37b4SAlex Deucher 
16429bc37b4SAlex Deucher 	if (bios == NULL)
165c6b6a421SHawking Zhang 		return false;
16629bc37b4SAlex Deucher 	if (length_bytes == 0)
16729bc37b4SAlex Deucher 		return false;
16829bc37b4SAlex Deucher 	/* APU vbios image is part of sbios image */
16929bc37b4SAlex Deucher 	if (adev->flags & AMD_IS_APU)
17029bc37b4SAlex Deucher 		return false;
17129bc37b4SAlex Deucher 
17229bc37b4SAlex Deucher 	dw_ptr = (u32 *)bios;
17329bc37b4SAlex Deucher 	length_dw = ALIGN(length_bytes, 4) / 4;
17429bc37b4SAlex Deucher 
17529bc37b4SAlex Deucher 	/* set rom index to 0 */
17629bc37b4SAlex Deucher 	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
17729bc37b4SAlex Deucher 	/* read out the rom data */
17829bc37b4SAlex Deucher 	for (i = 0; i < length_dw; i++)
17929bc37b4SAlex Deucher 		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
18029bc37b4SAlex Deucher 
18129bc37b4SAlex Deucher 	return true;
182c6b6a421SHawking Zhang }
183c6b6a421SHawking Zhang 
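/* Allow-list of registers that userspace may read through the read_register
 * ASIC callback; nv_read_register() rejects anything else with -EINVAL.
 */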
184c6b6a421SHawking Zhang static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
185c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
186c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
187c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
188c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
189c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
190c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
191c6b6a421SHawking Zhang #if 0	/* TODO: will set it when SDMA header is available */
192c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
193c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
194c6b6a421SHawking Zhang #endif
195c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
196c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
197c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
198c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
199c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
200c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
201c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
202664fe85aSMarek Olšák 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
203c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
204c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
205c6b6a421SHawking Zhang 	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
206c6b6a421SHawking Zhang };
207c6b6a421SHawking Zhang 
208c6b6a421SHawking Zhang static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
209c6b6a421SHawking Zhang 					 u32 sh_num, u32 reg_offset)
210c6b6a421SHawking Zhang {
211c6b6a421SHawking Zhang 	uint32_t val;
212c6b6a421SHawking Zhang 
213c6b6a421SHawking Zhang 	mutex_lock(&adev->grbm_idx_mutex);
214c6b6a421SHawking Zhang 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
215c6b6a421SHawking Zhang 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
216c6b6a421SHawking Zhang 
217c6b6a421SHawking Zhang 	val = RREG32(reg_offset);
218c6b6a421SHawking Zhang 
219c6b6a421SHawking Zhang 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
220c6b6a421SHawking Zhang 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
221c6b6a421SHawking Zhang 	mutex_unlock(&adev->grbm_idx_mutex);
222c6b6a421SHawking Zhang 	return val;
223c6b6a421SHawking Zhang }
224c6b6a421SHawking Zhang 
225c6b6a421SHawking Zhang static uint32_t nv_get_register_value(struct amdgpu_device *adev,
226c6b6a421SHawking Zhang 				      bool indexed, u32 se_num,
227c6b6a421SHawking Zhang 				      u32 sh_num, u32 reg_offset)
228c6b6a421SHawking Zhang {
229c6b6a421SHawking Zhang 	if (indexed) {
230c6b6a421SHawking Zhang 		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
231c6b6a421SHawking Zhang 	} else {
232c6b6a421SHawking Zhang 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
233c6b6a421SHawking Zhang 			return adev->gfx.config.gb_addr_config;
234c6b6a421SHawking Zhang 		return RREG32(reg_offset);
235c6b6a421SHawking Zhang 	}
236c6b6a421SHawking Zhang }
237c6b6a421SHawking Zhang 
238c6b6a421SHawking Zhang static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
239c6b6a421SHawking Zhang 			    u32 sh_num, u32 reg_offset, u32 *value)
240c6b6a421SHawking Zhang {
241c6b6a421SHawking Zhang 	uint32_t i;
242c6b6a421SHawking Zhang 	struct soc15_allowed_register_entry  *en;
243c6b6a421SHawking Zhang 
244c6b6a421SHawking Zhang 	*value = 0;
245c6b6a421SHawking Zhang 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
246c6b6a421SHawking Zhang 		en = &nv_allowed_read_registers[i];
247c6b6a421SHawking Zhang 		if (reg_offset !=
248c6b6a421SHawking Zhang 		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
249c6b6a421SHawking Zhang 			continue;
250c6b6a421SHawking Zhang 
251c6b6a421SHawking Zhang 		*value = nv_get_register_value(adev,
252c6b6a421SHawking Zhang 					       nv_allowed_read_registers[i].grbm_indexed,
253c6b6a421SHawking Zhang 					       se_num, sh_num, reg_offset);
254c6b6a421SHawking Zhang 		return 0;
255c6b6a421SHawking Zhang 	}
256c6b6a421SHawking Zhang 	return -EINVAL;
257c6b6a421SHawking Zhang }
258c6b6a421SHawking Zhang 
259c6b6a421SHawking Zhang #if 0
260c6b6a421SHawking Zhang static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
261c6b6a421SHawking Zhang {
262c6b6a421SHawking Zhang 	u32 i;
263c6b6a421SHawking Zhang 
264c6b6a421SHawking Zhang 	dev_info(adev->dev, "GPU pci config reset\n");
265c6b6a421SHawking Zhang 
266c6b6a421SHawking Zhang 	/* disable BM */
267c6b6a421SHawking Zhang 	pci_clear_master(adev->pdev);
268c6b6a421SHawking Zhang 	/* reset */
269c6b6a421SHawking Zhang 	amdgpu_pci_config_reset(adev);
270c6b6a421SHawking Zhang 
271c6b6a421SHawking Zhang 	udelay(100);
272c6b6a421SHawking Zhang 
273c6b6a421SHawking Zhang 	/* wait for asic to come out of reset */
274c6b6a421SHawking Zhang 	for (i = 0; i < adev->usec_timeout; i++) {
275c6b6a421SHawking Zhang 		u32 memsize = nbio_v2_3_get_memsize(adev);
276c6b6a421SHawking Zhang 		if (memsize != 0xffffffff)
277c6b6a421SHawking Zhang 			break;
278c6b6a421SHawking Zhang 		udelay(1);
279c6b6a421SHawking Zhang 	}
280c6b6a421SHawking Zhang 
281c6b6a421SHawking Zhang }
282c6b6a421SHawking Zhang #endif
283c6b6a421SHawking Zhang 
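/* Full-chip (mode1) reset, driven through the PSP.  Bus mastering is disabled
 * and PCI config space is saved/restored around the reset, then the memory
 * size register is polled until the ASIC comes back out of reset.
 */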
2843e2bb60aSKevin Wang static int nv_asic_mode1_reset(struct amdgpu_device *adev)
2853e2bb60aSKevin Wang {
2863e2bb60aSKevin Wang 	u32 i;
2873e2bb60aSKevin Wang 	int ret = 0;
2883e2bb60aSKevin Wang 
2893e2bb60aSKevin Wang 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
2903e2bb60aSKevin Wang 
2913e2bb60aSKevin Wang 	dev_info(adev->dev, "GPU mode1 reset\n");
2923e2bb60aSKevin Wang 
2933e2bb60aSKevin Wang 	/* disable BM */
2943e2bb60aSKevin Wang 	pci_clear_master(adev->pdev);
2953e2bb60aSKevin Wang 
2963e2bb60aSKevin Wang 	pci_save_state(adev->pdev);
2973e2bb60aSKevin Wang 
2983e2bb60aSKevin Wang 	ret = psp_gpu_reset(adev);
2993e2bb60aSKevin Wang 	if (ret)
3003e2bb60aSKevin Wang 		dev_err(adev->dev, "GPU mode1 reset failed\n");
3013e2bb60aSKevin Wang 
3023e2bb60aSKevin Wang 	pci_restore_state(adev->pdev);
3033e2bb60aSKevin Wang 
3043e2bb60aSKevin Wang 	/* wait for asic to come out of reset */
3053e2bb60aSKevin Wang 	for (i = 0; i < adev->usec_timeout; i++) {
306bebc0762SHawking Zhang 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
3073e2bb60aSKevin Wang 
3083e2bb60aSKevin Wang 		if (memsize != 0xffffffff)
3093e2bb60aSKevin Wang 			break;
3103e2bb60aSKevin Wang 		udelay(1);
3113e2bb60aSKevin Wang 	}
3123e2bb60aSKevin Wang 
3133e2bb60aSKevin Wang 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
3143e2bb60aSKevin Wang 
3153e2bb60aSKevin Wang 	return ret;
3163e2bb60aSKevin Wang }
3172ddc6c3eSAlex Deucher 
318ac742616SAlex Deucher static bool nv_asic_supports_baco(struct amdgpu_device *adev)
319ac742616SAlex Deucher {
320ac742616SAlex Deucher 	struct smu_context *smu = &adev->smu;
321ac742616SAlex Deucher 
322ac742616SAlex Deucher 	if (smu_baco_is_support(smu))
323ac742616SAlex Deucher 		return true;
324ac742616SAlex Deucher 	else
325ac742616SAlex Deucher 		return false;
326ac742616SAlex Deucher }
327ac742616SAlex Deucher 
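/* Prefer a BACO (Bus Active, Chip Off) reset when the SMU reports support and
 * we are not running as an SR-IOV guest; otherwise fall back to mode1 reset.
 */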
3282ddc6c3eSAlex Deucher static enum amd_reset_method
3292ddc6c3eSAlex Deucher nv_asic_reset_method(struct amdgpu_device *adev)
3302ddc6c3eSAlex Deucher {
3312ddc6c3eSAlex Deucher 	struct smu_context *smu = &adev->smu;
3322ddc6c3eSAlex Deucher 
333b4def374SJiange Zhao 	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
3342ddc6c3eSAlex Deucher 		return AMD_RESET_METHOD_BACO;
3352ddc6c3eSAlex Deucher 	else
3362ddc6c3eSAlex Deucher 		return AMD_RESET_METHOD_MODE1;
3372ddc6c3eSAlex Deucher }
3382ddc6c3eSAlex Deucher 
339c6b6a421SHawking Zhang static int nv_asic_reset(struct amdgpu_device *adev)
340c6b6a421SHawking Zhang {
341c6b6a421SHawking Zhang 
342c6b6a421SHawking Zhang 	/* FIXME: it doesn't work since vega10 */
343c6b6a421SHawking Zhang #if 0
344c6b6a421SHawking Zhang 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
345c6b6a421SHawking Zhang 
346c6b6a421SHawking Zhang 	nv_gpu_pci_config_reset(adev);
347c6b6a421SHawking Zhang 
348c6b6a421SHawking Zhang 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
349c6b6a421SHawking Zhang #endif
350767acabdSKevin Wang 	int ret = 0;
351767acabdSKevin Wang 	struct smu_context *smu = &adev->smu;
352c6b6a421SHawking Zhang 
353e3526257SMonk Liu 	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
35411520f27SAlex Deucher 		ret = smu_baco_enter(smu);
35511520f27SAlex Deucher 		if (ret)
35611520f27SAlex Deucher 			return ret;
35711520f27SAlex Deucher 		ret = smu_baco_exit(smu);
35811520f27SAlex Deucher 		if (ret)
35911520f27SAlex Deucher 			return ret;
360e3526257SMonk Liu 	} else {
3613e2bb60aSKevin Wang 		ret = nv_asic_mode1_reset(adev);
362e3526257SMonk Liu 	}
363767acabdSKevin Wang 
364767acabdSKevin Wang 	return ret;
365c6b6a421SHawking Zhang }
366c6b6a421SHawking Zhang 
367c6b6a421SHawking Zhang static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
368c6b6a421SHawking Zhang {
369c6b6a421SHawking Zhang 	/* todo */
370c6b6a421SHawking Zhang 	return 0;
371c6b6a421SHawking Zhang }
372c6b6a421SHawking Zhang 
373c6b6a421SHawking Zhang static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
374c6b6a421SHawking Zhang {
375c6b6a421SHawking Zhang 	/* todo */
376c6b6a421SHawking Zhang 	return 0;
377c6b6a421SHawking Zhang }
378c6b6a421SHawking Zhang 
379c6b6a421SHawking Zhang static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
380c6b6a421SHawking Zhang {
381c6b6a421SHawking Zhang 	if (pci_is_root_bus(adev->pdev->bus))
382c6b6a421SHawking Zhang 		return;
383c6b6a421SHawking Zhang 
384c6b6a421SHawking Zhang 	if (amdgpu_pcie_gen2 == 0)
385c6b6a421SHawking Zhang 		return;
386c6b6a421SHawking Zhang 
387c6b6a421SHawking Zhang 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
388c6b6a421SHawking Zhang 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
389c6b6a421SHawking Zhang 		return;
390c6b6a421SHawking Zhang 
391c6b6a421SHawking Zhang 	/* todo */
392c6b6a421SHawking Zhang }
393c6b6a421SHawking Zhang 
394c6b6a421SHawking Zhang static void nv_program_aspm(struct amdgpu_device *adev)
395c6b6a421SHawking Zhang {
396c6b6a421SHawking Zhang 
397c6b6a421SHawking Zhang 	if (amdgpu_aspm == 0)
398c6b6a421SHawking Zhang 		return;
399c6b6a421SHawking Zhang 
400c6b6a421SHawking Zhang 	/* todo */
401c6b6a421SHawking Zhang }
402c6b6a421SHawking Zhang 
403c6b6a421SHawking Zhang static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
404c6b6a421SHawking Zhang 					bool enable)
405c6b6a421SHawking Zhang {
406bebc0762SHawking Zhang 	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
407bebc0762SHawking Zhang 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
408c6b6a421SHawking Zhang }
409c6b6a421SHawking Zhang 
410c6b6a421SHawking Zhang static const struct amdgpu_ip_block_version nv_common_ip_block =
411c6b6a421SHawking Zhang {
412c6b6a421SHawking Zhang 	.type = AMD_IP_BLOCK_TYPE_COMMON,
413c6b6a421SHawking Zhang 	.major = 1,
414c6b6a421SHawking Zhang 	.minor = 0,
415c6b6a421SHawking Zhang 	.rev = 0,
416c6b6a421SHawking Zhang 	.funcs = &nv_common_ip_funcs,
417c6b6a421SHawking Zhang };
418c6b6a421SHawking Zhang 
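/* Initialize the per-IP register base offsets, preferring the IP discovery
 * table and falling back to the hardcoded per-ASIC tables when discovery is
 * disabled or fails.
 */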
419b5c73856SXiaojie Yuan static int nv_reg_base_init(struct amdgpu_device *adev)
420c6b6a421SHawking Zhang {
421b5c73856SXiaojie Yuan 	int r;
422b5c73856SXiaojie Yuan 
423b5c73856SXiaojie Yuan 	if (amdgpu_discovery) {
424b5c73856SXiaojie Yuan 		r = amdgpu_discovery_reg_base_init(adev);
425b5c73856SXiaojie Yuan 		if (r) {
426b5c73856SXiaojie Yuan 			DRM_WARN("failed to init reg base from ip discovery table, "
427b5c73856SXiaojie Yuan 					"falling back to legacy init method\n");
428b5c73856SXiaojie Yuan 			goto legacy_init;
429b5c73856SXiaojie Yuan 		}
430b5c73856SXiaojie Yuan 
431b5c73856SXiaojie Yuan 		return 0;
432b5c73856SXiaojie Yuan 	}
433b5c73856SXiaojie Yuan 
434b5c73856SXiaojie Yuan legacy_init:
435c6b6a421SHawking Zhang 	switch (adev->asic_type) {
436c6b6a421SHawking Zhang 	case CHIP_NAVI10:
437c6b6a421SHawking Zhang 		navi10_reg_base_init(adev);
438c6b6a421SHawking Zhang 		break;
439a0f6d926SXiaojie Yuan 	case CHIP_NAVI14:
440a0f6d926SXiaojie Yuan 		navi14_reg_base_init(adev);
441a0f6d926SXiaojie Yuan 		break;
44203d0a073SXiaojie Yuan 	case CHIP_NAVI12:
44303d0a073SXiaojie Yuan 		navi12_reg_base_init(adev);
44403d0a073SXiaojie Yuan 		break;
445c6b6a421SHawking Zhang 	default:
446c6b6a421SHawking Zhang 		return -EINVAL;
447c6b6a421SHawking Zhang 	}
448c6b6a421SHawking Zhang 
449b5c73856SXiaojie Yuan 	return 0;
450b5c73856SXiaojie Yuan }
451b5c73856SXiaojie Yuan 
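/* Register the IP blocks for each Navi ASIC (common, GMC, IH, PSP, SMU,
 * display, GFX, SDMA, VCN, JPEG, MES) in the order they are initialized,
 * after wiring up the NBIO callbacks and, for SR-IOV, the virt ops.
 */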
452b5c73856SXiaojie Yuan int nv_set_ip_blocks(struct amdgpu_device *adev)
453b5c73856SXiaojie Yuan {
454b5c73856SXiaojie Yuan 	int r;
455b5c73856SXiaojie Yuan 
456122078deSMonk Liu 	adev->nbio.funcs = &nbio_v2_3_funcs;
457122078deSMonk Liu 	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
458122078deSMonk Liu 
459122078deSMonk Liu 	if (amdgpu_sriov_vf(adev)) {
460122078deSMonk Liu 		adev->virt.ops = &xgpu_nv_virt_ops;
461122078deSMonk Liu 		/* try to send a GPU_INIT_DATA request to the host */
462122078deSMonk Liu 		amdgpu_virt_request_init_data(adev);
463122078deSMonk Liu 	}
464122078deSMonk Liu 
465b5c73856SXiaojie Yuan 	/* Set IP register base before any HW register access */
466b5c73856SXiaojie Yuan 	r = nv_reg_base_init(adev);
467b5c73856SXiaojie Yuan 	if (r)
468b5c73856SXiaojie Yuan 		return r;
469b5c73856SXiaojie Yuan 
470c6b6a421SHawking Zhang 	switch (adev->asic_type) {
471c6b6a421SHawking Zhang 	case CHIP_NAVI10:
472d1daf850SAlex Deucher 	case CHIP_NAVI14:
473c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
474c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
475c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
476c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
477c6b6a421SHawking Zhang 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4789530273eSEvan Quan 		    !amdgpu_sriov_vf(adev))
479c6b6a421SHawking Zhang 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
480c6b6a421SHawking Zhang 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
481c6b6a421SHawking Zhang 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
482f8a7976bSAlex Deucher #if defined(CONFIG_DRM_AMD_DC)
483b4f199c7SHarry Wentland 		else if (amdgpu_device_has_dc_support(adev))
484b4f199c7SHarry Wentland 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
485f8a7976bSAlex Deucher #endif
486c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
487c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
488c6b6a421SHawking Zhang 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
4899530273eSEvan Quan 		    !amdgpu_sriov_vf(adev))
490c6b6a421SHawking Zhang 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
491c6b6a421SHawking Zhang 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
4925be45a26SLeo Liu 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
493c6b6a421SHawking Zhang 		if (adev->enable_mes)
494c6b6a421SHawking Zhang 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
495c6b6a421SHawking Zhang 		break;
49644e9e7c9SXiaojie Yuan 	case CHIP_NAVI12:
49744e9e7c9SXiaojie Yuan 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
49844e9e7c9SXiaojie Yuan 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
49944e9e7c9SXiaojie Yuan 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
5006b66ae2eSXiaojie Yuan 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
50179bebabbSMonk Liu 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
5027f47efebSXiaojie Yuan 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
50379902029SXiaojie Yuan 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
50479902029SXiaojie Yuan 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
50520c14ee1SPetr Cvek #if defined(CONFIG_DRM_AMD_DC)
506078655d9SLeo Li 		else if (amdgpu_device_has_dc_support(adev))
507078655d9SLeo Li 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
50820c14ee1SPetr Cvek #endif
50944e9e7c9SXiaojie Yuan 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
51044e9e7c9SXiaojie Yuan 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
5117f47efebSXiaojie Yuan 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
5129530273eSEvan Quan 		    !amdgpu_sriov_vf(adev))
5137f47efebSXiaojie Yuan 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
5141fbed280SBoyuan Zhang 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
515fe442491SMonk Liu 		if (!amdgpu_sriov_vf(adev))
5165be45a26SLeo Liu 			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
51744e9e7c9SXiaojie Yuan 		break;
518c6b6a421SHawking Zhang 	default:
519c6b6a421SHawking Zhang 		return -EINVAL;
520c6b6a421SHawking Zhang 	}
521c6b6a421SHawking Zhang 
522c6b6a421SHawking Zhang 	return 0;
523c6b6a421SHawking Zhang }
524c6b6a421SHawking Zhang 
525c6b6a421SHawking Zhang static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
526c6b6a421SHawking Zhang {
527bebc0762SHawking Zhang 	return adev->nbio.funcs->get_rev_id(adev);
528c6b6a421SHawking Zhang }
529c6b6a421SHawking Zhang 
530c6b6a421SHawking Zhang static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
531c6b6a421SHawking Zhang {
532bebc0762SHawking Zhang 	adev->nbio.funcs->hdp_flush(adev, ring);
533c6b6a421SHawking Zhang }
534c6b6a421SHawking Zhang 
535c6b6a421SHawking Zhang static void nv_invalidate_hdp(struct amdgpu_device *adev,
536c6b6a421SHawking Zhang 				struct amdgpu_ring *ring)
537c6b6a421SHawking Zhang {
538c6b6a421SHawking Zhang 	if (!ring || !ring->funcs->emit_wreg) {
539c6b6a421SHawking Zhang 		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
540c6b6a421SHawking Zhang 	} else {
541c6b6a421SHawking Zhang 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
542c6b6a421SHawking Zhang 					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
543c6b6a421SHawking Zhang 	}
544c6b6a421SHawking Zhang }
545c6b6a421SHawking Zhang 
546c6b6a421SHawking Zhang static bool nv_need_full_reset(struct amdgpu_device *adev)
547c6b6a421SHawking Zhang {
548c6b6a421SHawking Zhang 	return true;
549c6b6a421SHawking Zhang }
550c6b6a421SHawking Zhang 
551c6b6a421SHawking Zhang static void nv_get_pcie_usage(struct amdgpu_device *adev,
552c6b6a421SHawking Zhang 			      uint64_t *count0,
553c6b6a421SHawking Zhang 			      uint64_t *count1)
554c6b6a421SHawking Zhang {
555c6b6a421SHawking Zhang 	/*TODO*/
556c6b6a421SHawking Zhang }
557c6b6a421SHawking Zhang 
558c6b6a421SHawking Zhang static bool nv_need_reset_on_init(struct amdgpu_device *adev)
559c6b6a421SHawking Zhang {
560c6b6a421SHawking Zhang #if 0
561c6b6a421SHawking Zhang 	u32 sol_reg;
562c6b6a421SHawking Zhang 
563c6b6a421SHawking Zhang 	if (adev->flags & AMD_IS_APU)
564c6b6a421SHawking Zhang 		return false;
565c6b6a421SHawking Zhang 
566c6b6a421SHawking Zhang 	/* Check the sOS sign-of-life register to confirm that the sys driver
567c6b6a421SHawking Zhang 	 * and sOS have already been loaded.
568c6b6a421SHawking Zhang 	 */
569c6b6a421SHawking Zhang 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
570c6b6a421SHawking Zhang 	if (sol_reg)
571c6b6a421SHawking Zhang 		return true;
572c6b6a421SHawking Zhang #endif
573c6b6a421SHawking Zhang 	/* TODO: re-enable it when mode1 reset is functional */
574c6b6a421SHawking Zhang 	return false;
575c6b6a421SHawking Zhang }
576c6b6a421SHawking Zhang 
5772af81531SKevin Wang static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
5782af81531SKevin Wang {
5792af81531SKevin Wang 
5802af81531SKevin Wang 	/* TODO
5812af81531SKevin Wang 	 * dummy implementation for the pcie_replay_count sysfs interface
5822af81531SKevin Wang 	 */
5832af81531SKevin Wang 
5842af81531SKevin Wang 	return 0;
5852af81531SKevin Wang }
5862af81531SKevin Wang 
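/* Assign each ring's doorbell slot within the Navi10 doorbell BAR layout. */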
587c6b6a421SHawking Zhang static void nv_init_doorbell_index(struct amdgpu_device *adev)
588c6b6a421SHawking Zhang {
589c6b6a421SHawking Zhang 	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
590c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
591c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
592c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
593c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
594c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
595c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
596c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
597c6b6a421SHawking Zhang 	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
598c6b6a421SHawking Zhang 	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
599c6b6a421SHawking Zhang 	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
600c6b6a421SHawking Zhang 	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
601c6b6a421SHawking Zhang 	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
602c6b6a421SHawking Zhang 	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
603c6b6a421SHawking Zhang 	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
604c6b6a421SHawking Zhang 	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
605c6b6a421SHawking Zhang 	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
606c6b6a421SHawking Zhang 	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
607c6b6a421SHawking Zhang 	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
608c6b6a421SHawking Zhang 	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
609c6b6a421SHawking Zhang 	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
610c6b6a421SHawking Zhang 	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;
611c6b6a421SHawking Zhang 
612c6b6a421SHawking Zhang 	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
613c6b6a421SHawking Zhang 	adev->doorbell_index.sdma_doorbell_range = 20;
614c6b6a421SHawking Zhang }
615c6b6a421SHawking Zhang 
616c6b6a421SHawking Zhang static const struct amdgpu_asic_funcs nv_asic_funcs =
617c6b6a421SHawking Zhang {
618c6b6a421SHawking Zhang 	.read_disabled_bios = &nv_read_disabled_bios,
619c6b6a421SHawking Zhang 	.read_bios_from_rom = &nv_read_bios_from_rom,
620c6b6a421SHawking Zhang 	.read_register = &nv_read_register,
621c6b6a421SHawking Zhang 	.reset = &nv_asic_reset,
6222ddc6c3eSAlex Deucher 	.reset_method = &nv_asic_reset_method,
623c6b6a421SHawking Zhang 	.set_vga_state = &nv_vga_set_state,
624c6b6a421SHawking Zhang 	.get_xclk = &nv_get_xclk,
625c6b6a421SHawking Zhang 	.set_uvd_clocks = &nv_set_uvd_clocks,
626c6b6a421SHawking Zhang 	.set_vce_clocks = &nv_set_vce_clocks,
627c6b6a421SHawking Zhang 	.get_config_memsize = &nv_get_config_memsize,
628c6b6a421SHawking Zhang 	.flush_hdp = &nv_flush_hdp,
629c6b6a421SHawking Zhang 	.invalidate_hdp = &nv_invalidate_hdp,
630c6b6a421SHawking Zhang 	.init_doorbell_index = &nv_init_doorbell_index,
631c6b6a421SHawking Zhang 	.need_full_reset = &nv_need_full_reset,
632c6b6a421SHawking Zhang 	.get_pcie_usage = &nv_get_pcie_usage,
633c6b6a421SHawking Zhang 	.need_reset_on_init = &nv_need_reset_on_init,
6342af81531SKevin Wang 	.get_pcie_replay_count = &nv_get_pcie_replay_count,
635ac742616SAlex Deucher 	.supports_baco = &nv_asic_supports_baco,
636c6b6a421SHawking Zhang };
637c6b6a421SHawking Zhang 
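/* Early init: hook up the register accessor callbacks and the remapped HDP
 * register hole, then set the per-ASIC clock/power-gating flags and the
 * external revision id.
 */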
638c6b6a421SHawking Zhang static int nv_common_early_init(void *handle)
639c6b6a421SHawking Zhang {
640923c087aSYong Zhao #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
641c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
642c6b6a421SHawking Zhang 
643923c087aSYong Zhao 	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
644923c087aSYong Zhao 	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
645c6b6a421SHawking Zhang 	adev->smc_rreg = NULL;
646c6b6a421SHawking Zhang 	adev->smc_wreg = NULL;
647c6b6a421SHawking Zhang 	adev->pcie_rreg = &nv_pcie_rreg;
648c6b6a421SHawking Zhang 	adev->pcie_wreg = &nv_pcie_wreg;
649c6b6a421SHawking Zhang 
650c6b6a421SHawking Zhang 	/* TODO: will add them during VCN v2 implementation */
651c6b6a421SHawking Zhang 	adev->uvd_ctx_rreg = NULL;
652c6b6a421SHawking Zhang 	adev->uvd_ctx_wreg = NULL;
653c6b6a421SHawking Zhang 
654c6b6a421SHawking Zhang 	adev->didt_rreg = &nv_didt_rreg;
655c6b6a421SHawking Zhang 	adev->didt_wreg = &nv_didt_wreg;
656c6b6a421SHawking Zhang 
657c6b6a421SHawking Zhang 	adev->asic_funcs = &nv_asic_funcs;
658c6b6a421SHawking Zhang 
659c6b6a421SHawking Zhang 	adev->rev_id = nv_get_rev_id(adev);
660c6b6a421SHawking Zhang 	adev->external_rev_id = 0xff;
661c6b6a421SHawking Zhang 	switch (adev->asic_type) {
662c6b6a421SHawking Zhang 	case CHIP_NAVI10:
663c6b6a421SHawking Zhang 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
664c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_GFX_CGCG |
665c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_IH_CG |
666c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_HDP_MGCG |
667c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_HDP_LS |
668c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_SDMA_MGCG |
669c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_SDMA_LS |
670c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_MC_MGCG |
671c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_MC_LS |
672c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_ATHUB_MGCG |
673c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_ATHUB_LS |
674c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_VCN_MGCG |
675099d66e4SLeo Liu 			AMD_CG_SUPPORT_JPEG_MGCG |
676c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_BIF_MGCG |
677c6b6a421SHawking Zhang 			AMD_CG_SUPPORT_BIF_LS;
678157710eaSLeo Liu 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
679c12d410fSHuang Rui 			AMD_PG_SUPPORT_VCN_DPG |
680099d66e4SLeo Liu 			AMD_PG_SUPPORT_JPEG |
681a201b6acSHuang Rui 			AMD_PG_SUPPORT_ATHUB;
682c6b6a421SHawking Zhang 		adev->external_rev_id = adev->rev_id + 0x1;
683c6b6a421SHawking Zhang 		break;
6845e71e011SXiaojie Yuan 	case CHIP_NAVI14:
685d0c39f8cSXiaojie Yuan 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
686d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_GFX_CGCG |
687d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_IH_CG |
688d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_HDP_MGCG |
689d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_HDP_LS |
690d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_SDMA_MGCG |
691d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_SDMA_LS |
692d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_MC_MGCG |
693d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_MC_LS |
694d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_ATHUB_MGCG |
695d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_ATHUB_LS |
696d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_VCN_MGCG |
697099d66e4SLeo Liu 			AMD_CG_SUPPORT_JPEG_MGCG |
698d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_BIF_MGCG |
699d0c39f8cSXiaojie Yuan 			AMD_CG_SUPPORT_BIF_LS;
7000377b088SXiaojie Yuan 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
701099d66e4SLeo Liu 			AMD_PG_SUPPORT_JPEG |
7020377b088SXiaojie Yuan 			AMD_PG_SUPPORT_VCN_DPG;
70335ef88faStiancyin 		adev->external_rev_id = adev->rev_id + 20;
7045e71e011SXiaojie Yuan 		break;
70574b5e509SXiaojie Yuan 	case CHIP_NAVI12:
706dca009e7SXiaojie Yuan 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
707dca009e7SXiaojie Yuan 			AMD_CG_SUPPORT_GFX_MGLS |
708dca009e7SXiaojie Yuan 			AMD_CG_SUPPORT_GFX_CGCG |
709dca009e7SXiaojie Yuan 			AMD_CG_SUPPORT_GFX_CP_LS |
7105211c37aSXiaojie Yuan 			AMD_CG_SUPPORT_GFX_RLC_LS |
711fbe0bc57SXiaojie Yuan 			AMD_CG_SUPPORT_IH_CG |
7125211c37aSXiaojie Yuan 			AMD_CG_SUPPORT_HDP_MGCG |
713358ab97fSXiaojie Yuan 			AMD_CG_SUPPORT_HDP_LS |
714358ab97fSXiaojie Yuan 			AMD_CG_SUPPORT_SDMA_MGCG |
7158b797b3dSXiaojie Yuan 			AMD_CG_SUPPORT_SDMA_LS |
7168b797b3dSXiaojie Yuan 			AMD_CG_SUPPORT_MC_MGCG |
717ca51678dSXiaojie Yuan 			AMD_CG_SUPPORT_MC_LS |
718ca51678dSXiaojie Yuan 			AMD_CG_SUPPORT_ATHUB_MGCG |
71965872e59SXiaojie Yuan 			AMD_CG_SUPPORT_ATHUB_LS |
720099d66e4SLeo Liu 			AMD_CG_SUPPORT_VCN_MGCG |
721099d66e4SLeo Liu 			AMD_CG_SUPPORT_JPEG_MGCG;
722c1653ea0SXiaojie Yuan 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
7235ef3b8acSXiaojie Yuan 			AMD_PG_SUPPORT_VCN_DPG |
724099d66e4SLeo Liu 			AMD_PG_SUPPORT_JPEG |
7255ef3b8acSXiaojie Yuan 			AMD_PG_SUPPORT_ATHUB;
726df5e984cSTiecheng Zhou 		/* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
727df5e984cSTiecheng Zhou 		 * as a consequence, the rev_id and external_rev_id are wrong.
728df5e984cSTiecheng Zhou 		 * Work around this by hardcoding rev_id to 0 (the default value).
729df5e984cSTiecheng Zhou 		 */
730df5e984cSTiecheng Zhou 		if (amdgpu_sriov_vf(adev))
731df5e984cSTiecheng Zhou 			adev->rev_id = 0;
73274b5e509SXiaojie Yuan 		adev->external_rev_id = adev->rev_id + 0xa;
73374b5e509SXiaojie Yuan 		break;
734c6b6a421SHawking Zhang 	default:
735c6b6a421SHawking Zhang 		/* FIXME: not supported yet */
736c6b6a421SHawking Zhang 		return -EINVAL;
737c6b6a421SHawking Zhang 	}
738c6b6a421SHawking Zhang 
739b05b6903SJiange Zhao 	if (amdgpu_sriov_vf(adev)) {
740b05b6903SJiange Zhao 		amdgpu_virt_init_setting(adev);
741b05b6903SJiange Zhao 		xgpu_nv_mailbox_set_irq_funcs(adev);
742b05b6903SJiange Zhao 	}
743b05b6903SJiange Zhao 
744c6b6a421SHawking Zhang 	return 0;
745c6b6a421SHawking Zhang }
746c6b6a421SHawking Zhang 
747c6b6a421SHawking Zhang static int nv_common_late_init(void *handle)
748c6b6a421SHawking Zhang {
749b05b6903SJiange Zhao 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
750b05b6903SJiange Zhao 
751b05b6903SJiange Zhao 	if (amdgpu_sriov_vf(adev))
752b05b6903SJiange Zhao 		xgpu_nv_mailbox_get_irq(adev);
753b05b6903SJiange Zhao 
754c6b6a421SHawking Zhang 	return 0;
755c6b6a421SHawking Zhang }
756c6b6a421SHawking Zhang 
757c6b6a421SHawking Zhang static int nv_common_sw_init(void *handle)
758c6b6a421SHawking Zhang {
759b05b6903SJiange Zhao 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
760b05b6903SJiange Zhao 
761b05b6903SJiange Zhao 	if (amdgpu_sriov_vf(adev))
762b05b6903SJiange Zhao 		xgpu_nv_mailbox_add_irq_id(adev);
763b05b6903SJiange Zhao 
764c6b6a421SHawking Zhang 	return 0;
765c6b6a421SHawking Zhang }
766c6b6a421SHawking Zhang 
767c6b6a421SHawking Zhang static int nv_common_sw_fini(void *handle)
768c6b6a421SHawking Zhang {
769c6b6a421SHawking Zhang 	return 0;
770c6b6a421SHawking Zhang }
771c6b6a421SHawking Zhang 
772c6b6a421SHawking Zhang static int nv_common_hw_init(void *handle)
773c6b6a421SHawking Zhang {
774c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
775c6b6a421SHawking Zhang 
776c6b6a421SHawking Zhang 	/* enable pcie gen2/3 link */
777c6b6a421SHawking Zhang 	nv_pcie_gen3_enable(adev);
778c6b6a421SHawking Zhang 	/* enable aspm */
779c6b6a421SHawking Zhang 	nv_program_aspm(adev);
780c6b6a421SHawking Zhang 	/* setup nbio registers */
781bebc0762SHawking Zhang 	adev->nbio.funcs->init_registers(adev);
782923c087aSYong Zhao 	/* remap HDP registers to a hole in mmio space,
783923c087aSYong Zhao 	 * in order to expose those registers
784923c087aSYong Zhao 	 * to process space
785923c087aSYong Zhao 	 */
786923c087aSYong Zhao 	if (adev->nbio.funcs->remap_hdp_registers)
787923c087aSYong Zhao 		adev->nbio.funcs->remap_hdp_registers(adev);
788c6b6a421SHawking Zhang 	/* enable the doorbell aperture */
789c6b6a421SHawking Zhang 	nv_enable_doorbell_aperture(adev, true);
790c6b6a421SHawking Zhang 
791c6b6a421SHawking Zhang 	return 0;
792c6b6a421SHawking Zhang }
793c6b6a421SHawking Zhang 
794c6b6a421SHawking Zhang static int nv_common_hw_fini(void *handle)
795c6b6a421SHawking Zhang {
796c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
797c6b6a421SHawking Zhang 
798c6b6a421SHawking Zhang 	/* disable the doorbell aperture */
799c6b6a421SHawking Zhang 	nv_enable_doorbell_aperture(adev, false);
800c6b6a421SHawking Zhang 
801c6b6a421SHawking Zhang 	return 0;
802c6b6a421SHawking Zhang }
803c6b6a421SHawking Zhang 
804c6b6a421SHawking Zhang static int nv_common_suspend(void *handle)
805c6b6a421SHawking Zhang {
806c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
807c6b6a421SHawking Zhang 
808c6b6a421SHawking Zhang 	return nv_common_hw_fini(adev);
809c6b6a421SHawking Zhang }
810c6b6a421SHawking Zhang 
811c6b6a421SHawking Zhang static int nv_common_resume(void *handle)
812c6b6a421SHawking Zhang {
813c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
814c6b6a421SHawking Zhang 
815c6b6a421SHawking Zhang 	return nv_common_hw_init(adev);
816c6b6a421SHawking Zhang }
817c6b6a421SHawking Zhang 
818c6b6a421SHawking Zhang static bool nv_common_is_idle(void *handle)
819c6b6a421SHawking Zhang {
820c6b6a421SHawking Zhang 	return true;
821c6b6a421SHawking Zhang }
822c6b6a421SHawking Zhang 
823c6b6a421SHawking Zhang static int nv_common_wait_for_idle(void *handle)
824c6b6a421SHawking Zhang {
825c6b6a421SHawking Zhang 	return 0;
826c6b6a421SHawking Zhang }
827c6b6a421SHawking Zhang 
828c6b6a421SHawking Zhang static int nv_common_soft_reset(void *handle)
829c6b6a421SHawking Zhang {
830c6b6a421SHawking Zhang 	return 0;
831c6b6a421SHawking Zhang }
832c6b6a421SHawking Zhang 
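/* Switch the HDP memory power mode (LS/DS/SD).  The IPH/RC clock overrides
 * are forced on and all power controls cleared first, since HDP 5.0 cannot
 * switch modes dynamically; the previous clock setting is restored afterwards.
 */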
833c6b6a421SHawking Zhang static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
834c6b6a421SHawking Zhang 					   bool enable)
835c6b6a421SHawking Zhang {
836c6b6a421SHawking Zhang 	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
837c6b6a421SHawking Zhang 	uint32_t hdp_mem_pwr_cntl;
838c6b6a421SHawking Zhang 
839c6b6a421SHawking Zhang 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
840c6b6a421SHawking Zhang 				AMD_CG_SUPPORT_HDP_DS |
841c6b6a421SHawking Zhang 				AMD_CG_SUPPORT_HDP_SD)))
842c6b6a421SHawking Zhang 		return;
843c6b6a421SHawking Zhang 
844c6b6a421SHawking Zhang 	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
845c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
846c6b6a421SHawking Zhang 
847c6b6a421SHawking Zhang 	/* Before doing the clock/power mode switch,
848c6b6a421SHawking Zhang 	 * force on the IPH & RC clocks */
849c6b6a421SHawking Zhang 	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
850c6b6a421SHawking Zhang 				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
851c6b6a421SHawking Zhang 	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
852c6b6a421SHawking Zhang 				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
853c6b6a421SHawking Zhang 	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
854c6b6a421SHawking Zhang 
855c6b6a421SHawking Zhang 	/* HDP 5.0 doesn't support dynamic power mode switching,
856c6b6a421SHawking Zhang 	 * so disable clock and power gating before making any changes */
857c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
858c6b6a421SHawking Zhang 					 IPH_MEM_POWER_CTRL_EN, 0);
859c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
860c6b6a421SHawking Zhang 					 IPH_MEM_POWER_LS_EN, 0);
861c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
862c6b6a421SHawking Zhang 					 IPH_MEM_POWER_DS_EN, 0);
863c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
864c6b6a421SHawking Zhang 					 IPH_MEM_POWER_SD_EN, 0);
865c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
866c6b6a421SHawking Zhang 					 RC_MEM_POWER_CTRL_EN, 0);
867c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
868c6b6a421SHawking Zhang 					 RC_MEM_POWER_LS_EN, 0);
869c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
870c6b6a421SHawking Zhang 					 RC_MEM_POWER_DS_EN, 0);
871c6b6a421SHawking Zhang 	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
872c6b6a421SHawking Zhang 					 RC_MEM_POWER_SD_EN, 0);
873c6b6a421SHawking Zhang 	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
874c6b6a421SHawking Zhang 
875c6b6a421SHawking Zhang 	/* only one clock gating mode (LS/DS/SD) can be enabled */
876c6b6a421SHawking Zhang 	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
877c6b6a421SHawking Zhang 		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
878c6b6a421SHawking Zhang 						 HDP_MEM_POWER_CTRL,
879c6b6a421SHawking Zhang 						 IPH_MEM_POWER_LS_EN, enable);
880c6b6a421SHawking Zhang 		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
881c6b6a421SHawking Zhang 						 HDP_MEM_POWER_CTRL,
882c6b6a421SHawking Zhang 						 RC_MEM_POWER_LS_EN, enable);
883c6b6a421SHawking Zhang 	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
884c6b6a421SHawking Zhang 		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
885c6b6a421SHawking Zhang 						 HDP_MEM_POWER_CTRL,
886c6b6a421SHawking Zhang 						 IPH_MEM_POWER_DS_EN, enable);
887c6b6a421SHawking Zhang 		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
888c6b6a421SHawking Zhang 						 HDP_MEM_POWER_CTRL,
889c6b6a421SHawking Zhang 						 RC_MEM_POWER_DS_EN, enable);
890c6b6a421SHawking Zhang 	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
891c6b6a421SHawking Zhang 		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
892c6b6a421SHawking Zhang 						 HDP_MEM_POWER_CTRL,
893c6b6a421SHawking Zhang 						 IPH_MEM_POWER_SD_EN, enable);
894c6b6a421SHawking Zhang 		/* RC should not use shut-down mode, fall back to DS */
895c6b6a421SHawking Zhang 		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
896c6b6a421SHawking Zhang 						 HDP_MEM_POWER_CTRL,
897c6b6a421SHawking Zhang 						 RC_MEM_POWER_DS_EN, enable);
898c6b6a421SHawking Zhang 	}
899c6b6a421SHawking Zhang 
900c6b6a421SHawking Zhang 	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
901c6b6a421SHawking Zhang 
902c6b6a421SHawking Zhang 	/* restore the IPH & RC clock override after the clock/power mode change */
903c6b6a421SHawking Zhang 	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
904c6b6a421SHawking Zhang }
905c6b6a421SHawking Zhang 
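/* Enable or disable HDP medium-grain clock gating by clearing or setting the
 * per-client SOFT_OVERRIDE bits in HDP_CLK_CNTL.
 */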
906c6b6a421SHawking Zhang static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
907c6b6a421SHawking Zhang 				       bool enable)
908c6b6a421SHawking Zhang {
909c6b6a421SHawking Zhang 	uint32_t hdp_clk_cntl;
910c6b6a421SHawking Zhang 
911c6b6a421SHawking Zhang 	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
912c6b6a421SHawking Zhang 		return;
913c6b6a421SHawking Zhang 
914c6b6a421SHawking Zhang 	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
915c6b6a421SHawking Zhang 
916c6b6a421SHawking Zhang 	if (enable) {
917c6b6a421SHawking Zhang 		hdp_clk_cntl &=
918c6b6a421SHawking Zhang 			~(uint32_t)
919c6b6a421SHawking Zhang 			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
920c6b6a421SHawking Zhang 			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
921c6b6a421SHawking Zhang 			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
922c6b6a421SHawking Zhang 			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
923c6b6a421SHawking Zhang 			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
924c6b6a421SHawking Zhang 			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
925c6b6a421SHawking Zhang 	} else {
926c6b6a421SHawking Zhang 		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
927c6b6a421SHawking Zhang 			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
928c6b6a421SHawking Zhang 			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
929c6b6a421SHawking Zhang 			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
930c6b6a421SHawking Zhang 			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
931c6b6a421SHawking Zhang 			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
932c6b6a421SHawking Zhang 	}
933c6b6a421SHawking Zhang 
934c6b6a421SHawking Zhang 	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
935c6b6a421SHawking Zhang }
936c6b6a421SHawking Zhang 
937c6b6a421SHawking Zhang static int nv_common_set_clockgating_state(void *handle,
938c6b6a421SHawking Zhang 					   enum amd_clockgating_state state)
939c6b6a421SHawking Zhang {
940c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
941c6b6a421SHawking Zhang 
942c6b6a421SHawking Zhang 	if (amdgpu_sriov_vf(adev))
943c6b6a421SHawking Zhang 		return 0;
944c6b6a421SHawking Zhang 
945c6b6a421SHawking Zhang 	switch (adev->asic_type) {
946c6b6a421SHawking Zhang 	case CHIP_NAVI10:
9475e71e011SXiaojie Yuan 	case CHIP_NAVI14:
9487e17e58bSXiaojie Yuan 	case CHIP_NAVI12:
949bebc0762SHawking Zhang 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
950a9d4fe2fSNirmoy Das 				state == AMD_CG_STATE_GATE);
951bebc0762SHawking Zhang 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
952a9d4fe2fSNirmoy Das 				state == AMD_CG_STATE_GATE);
953c6b6a421SHawking Zhang 		nv_update_hdp_mem_power_gating(adev,
954a9d4fe2fSNirmoy Das 				   state == AMD_CG_STATE_GATE);
955c6b6a421SHawking Zhang 		nv_update_hdp_clock_gating(adev,
956a9d4fe2fSNirmoy Das 				state == AMD_CG_STATE_GATE);
957c6b6a421SHawking Zhang 		break;
958c6b6a421SHawking Zhang 	default:
959c6b6a421SHawking Zhang 		break;
960c6b6a421SHawking Zhang 	}
961c6b6a421SHawking Zhang 	return 0;
962c6b6a421SHawking Zhang }
963c6b6a421SHawking Zhang 
964c6b6a421SHawking Zhang static int nv_common_set_powergating_state(void *handle,
965c6b6a421SHawking Zhang 					   enum amd_powergating_state state)
966c6b6a421SHawking Zhang {
967c6b6a421SHawking Zhang 	/* TODO */
968c6b6a421SHawking Zhang 	return 0;
969c6b6a421SHawking Zhang }
970c6b6a421SHawking Zhang 
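/* Report the currently active clock-gating features by decoding the NBIO
 * state plus HDP_CLK_CNTL and HDP_MEM_POWER_CTRL into AMD_CG_* flags.
 */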
971c6b6a421SHawking Zhang static void nv_common_get_clockgating_state(void *handle, u32 *flags)
972c6b6a421SHawking Zhang {
973c6b6a421SHawking Zhang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
974c6b6a421SHawking Zhang 	uint32_t tmp;
975c6b6a421SHawking Zhang 
976c6b6a421SHawking Zhang 	if (amdgpu_sriov_vf(adev))
977c6b6a421SHawking Zhang 		*flags = 0;
978c6b6a421SHawking Zhang 
979bebc0762SHawking Zhang 	adev->nbio.funcs->get_clockgating_state(adev, flags);
980c6b6a421SHawking Zhang 
981c6b6a421SHawking Zhang 	/* AMD_CG_SUPPORT_HDP_MGCG */
982c6b6a421SHawking Zhang 	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
983c6b6a421SHawking Zhang 	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
984c6b6a421SHawking Zhang 		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
985c6b6a421SHawking Zhang 		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
986c6b6a421SHawking Zhang 		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
987c6b6a421SHawking Zhang 		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
988c6b6a421SHawking Zhang 		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
989c6b6a421SHawking Zhang 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
990c6b6a421SHawking Zhang 
991c6b6a421SHawking Zhang 	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
992c6b6a421SHawking Zhang 	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
993c6b6a421SHawking Zhang 	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
994c6b6a421SHawking Zhang 		*flags |= AMD_CG_SUPPORT_HDP_LS;
995c6b6a421SHawking Zhang 	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
996c6b6a421SHawking Zhang 		*flags |= AMD_CG_SUPPORT_HDP_DS;
997c6b6a421SHawking Zhang 	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
998c6b6a421SHawking Zhang 		*flags |= AMD_CG_SUPPORT_HDP_SD;
999c6b6a421SHawking Zhang 
1000c6b6a421SHawking Zhang 	return;
1001c6b6a421SHawking Zhang }
1002c6b6a421SHawking Zhang 
1003c6b6a421SHawking Zhang static const struct amd_ip_funcs nv_common_ip_funcs = {
1004c6b6a421SHawking Zhang 	.name = "nv_common",
1005c6b6a421SHawking Zhang 	.early_init = nv_common_early_init,
1006c6b6a421SHawking Zhang 	.late_init = nv_common_late_init,
1007c6b6a421SHawking Zhang 	.sw_init = nv_common_sw_init,
1008c6b6a421SHawking Zhang 	.sw_fini = nv_common_sw_fini,
1009c6b6a421SHawking Zhang 	.hw_init = nv_common_hw_init,
1010c6b6a421SHawking Zhang 	.hw_fini = nv_common_hw_fini,
1011c6b6a421SHawking Zhang 	.suspend = nv_common_suspend,
1012c6b6a421SHawking Zhang 	.resume = nv_common_resume,
1013c6b6a421SHawking Zhang 	.is_idle = nv_common_is_idle,
1014c6b6a421SHawking Zhang 	.wait_for_idle = nv_common_wait_for_idle,
1015c6b6a421SHawking Zhang 	.soft_reset = nv_common_soft_reset,
1016c6b6a421SHawking Zhang 	.set_clockgating_state = nv_common_set_clockgating_state,
1017c6b6a421SHawking Zhang 	.set_powergating_state = nv_common_set_powergating_state,
1018c6b6a421SHawking Zhang 	.get_clockgating_state = nv_common_get_clockgating_state,
1019c6b6a421SHawking Zhang };
1020