xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/nv.c (revision 3e2bb60a)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

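/*
 * DIDT indirect accesses go through the mmDIDT_IND_INDEX/mmDIDT_IND_DATA
 * register pair and are serialized with didt_idx_lock, mirroring the PCIE
 * indirect accessors above.
 */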
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	WREG32(data, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

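/**
 * nv_get_config_memsize - fetch the configured memory size from the NBIO block
 *
 * @adev: amdgpu_device pointer
 */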
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

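/**
 * nv_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine.
 */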
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

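/**
 * nv_grbm_select - select the ME, pipe, queue and VMID that subsequent
 * GRBM register accesses apply to
 *
 * @adev: amdgpu_device pointer
 * @me: micro engine id
 * @pipe: pipe id
 * @queue: queue id
 * @vmid: vmid
 */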
void nv_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

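/*
 * Only registers on the whitelist above can be read through the
 * read_register callback (used, e.g., by the userspace register read
 * path); any other offset is rejected with -EINVAL.
 */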
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev, en->grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
#endif

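/**
 * nv_asic_mode1_reset - perform a PSP-backed mode1 reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, saves PCI config space, asks the PSP to reset
 * the chip, restores config space and then polls the NBIO memsize
 * register until the ASIC comes back out of reset.
 */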
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
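
/**
 * nv_asic_reset - reset the ASIC
 *
 * @adev: amdgpu_device pointer
 *
 * Uses a BACO reset when the SMU advertises support for it and falls
 * back to a PSP mode1 reset otherwise.
 */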
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* FIXME: PCI config reset hasn't worked since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (smu_baco_is_support(smu))
		ret = smu_baco_reset(smu);
	else
		ret = nv_asic_mode1_reset(adev);

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

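/*
 * Register the IP blocks for the detected ASIC. Blocks are added in the
 * order they will be initialized, and the per-IP register bases must be
 * set up before any of them touches the hardware.
 */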
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

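/*
 * Invalidate the HDP read cache, either with an immediate MMIO write or,
 * when a ring that can emit register writes is provided, from within the
 * command stream.
 */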
static void nv_invalidate_hdp(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}

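/*
 * Set up the Navi10 doorbell index layout for the KIQ, MEC, GFX, SDMA,
 * IH and VCN rings. The max_assignment value is shifted left by one,
 * presumably to express the 64-bit doorbell slots as 32-bit dword
 * offsets.
 */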
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs = {
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};

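/*
 * Early init hooks up the indirect register accessors and advertises the
 * clockgating and powergating features supported by each ASIC.
 */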
static int nv_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_MMHUB |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	return 0;
}

static int nv_common_sw_init(void *handle)
{
	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

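/*
 * HDP 5.0 cannot switch its memory power mode on the fly: the IPH and RC
 * clocks are forced on and every power-gating mode is disabled before
 * exactly one of the LS/DS/SD modes is (re-)enabled.
 */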
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch, force the IPH and RC
	 * clocks on.
	 */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching, so disable
	 * clock and power gating before making any change.
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one power mode (LS/DS/SD) can be enabled at a time */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut-down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

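/*
 * Clock gating is left to the host when running as an SR-IOV virtual
 * function, hence the early return below.
 */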
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};