/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
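
/*
 * The two PCIE accessors above go through the BIF index/data pair: the
 * target offset is written to mmPCIE_INDEX and the payload moves through
 * mmPCIE_DATA.  The (void) read-backs flush the posted index/data writes
 * so the two halves cannot be reordered, and pcie_idx_lock serializes
 * users of the shared pair.  The SMC, UVD context, DIDT and GC_CAC
 * accessors below follow the same pattern with their own index/data
 * registers and locks.
 */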

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
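
/*
 * These accessors are installed on the device in vi_common_early_init()
 * (adev->smc_rreg, adev->didt_wreg, ...) and are normally reached through
 * wrappers such as RREG32_SMC() rather than called directly, e.g.:
 *
 *	u32 cntl = RREG32_SMC(ixCG_CLKPIN_CNTL);
 */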

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
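
/*
 * amdgpu_device_program_register_sequence() consumes these tables three
 * dwords at a time as {offset, and_mask, or_value} triples: the bits
 * covered by and_mask are replaced with or_value, and a full 0xffffffff
 * mask degenerates to a plain register write.
 */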

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
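
/*
 * A typical caller brackets its register sequence with a select/deselect
 * pair while holding adev->srbm_mutex, roughly:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */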

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
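
/*
 * Note that the ROM is read a dword at a time, so the destination buffer
 * must hold ALIGN(length_bytes, 4) bytes; the final read may store up to
 * three bytes past length_bytes.
 */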

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
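
/*
 * BACO ("Bus Active, Chip Off") powers the GPU core down while the PCIe
 * link stays up.  The two helpers below only proxy into the powerplay
 * layer, which owns the actual SMU messaging.
 */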

int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

int smu7_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
	    !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	return 0;
}

/**
 * vi_asic_pci_config_reset - reset the GPU via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		smu7_asic_get_baco_capability(adev, &baco_reset);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		r = smu7_asic_baco_reset(adev);
	} else {
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}
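
/*
 * HDP flush (above) and invalidate (below) can be driven either from the
 * CPU (a direct MMIO write followed by a read-back to make sure it
 * landed) or, when a ring that can emit register writes is supplied,
 * from the command stream so the operation is ordered against the ring's
 * other work.
 */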

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
};

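/*
 * Bristol Ridge reuses the Carrizo ASIC; the PCI revision ranges below
 * are what identify it.  vi_common_early_init() uses this to enable
 * powergating on Bristol parts even when rev_id is 0, since only rev-0
 * Carrizo needs the PG workarounds.
 */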
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

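/*
 * The DRM light-sleep control register has no symbolic name in the
 * published VI headers, so it is accessed by raw offset (0x157a) below;
 * bit 0 enables light sleep for the block.
 */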
static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
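
/*
 * On the dGPUs handled below (Tonga and the Polaris-family parts),
 * system block clockgating is requested from the SMU with PP_CG_MSG_ID()
 * encoded messages instead of direct register writes; each message names
 * a block, which features (CG and/or LS) the ASIC supports, and the
 * state being requested.
 */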

static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* this runs in the early init stage, before the vbios interface is usable */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}