xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vi.c (revision 8e8e69d6)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/slab.h>
24 #include <drm/drmP.h>
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_ih.h"
28 #include "amdgpu_uvd.h"
29 #include "amdgpu_vce.h"
30 #include "amdgpu_ucode.h"
31 #include "atom.h"
32 #include "amd_pcie.h"
33 
34 #include "gmc/gmc_8_1_d.h"
35 #include "gmc/gmc_8_1_sh_mask.h"
36 
37 #include "oss/oss_3_0_d.h"
38 #include "oss/oss_3_0_sh_mask.h"
39 
40 #include "bif/bif_5_0_d.h"
41 #include "bif/bif_5_0_sh_mask.h"
42 
43 #include "gca/gfx_8_0_d.h"
44 #include "gca/gfx_8_0_sh_mask.h"
45 
46 #include "smu/smu_7_1_1_d.h"
47 #include "smu/smu_7_1_1_sh_mask.h"
48 
49 #include "uvd/uvd_5_0_d.h"
50 #include "uvd/uvd_5_0_sh_mask.h"
51 
52 #include "vce/vce_3_0_d.h"
53 #include "vce/vce_3_0_sh_mask.h"
54 
55 #include "dce/dce_10_0_d.h"
56 #include "dce/dce_10_0_sh_mask.h"
57 
58 #include "vid.h"
59 #include "vi.h"
60 #include "vi_dpm.h"
61 #include "gmc_v8_0.h"
62 #include "gmc_v7_0.h"
63 #include "gfx_v8_0.h"
64 #include "sdma_v2_4.h"
65 #include "sdma_v3_0.h"
66 #include "dce_v10_0.h"
67 #include "dce_v11_0.h"
68 #include "iceland_ih.h"
69 #include "tonga_ih.h"
70 #include "cz_ih.h"
71 #include "uvd_v5_0.h"
72 #include "uvd_v6_0.h"
73 #include "vce_v3_0.h"
74 #if defined(CONFIG_DRM_AMD_ACP)
75 #include "amdgpu_acp.h"
76 #endif
77 #include "dce_virtual.h"
78 #include "mxgpu_vi.h"
79 #include "amdgpu_dm.h"
80 
81 /*
82  * Indirect register accessors
83  */
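/*
 * Each accessor below writes the target offset to an INDEX register and
 * transfers the value through the matching DATA register, holding the
 * per-block spinlock so the index/data sequence stays atomic against
 * concurrent callers.  The PCIE accessors additionally read the index
 * register back so the index write has landed before the data access.
 */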
84 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
85 {
86 	unsigned long flags;
87 	u32 r;
88 
89 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
90 	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
91 	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
92 	r = RREG32_NO_KIQ(mmPCIE_DATA);
93 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
94 	return r;
95 }
96 
97 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
98 {
99 	unsigned long flags;
100 
101 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
102 	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
103 	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
104 	WREG32_NO_KIQ(mmPCIE_DATA, v);
105 	(void)RREG32_NO_KIQ(mmPCIE_DATA);
106 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
107 }
108 
109 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
110 {
111 	unsigned long flags;
112 	u32 r;
113 
114 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
115 	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
116 	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
117 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
118 	return r;
119 }
120 
121 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
122 {
123 	unsigned long flags;
124 
125 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
126 	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
127 	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
128 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
129 }
130 
131 /* smu_8_0_d.h */
132 #define mmMP0PUB_IND_INDEX                                                      0x180
133 #define mmMP0PUB_IND_DATA                                                       0x181
134 
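/*
 * On APUs the SMC is reached through the MP0PUB index/data pair above
 * (borrowed from smu_8_0_d.h); vi_common_early_init() installs these cz_*
 * accessors in place of the vi_* ones when AMD_IS_APU is set.
 */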
135 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
136 {
137 	unsigned long flags;
138 	u32 r;
139 
140 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
141 	WREG32(mmMP0PUB_IND_INDEX, (reg));
142 	r = RREG32(mmMP0PUB_IND_DATA);
143 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
144 	return r;
145 }
146 
147 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
148 {
149 	unsigned long flags;
150 
151 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
152 	WREG32(mmMP0PUB_IND_INDEX, (reg));
153 	WREG32(mmMP0PUB_IND_DATA, (v));
154 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
155 }
156 
157 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
158 {
159 	unsigned long flags;
160 	u32 r;
161 
162 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
163 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
164 	r = RREG32(mmUVD_CTX_DATA);
165 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
166 	return r;
167 }
168 
169 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
170 {
171 	unsigned long flags;
172 
173 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
174 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
175 	WREG32(mmUVD_CTX_DATA, (v));
176 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
177 }
178 
179 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
180 {
181 	unsigned long flags;
182 	u32 r;
183 
184 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
185 	WREG32(mmDIDT_IND_INDEX, (reg));
186 	r = RREG32(mmDIDT_IND_DATA);
187 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
188 	return r;
189 }
190 
191 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
192 {
193 	unsigned long flags;
194 
195 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
196 	WREG32(mmDIDT_IND_INDEX, (reg));
197 	WREG32(mmDIDT_IND_DATA, (v));
198 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
199 }
200 
201 static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
202 {
203 	unsigned long flags;
204 	u32 r;
205 
206 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
207 	WREG32(mmGC_CAC_IND_INDEX, (reg));
208 	r = RREG32(mmGC_CAC_IND_DATA);
209 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
210 	return r;
211 }
212 
213 static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
214 {
215 	unsigned long flags;
216 
217 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
218 	WREG32(mmGC_CAC_IND_INDEX, (reg));
219 	WREG32(mmGC_CAC_IND_DATA, (v));
220 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
221 }
222 
223 
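/*
 * Golden register tables: each entry is a {register, AND mask, OR value}
 * triplet consumed by amdgpu_device_program_register_sequence(), which
 * read-modify-writes the masked value into the register (or writes the
 * value directly when the mask is 0xffffffff).
 */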
224 static const u32 tonga_mgcg_cgcg_init[] =
225 {
226 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
227 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
228 	mmPCIE_DATA, 0x000f0000, 0x00000000,
229 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
230 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
231 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
232 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
233 };
234 
235 static const u32 fiji_mgcg_cgcg_init[] =
236 {
237 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
238 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
239 	mmPCIE_DATA, 0x000f0000, 0x00000000,
240 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
241 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
242 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
243 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
244 };
245 
246 static const u32 iceland_mgcg_cgcg_init[] =
247 {
248 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
249 	mmPCIE_DATA, 0x000f0000, 0x00000000,
250 	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
251 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
252 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
253 };
254 
255 static const u32 cz_mgcg_cgcg_init[] =
256 {
257 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
258 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
259 	mmPCIE_DATA, 0x000f0000, 0x00000000,
260 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
261 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
262 };
263 
264 static const u32 stoney_mgcg_cgcg_init[] =
265 {
266 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
267 	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
268 	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
269 };
270 
271 static void vi_init_golden_registers(struct amdgpu_device *adev)
272 {
273 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
274 	mutex_lock(&adev->grbm_idx_mutex);
275 
276 	if (amdgpu_sriov_vf(adev)) {
277 		xgpu_vi_init_golden_registers(adev);
278 		mutex_unlock(&adev->grbm_idx_mutex);
279 		return;
280 	}
281 
282 	switch (adev->asic_type) {
283 	case CHIP_TOPAZ:
284 		amdgpu_device_program_register_sequence(adev,
285 							iceland_mgcg_cgcg_init,
286 							ARRAY_SIZE(iceland_mgcg_cgcg_init));
287 		break;
288 	case CHIP_FIJI:
289 		amdgpu_device_program_register_sequence(adev,
290 							fiji_mgcg_cgcg_init,
291 							ARRAY_SIZE(fiji_mgcg_cgcg_init));
292 		break;
293 	case CHIP_TONGA:
294 		amdgpu_device_program_register_sequence(adev,
295 							tonga_mgcg_cgcg_init,
296 							ARRAY_SIZE(tonga_mgcg_cgcg_init));
297 		break;
298 	case CHIP_CARRIZO:
299 		amdgpu_device_program_register_sequence(adev,
300 							cz_mgcg_cgcg_init,
301 							ARRAY_SIZE(cz_mgcg_cgcg_init));
302 		break;
303 	case CHIP_STONEY:
304 		amdgpu_device_program_register_sequence(adev,
305 							stoney_mgcg_cgcg_init,
306 							ARRAY_SIZE(stoney_mgcg_cgcg_init));
307 		break;
308 	case CHIP_POLARIS10:
309 	case CHIP_POLARIS11:
310 	case CHIP_POLARIS12:
311 	case CHIP_VEGAM:
312 	default:
313 		break;
314 	}
315 	mutex_unlock(&adev->grbm_idx_mutex);
316 }
317 
318 /**
319  * vi_get_xclk - get the xclk
320  *
321  * @adev: amdgpu_device pointer
322  *
323  * Returns the reference clock used by the gfx engine
324  * (VI).
325  */
326 static u32 vi_get_xclk(struct amdgpu_device *adev)
327 {
328 	u32 reference_clock = adev->clock.spll.reference_freq;
329 	u32 tmp;
330 
331 	if (adev->flags & AMD_IS_APU)
332 		return reference_clock;
333 
334 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
335 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
336 		return 1000;
337 
338 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
339 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
340 		return reference_clock / 4;
341 
342 	return reference_clock;
343 }
344 
345 /**
346  * vi_srbm_select - select specific register instances
347  *
348  * @adev: amdgpu_device pointer
349  * @me: selected ME (micro engine)
350  * @pipe: pipe
351  * @queue: queue
352  * @vmid: VMID
353  *
354  * Switches the currently active register instances.  Some
355  * registers are instanced per VMID, others are instanced per
356  * me/pipe/queue combination.
357  */
358 void vi_srbm_select(struct amdgpu_device *adev,
359 		     u32 me, u32 pipe, u32 queue, u32 vmid)
360 {
361 	u32 srbm_gfx_cntl = 0;
362 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
363 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
364 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
365 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
366 	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
367 }
368 
369 static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
370 {
371 	/* todo */
372 }
373 
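/*
 * Fallback VBIOS read path: enable the BIOS ROM in BUS_CNTL, disable VGA
 * decoding on the display controllers and set SCK_OVERWRITE in ROM_CNTL,
 * let amdgpu_read_bios() copy the image, then restore the saved registers.
 */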
374 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
375 {
376 	u32 bus_cntl;
377 	u32 d1vga_control = 0;
378 	u32 d2vga_control = 0;
379 	u32 vga_render_control = 0;
380 	u32 rom_cntl;
381 	bool r;
382 
383 	bus_cntl = RREG32(mmBUS_CNTL);
384 	if (adev->mode_info.num_crtc) {
385 		d1vga_control = RREG32(mmD1VGA_CONTROL);
386 		d2vga_control = RREG32(mmD2VGA_CONTROL);
387 		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
388 	}
389 	rom_cntl = RREG32_SMC(ixROM_CNTL);
390 
391 	/* enable the rom */
392 	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
393 	if (adev->mode_info.num_crtc) {
394 		/* Disable VGA mode */
395 		WREG32(mmD1VGA_CONTROL,
396 		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
397 					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
398 		WREG32(mmD2VGA_CONTROL,
399 		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
400 					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
401 		WREG32(mmVGA_RENDER_CONTROL,
402 		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
403 	}
404 	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
405 
406 	r = amdgpu_read_bios(adev);
407 
408 	/* restore regs */
409 	WREG32(mmBUS_CNTL, bus_cntl);
410 	if (adev->mode_info.num_crtc) {
411 		WREG32(mmD1VGA_CONTROL, d1vga_control);
412 		WREG32(mmD2VGA_CONTROL, d2vga_control);
413 		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
414 	}
415 	WREG32_SMC(ixROM_CNTL, rom_cntl);
416 	return r;
417 }
418 
419 static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
420 				  u8 *bios, u32 length_bytes)
421 {
422 	u32 *dw_ptr;
423 	unsigned long flags;
424 	u32 i, length_dw;
425 
426 	if (bios == NULL)
427 		return false;
428 	if (length_bytes == 0)
429 		return false;
430 	/* APU vbios image is part of sbios image */
431 	if (adev->flags & AMD_IS_APU)
432 		return false;
433 
434 	dw_ptr = (u32 *)bios;
435 	length_dw = ALIGN(length_bytes, 4) / 4;
436 	/* take the smc lock since we are using the smc index */
437 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
438 	/* set rom index to 0 */
439 	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
440 	WREG32(mmSMC_IND_DATA_11, 0);
441 	/* set index to data for continuous read */
442 	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
443 	for (i = 0; i < length_dw; i++)
444 		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
445 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
446 
447 	return true;
448 }
449 
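/*
 * Detect SR-IOV from BIF_IOV_FUNC_IDENTIFIER on Tonga/Fiji (bit 0 set means
 * we are a VF, bit 31 means IOV is enabled); if the register reads back as
 * zero but we are running inside a VM, assume GPU passthrough instead.
 */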
450 static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
451 {
452 	uint32_t reg = 0;
453 
454 	if (adev->asic_type == CHIP_TONGA ||
455 	    adev->asic_type == CHIP_FIJI) {
456 	       reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
457 	       /* bit0: 0 means pf and 1 means vf */
458 	       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
459 		       adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
460 	       /* bit31: 0 means disable IOV and 1 means enable */
461 	       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
462 		       adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
463 	}
464 
465 	if (reg == 0) {
466 		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
467 			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
468 	}
469 }
470 
471 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
472 	{mmGRBM_STATUS},
473 	{mmGRBM_STATUS2},
474 	{mmGRBM_STATUS_SE0},
475 	{mmGRBM_STATUS_SE1},
476 	{mmGRBM_STATUS_SE2},
477 	{mmGRBM_STATUS_SE3},
478 	{mmSRBM_STATUS},
479 	{mmSRBM_STATUS2},
480 	{mmSRBM_STATUS3},
481 	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
482 	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
483 	{mmCP_STAT},
484 	{mmCP_STALLED_STAT1},
485 	{mmCP_STALLED_STAT2},
486 	{mmCP_STALLED_STAT3},
487 	{mmCP_CPF_BUSY_STAT},
488 	{mmCP_CPF_STALLED_STAT1},
489 	{mmCP_CPF_STATUS},
490 	{mmCP_CPC_BUSY_STAT},
491 	{mmCP_CPC_STALLED_STAT1},
492 	{mmCP_CPC_STATUS},
493 	{mmGB_ADDR_CONFIG},
494 	{mmMC_ARB_RAMCFG},
495 	{mmGB_TILE_MODE0},
496 	{mmGB_TILE_MODE1},
497 	{mmGB_TILE_MODE2},
498 	{mmGB_TILE_MODE3},
499 	{mmGB_TILE_MODE4},
500 	{mmGB_TILE_MODE5},
501 	{mmGB_TILE_MODE6},
502 	{mmGB_TILE_MODE7},
503 	{mmGB_TILE_MODE8},
504 	{mmGB_TILE_MODE9},
505 	{mmGB_TILE_MODE10},
506 	{mmGB_TILE_MODE11},
507 	{mmGB_TILE_MODE12},
508 	{mmGB_TILE_MODE13},
509 	{mmGB_TILE_MODE14},
510 	{mmGB_TILE_MODE15},
511 	{mmGB_TILE_MODE16},
512 	{mmGB_TILE_MODE17},
513 	{mmGB_TILE_MODE18},
514 	{mmGB_TILE_MODE19},
515 	{mmGB_TILE_MODE20},
516 	{mmGB_TILE_MODE21},
517 	{mmGB_TILE_MODE22},
518 	{mmGB_TILE_MODE23},
519 	{mmGB_TILE_MODE24},
520 	{mmGB_TILE_MODE25},
521 	{mmGB_TILE_MODE26},
522 	{mmGB_TILE_MODE27},
523 	{mmGB_TILE_MODE28},
524 	{mmGB_TILE_MODE29},
525 	{mmGB_TILE_MODE30},
526 	{mmGB_TILE_MODE31},
527 	{mmGB_MACROTILE_MODE0},
528 	{mmGB_MACROTILE_MODE1},
529 	{mmGB_MACROTILE_MODE2},
530 	{mmGB_MACROTILE_MODE3},
531 	{mmGB_MACROTILE_MODE4},
532 	{mmGB_MACROTILE_MODE5},
533 	{mmGB_MACROTILE_MODE6},
534 	{mmGB_MACROTILE_MODE7},
535 	{mmGB_MACROTILE_MODE8},
536 	{mmGB_MACROTILE_MODE9},
537 	{mmGB_MACROTILE_MODE10},
538 	{mmGB_MACROTILE_MODE11},
539 	{mmGB_MACROTILE_MODE12},
540 	{mmGB_MACROTILE_MODE13},
541 	{mmGB_MACROTILE_MODE14},
542 	{mmGB_MACROTILE_MODE15},
543 	{mmCC_RB_BACKEND_DISABLE, true},
544 	{mmGC_USER_RB_BACKEND_DISABLE, true},
545 	{mmGB_BACKEND_MAP, false},
546 	{mmPA_SC_RASTER_CONFIG, true},
547 	{mmPA_SC_RASTER_CONFIG_1, true},
548 };
549 
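/*
 * For registers flagged as grbm_indexed, the four rb_config registers are
 * returned from the cached adev->gfx.config values; anything else is read
 * back under an explicit GRBM_GFX_INDEX (se/sh) selection.  Non-indexed
 * registers are likewise served from the cached gfx config (tile modes,
 * GB_ADDR_CONFIG, MC_ARB_RAMCFG) where available, else read directly.
 */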
550 static uint32_t vi_get_register_value(struct amdgpu_device *adev,
551 				      bool indexed, u32 se_num,
552 				      u32 sh_num, u32 reg_offset)
553 {
554 	if (indexed) {
555 		uint32_t val;
556 		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
557 		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
558 
559 		switch (reg_offset) {
560 		case mmCC_RB_BACKEND_DISABLE:
561 			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
562 		case mmGC_USER_RB_BACKEND_DISABLE:
563 			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
564 		case mmPA_SC_RASTER_CONFIG:
565 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
566 		case mmPA_SC_RASTER_CONFIG_1:
567 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
568 		}
569 
570 		mutex_lock(&adev->grbm_idx_mutex);
571 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
572 			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
573 
574 		val = RREG32(reg_offset);
575 
576 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
577 			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
578 		mutex_unlock(&adev->grbm_idx_mutex);
579 		return val;
580 	} else {
581 		unsigned idx;
582 
583 		switch (reg_offset) {
584 		case mmGB_ADDR_CONFIG:
585 			return adev->gfx.config.gb_addr_config;
586 		case mmMC_ARB_RAMCFG:
587 			return adev->gfx.config.mc_arb_ramcfg;
588 		case mmGB_TILE_MODE0:
589 		case mmGB_TILE_MODE1:
590 		case mmGB_TILE_MODE2:
591 		case mmGB_TILE_MODE3:
592 		case mmGB_TILE_MODE4:
593 		case mmGB_TILE_MODE5:
594 		case mmGB_TILE_MODE6:
595 		case mmGB_TILE_MODE7:
596 		case mmGB_TILE_MODE8:
597 		case mmGB_TILE_MODE9:
598 		case mmGB_TILE_MODE10:
599 		case mmGB_TILE_MODE11:
600 		case mmGB_TILE_MODE12:
601 		case mmGB_TILE_MODE13:
602 		case mmGB_TILE_MODE14:
603 		case mmGB_TILE_MODE15:
604 		case mmGB_TILE_MODE16:
605 		case mmGB_TILE_MODE17:
606 		case mmGB_TILE_MODE18:
607 		case mmGB_TILE_MODE19:
608 		case mmGB_TILE_MODE20:
609 		case mmGB_TILE_MODE21:
610 		case mmGB_TILE_MODE22:
611 		case mmGB_TILE_MODE23:
612 		case mmGB_TILE_MODE24:
613 		case mmGB_TILE_MODE25:
614 		case mmGB_TILE_MODE26:
615 		case mmGB_TILE_MODE27:
616 		case mmGB_TILE_MODE28:
617 		case mmGB_TILE_MODE29:
618 		case mmGB_TILE_MODE30:
619 		case mmGB_TILE_MODE31:
620 			idx = (reg_offset - mmGB_TILE_MODE0);
621 			return adev->gfx.config.tile_mode_array[idx];
622 		case mmGB_MACROTILE_MODE0:
623 		case mmGB_MACROTILE_MODE1:
624 		case mmGB_MACROTILE_MODE2:
625 		case mmGB_MACROTILE_MODE3:
626 		case mmGB_MACROTILE_MODE4:
627 		case mmGB_MACROTILE_MODE5:
628 		case mmGB_MACROTILE_MODE6:
629 		case mmGB_MACROTILE_MODE7:
630 		case mmGB_MACROTILE_MODE8:
631 		case mmGB_MACROTILE_MODE9:
632 		case mmGB_MACROTILE_MODE10:
633 		case mmGB_MACROTILE_MODE11:
634 		case mmGB_MACROTILE_MODE12:
635 		case mmGB_MACROTILE_MODE13:
636 		case mmGB_MACROTILE_MODE14:
637 		case mmGB_MACROTILE_MODE15:
638 			idx = (reg_offset - mmGB_MACROTILE_MODE0);
639 			return adev->gfx.config.macrotile_mode_array[idx];
640 		default:
641 			return RREG32(reg_offset);
642 		}
643 	}
644 }
645 
646 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
647 			    u32 sh_num, u32 reg_offset, u32 *value)
648 {
649 	uint32_t i;
650 
651 	*value = 0;
652 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
653 		bool indexed = vi_allowed_read_registers[i].grbm_indexed;
654 
655 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
656 			continue;
657 
658 		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
659 					       reg_offset);
660 		return 0;
661 	}
662 	return -EINVAL;
663 }
664 
665 static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
666 {
667 	u32 i;
668 
669 	dev_info(adev->dev, "GPU pci config reset\n");
670 
671 	/* disable BM */
672 	pci_clear_master(adev->pdev);
673 	/* reset */
674 	amdgpu_device_pci_config_reset(adev);
675 
676 	udelay(100);
677 
678 	/* wait for asic to come out of reset */
679 	for (i = 0; i < adev->usec_timeout; i++) {
680 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
681 			/* enable BM */
682 			pci_set_master(adev->pdev);
683 			adev->has_hw_reset = true;
684 			return 0;
685 		}
686 		udelay(1);
687 	}
688 	return -EINVAL;
689 }
690 
691 /**
692  * vi_asic_reset - soft reset GPU
693  *
694  * @adev: amdgpu_device pointer
695  *
696  * Look up which blocks are hung and attempt
697  * to reset them.
698  * Returns 0 for success.
699  */
700 static int vi_asic_reset(struct amdgpu_device *adev)
701 {
702 	int r;
703 
704 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
705 
706 	r = vi_gpu_pci_config_reset(adev);
707 
708 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
709 
710 	return r;
711 }
712 
713 static u32 vi_get_config_memsize(struct amdgpu_device *adev)
714 {
715 	return RREG32(mmCONFIG_MEMSIZE);
716 }
717 
718 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
719 			u32 cntl_reg, u32 status_reg)
720 {
721 	int r, i;
722 	struct atom_clock_dividers dividers;
723 	uint32_t tmp;
724 
725 	r = amdgpu_atombios_get_clock_dividers(adev,
726 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
727 					       clock, false, &dividers);
728 	if (r)
729 		return r;
730 
731 	tmp = RREG32_SMC(cntl_reg);
732 
733 	if (adev->flags & AMD_IS_APU)
734 		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
735 	else
736 		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
737 				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
738 	tmp |= dividers.post_divider;
739 	WREG32_SMC(cntl_reg, tmp);
740 
741 	for (i = 0; i < 100; i++) {
742 		tmp = RREG32_SMC(status_reg);
743 		if (adev->flags & AMD_IS_APU) {
744 			if (tmp & 0x10000)
745 				break;
746 		} else {
747 			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
748 				break;
749 		}
750 		mdelay(10);
751 	}
752 	if (i == 100)
753 		return -ETIMEDOUT;
754 	return 0;
755 }
756 
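/*
 * GNB (APU) clock DFS control/status registers; vi_set_uvd_clocks() and
 * vi_set_vce_clocks() use these in place of the CG_*CLK_CNTL/STATUS pairs
 * on APUs.
 */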
757 #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
758 #define ixGNB_CLK1_STATUS   0xD822010C
759 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
760 #define ixGNB_CLK2_STATUS   0xD822012C
761 #define ixGNB_CLK3_DFS_CNTL 0xD8220130
762 #define ixGNB_CLK3_STATUS   0xD822014C
763 
764 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
765 {
766 	int r;
767 
768 	if (adev->flags & AMD_IS_APU) {
769 		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
770 		if (r)
771 			return r;
772 
773 		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
774 		if (r)
775 			return r;
776 	} else {
777 		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
778 		if (r)
779 			return r;
780 
781 		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
782 		if (r)
783 			return r;
784 	}
785 
786 	return 0;
787 }
788 
789 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
790 {
791 	int r, i;
792 	struct atom_clock_dividers dividers;
793 	u32 tmp;
794 	u32 reg_ctrl;
795 	u32 reg_status;
796 	u32 status_mask;
797 	u32 reg_mask;
798 
799 	if (adev->flags & AMD_IS_APU) {
800 		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
801 		reg_status = ixGNB_CLK3_STATUS;
802 		status_mask = 0x00010000;
803 		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
804 	} else {
805 		reg_ctrl = ixCG_ECLK_CNTL;
806 		reg_status = ixCG_ECLK_STATUS;
807 		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
808 		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
809 	}
810 
811 	r = amdgpu_atombios_get_clock_dividers(adev,
812 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
813 					       ecclk, false, &dividers);
814 	if (r)
815 		return r;
816 
817 	for (i = 0; i < 100; i++) {
818 		if (RREG32_SMC(reg_status) & status_mask)
819 			break;
820 		mdelay(10);
821 	}
822 
823 	if (i == 100)
824 		return -ETIMEDOUT;
825 
826 	tmp = RREG32_SMC(reg_ctrl);
827 	tmp &= ~reg_mask;
828 	tmp |= dividers.post_divider;
829 	WREG32_SMC(reg_ctrl, tmp);
830 
831 	for (i = 0; i < 100; i++) {
832 		if (RREG32_SMC(reg_status) & status_mask)
833 			break;
834 		mdelay(10);
835 	}
836 
837 	if (i == 100)
838 		return -ETIMEDOUT;
839 
840 	return 0;
841 }
842 
843 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
844 {
845 	if (pci_is_root_bus(adev->pdev->bus))
846 		return;
847 
848 	if (amdgpu_pcie_gen2 == 0)
849 		return;
850 
851 	if (adev->flags & AMD_IS_APU)
852 		return;
853 
854 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
855 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
856 		return;
857 
858 	/* todo */
859 }
860 
861 static void vi_program_aspm(struct amdgpu_device *adev)
862 {
863 
864 	if (amdgpu_aspm == 0)
865 		return;
866 
867 	/* todo */
868 }
869 
870 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
871 					bool enable)
872 {
873 	u32 tmp;
874 
875 	/* not necessary on CZ */
876 	if (adev->flags & AMD_IS_APU)
877 		return;
878 
879 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
880 	if (enable)
881 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
882 	else
883 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
884 
885 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
886 }
887 
888 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
889 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
890 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
891 
892 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
893 {
894 	if (adev->flags & AMD_IS_APU)
895 		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
896 			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
897 	else
898 		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
899 			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
900 }
901 
902 static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
903 {
904 	if (!ring || !ring->funcs->emit_wreg) {
905 		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
906 		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
907 	} else {
908 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
909 	}
910 }
911 
912 static void vi_invalidate_hdp(struct amdgpu_device *adev,
913 			      struct amdgpu_ring *ring)
914 {
915 	if (!ring || !ring->funcs->emit_wreg) {
916 		WREG32(mmHDP_DEBUG0, 1);
917 		RREG32(mmHDP_DEBUG0);
918 	} else {
919 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
920 	}
921 }
922 
923 static bool vi_need_full_reset(struct amdgpu_device *adev)
924 {
925 	switch (adev->asic_type) {
926 	case CHIP_CARRIZO:
927 	case CHIP_STONEY:
928 		/* CZ has hang issues with full reset at the moment */
929 		return false;
930 	case CHIP_FIJI:
931 	case CHIP_TONGA:
932 		/* XXX: soft reset should work on fiji and tonga */
933 		return true;
934 	case CHIP_POLARIS10:
935 	case CHIP_POLARIS11:
936 	case CHIP_POLARIS12:
937 	case CHIP_TOPAZ:
938 	default:
939 		/* change this when we support soft reset */
940 		return true;
941 	}
942 }
943 
944 static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
945 			      uint64_t *count1)
946 {
947 	uint32_t perfctr = 0;
948 	uint64_t cnt0_of, cnt1_of;
949 	int tmp;
950 
951 	/* This reports 0 on APUs, so return to avoid writing/reading registers
952 	 * that may or may not be different from their GPU counterparts
953 	 */
954 	if (adev->flags & AMD_IS_APU)
955 		return;
956 
957 	/* Set the 2 events that we wish to watch, described below */
958 	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
959 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
960 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
961 
962 	/* Write to enable desired perf counters */
963 	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
964 	/* Zero out and enable the perf counters
965 	 * Write 0x5:
966 	 * Bit 0 = Start all counters(1)
967 	 * Bit 2 = Global counter reset enable(1)
968 	 */
969 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
970 
971 	msleep(1000);
972 
973 	/* Load the shadow and disable the perf counters
974 	 * Write 0x2:
975 	 * Bit 0 = Stop counters(0)
976 	 * Bit 1 = Load the shadow counters(1)
977 	 */
978 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
979 
980 	/* Read register values to get any >32bit overflow */
981 	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
982 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
983 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
984 
985 	/* Get the values and add the overflow */
986 	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
987 	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
988 }
989 
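/*
 * If the SMC clock is enabled and SMC_PC_C has advanced beyond 0x20100,
 * the SMC firmware is presumably still running from a previous driver
 * instance (e.g. after a kexec), so request a reset before reinitializing.
 */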
990 static bool vi_need_reset_on_init(struct amdgpu_device *adev)
991 {
992 	u32 clock_cntl, pc;
993 
994 	if (adev->flags & AMD_IS_APU)
995 		return false;
996 
997 	/* check if the SMC is already running */
998 	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
999 	pc = RREG32_SMC(ixSMC_PC_C);
1000 	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1001 	    (0x20100 <= pc))
1002 		return true;
1003 
1004 	return false;
1005 }
1006 
1007 static const struct amdgpu_asic_funcs vi_asic_funcs =
1008 {
1009 	.read_disabled_bios = &vi_read_disabled_bios,
1010 	.read_bios_from_rom = &vi_read_bios_from_rom,
1011 	.read_register = &vi_read_register,
1012 	.reset = &vi_asic_reset,
1013 	.set_vga_state = &vi_vga_set_state,
1014 	.get_xclk = &vi_get_xclk,
1015 	.set_uvd_clocks = &vi_set_uvd_clocks,
1016 	.set_vce_clocks = &vi_set_vce_clocks,
1017 	.get_config_memsize = &vi_get_config_memsize,
1018 	.flush_hdp = &vi_flush_hdp,
1019 	.invalidate_hdp = &vi_invalidate_hdp,
1020 	.need_full_reset = &vi_need_full_reset,
1021 	.init_doorbell_index = &legacy_doorbell_index_init,
1022 	.get_pcie_usage = &vi_get_pcie_usage,
1023 	.need_reset_on_init = &vi_need_reset_on_init,
1024 };
1025 
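/*
 * Carrizo parts with a PCI revision in these ranges are Bristol Ridge;
 * vi_common_early_init() treats them like post-rev0 Carrizo when deciding
 * which powergating features to enable.
 */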
1026 #define CZ_REV_BRISTOL(rev)	 \
1027 	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1028 
1029 static int vi_common_early_init(void *handle)
1030 {
1031 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032 
1033 	if (adev->flags & AMD_IS_APU) {
1034 		adev->smc_rreg = &cz_smc_rreg;
1035 		adev->smc_wreg = &cz_smc_wreg;
1036 	} else {
1037 		adev->smc_rreg = &vi_smc_rreg;
1038 		adev->smc_wreg = &vi_smc_wreg;
1039 	}
1040 	adev->pcie_rreg = &vi_pcie_rreg;
1041 	adev->pcie_wreg = &vi_pcie_wreg;
1042 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1043 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1044 	adev->didt_rreg = &vi_didt_rreg;
1045 	adev->didt_wreg = &vi_didt_wreg;
1046 	adev->gc_cac_rreg = &vi_gc_cac_rreg;
1047 	adev->gc_cac_wreg = &vi_gc_cac_wreg;
1048 
1049 	adev->asic_funcs = &vi_asic_funcs;
1050 
1051 	adev->rev_id = vi_get_rev_id(adev);
1052 	adev->external_rev_id = 0xFF;
1053 	switch (adev->asic_type) {
1054 	case CHIP_TOPAZ:
1055 		adev->cg_flags = 0;
1056 		adev->pg_flags = 0;
1057 		adev->external_rev_id = 0x1;
1058 		break;
1059 	case CHIP_FIJI:
1060 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1061 			AMD_CG_SUPPORT_GFX_MGLS |
1062 			AMD_CG_SUPPORT_GFX_RLC_LS |
1063 			AMD_CG_SUPPORT_GFX_CP_LS |
1064 			AMD_CG_SUPPORT_GFX_CGTS |
1065 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1066 			AMD_CG_SUPPORT_GFX_CGCG |
1067 			AMD_CG_SUPPORT_GFX_CGLS |
1068 			AMD_CG_SUPPORT_SDMA_MGCG |
1069 			AMD_CG_SUPPORT_SDMA_LS |
1070 			AMD_CG_SUPPORT_BIF_LS |
1071 			AMD_CG_SUPPORT_HDP_MGCG |
1072 			AMD_CG_SUPPORT_HDP_LS |
1073 			AMD_CG_SUPPORT_ROM_MGCG |
1074 			AMD_CG_SUPPORT_MC_MGCG |
1075 			AMD_CG_SUPPORT_MC_LS |
1076 			AMD_CG_SUPPORT_UVD_MGCG;
1077 		adev->pg_flags = 0;
1078 		adev->external_rev_id = adev->rev_id + 0x3c;
1079 		break;
1080 	case CHIP_TONGA:
1081 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1082 			AMD_CG_SUPPORT_GFX_CGCG |
1083 			AMD_CG_SUPPORT_GFX_CGLS |
1084 			AMD_CG_SUPPORT_SDMA_MGCG |
1085 			AMD_CG_SUPPORT_SDMA_LS |
1086 			AMD_CG_SUPPORT_BIF_LS |
1087 			AMD_CG_SUPPORT_HDP_MGCG |
1088 			AMD_CG_SUPPORT_HDP_LS |
1089 			AMD_CG_SUPPORT_ROM_MGCG |
1090 			AMD_CG_SUPPORT_MC_MGCG |
1091 			AMD_CG_SUPPORT_MC_LS |
1092 			AMD_CG_SUPPORT_DRM_LS |
1093 			AMD_CG_SUPPORT_UVD_MGCG;
1094 		adev->pg_flags = 0;
1095 		adev->external_rev_id = adev->rev_id + 0x14;
1096 		break;
1097 	case CHIP_POLARIS11:
1098 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1099 			AMD_CG_SUPPORT_GFX_RLC_LS |
1100 			AMD_CG_SUPPORT_GFX_CP_LS |
1101 			AMD_CG_SUPPORT_GFX_CGCG |
1102 			AMD_CG_SUPPORT_GFX_CGLS |
1103 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1104 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1105 			AMD_CG_SUPPORT_SDMA_MGCG |
1106 			AMD_CG_SUPPORT_SDMA_LS |
1107 			AMD_CG_SUPPORT_BIF_MGCG |
1108 			AMD_CG_SUPPORT_BIF_LS |
1109 			AMD_CG_SUPPORT_HDP_MGCG |
1110 			AMD_CG_SUPPORT_HDP_LS |
1111 			AMD_CG_SUPPORT_ROM_MGCG |
1112 			AMD_CG_SUPPORT_MC_MGCG |
1113 			AMD_CG_SUPPORT_MC_LS |
1114 			AMD_CG_SUPPORT_DRM_LS |
1115 			AMD_CG_SUPPORT_UVD_MGCG |
1116 			AMD_CG_SUPPORT_VCE_MGCG;
1117 		adev->pg_flags = 0;
1118 		adev->external_rev_id = adev->rev_id + 0x5A;
1119 		break;
1120 	case CHIP_POLARIS10:
1121 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1122 			AMD_CG_SUPPORT_GFX_RLC_LS |
1123 			AMD_CG_SUPPORT_GFX_CP_LS |
1124 			AMD_CG_SUPPORT_GFX_CGCG |
1125 			AMD_CG_SUPPORT_GFX_CGLS |
1126 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1127 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1128 			AMD_CG_SUPPORT_SDMA_MGCG |
1129 			AMD_CG_SUPPORT_SDMA_LS |
1130 			AMD_CG_SUPPORT_BIF_MGCG |
1131 			AMD_CG_SUPPORT_BIF_LS |
1132 			AMD_CG_SUPPORT_HDP_MGCG |
1133 			AMD_CG_SUPPORT_HDP_LS |
1134 			AMD_CG_SUPPORT_ROM_MGCG |
1135 			AMD_CG_SUPPORT_MC_MGCG |
1136 			AMD_CG_SUPPORT_MC_LS |
1137 			AMD_CG_SUPPORT_DRM_LS |
1138 			AMD_CG_SUPPORT_UVD_MGCG |
1139 			AMD_CG_SUPPORT_VCE_MGCG;
1140 		adev->pg_flags = 0;
1141 		adev->external_rev_id = adev->rev_id + 0x50;
1142 		break;
1143 	case CHIP_POLARIS12:
1144 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1145 			AMD_CG_SUPPORT_GFX_RLC_LS |
1146 			AMD_CG_SUPPORT_GFX_CP_LS |
1147 			AMD_CG_SUPPORT_GFX_CGCG |
1148 			AMD_CG_SUPPORT_GFX_CGLS |
1149 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1150 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1151 			AMD_CG_SUPPORT_SDMA_MGCG |
1152 			AMD_CG_SUPPORT_SDMA_LS |
1153 			AMD_CG_SUPPORT_BIF_MGCG |
1154 			AMD_CG_SUPPORT_BIF_LS |
1155 			AMD_CG_SUPPORT_HDP_MGCG |
1156 			AMD_CG_SUPPORT_HDP_LS |
1157 			AMD_CG_SUPPORT_ROM_MGCG |
1158 			AMD_CG_SUPPORT_MC_MGCG |
1159 			AMD_CG_SUPPORT_MC_LS |
1160 			AMD_CG_SUPPORT_DRM_LS |
1161 			AMD_CG_SUPPORT_UVD_MGCG |
1162 			AMD_CG_SUPPORT_VCE_MGCG;
1163 		adev->pg_flags = 0;
1164 		adev->external_rev_id = adev->rev_id + 0x64;
1165 		break;
1166 	case CHIP_VEGAM:
1167 		adev->cg_flags = 0;
1168 			/*AMD_CG_SUPPORT_GFX_MGCG |
1169 			AMD_CG_SUPPORT_GFX_RLC_LS |
1170 			AMD_CG_SUPPORT_GFX_CP_LS |
1171 			AMD_CG_SUPPORT_GFX_CGCG |
1172 			AMD_CG_SUPPORT_GFX_CGLS |
1173 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1174 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1175 			AMD_CG_SUPPORT_SDMA_MGCG |
1176 			AMD_CG_SUPPORT_SDMA_LS |
1177 			AMD_CG_SUPPORT_BIF_MGCG |
1178 			AMD_CG_SUPPORT_BIF_LS |
1179 			AMD_CG_SUPPORT_HDP_MGCG |
1180 			AMD_CG_SUPPORT_HDP_LS |
1181 			AMD_CG_SUPPORT_ROM_MGCG |
1182 			AMD_CG_SUPPORT_MC_MGCG |
1183 			AMD_CG_SUPPORT_MC_LS |
1184 			AMD_CG_SUPPORT_DRM_LS |
1185 			AMD_CG_SUPPORT_UVD_MGCG |
1186 			AMD_CG_SUPPORT_VCE_MGCG;*/
1187 		adev->pg_flags = 0;
1188 		adev->external_rev_id = adev->rev_id + 0x6E;
1189 		break;
1190 	case CHIP_CARRIZO:
1191 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1192 			AMD_CG_SUPPORT_GFX_MGCG |
1193 			AMD_CG_SUPPORT_GFX_MGLS |
1194 			AMD_CG_SUPPORT_GFX_RLC_LS |
1195 			AMD_CG_SUPPORT_GFX_CP_LS |
1196 			AMD_CG_SUPPORT_GFX_CGTS |
1197 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1198 			AMD_CG_SUPPORT_GFX_CGCG |
1199 			AMD_CG_SUPPORT_GFX_CGLS |
1200 			AMD_CG_SUPPORT_BIF_LS |
1201 			AMD_CG_SUPPORT_HDP_MGCG |
1202 			AMD_CG_SUPPORT_HDP_LS |
1203 			AMD_CG_SUPPORT_SDMA_MGCG |
1204 			AMD_CG_SUPPORT_SDMA_LS |
1205 			AMD_CG_SUPPORT_VCE_MGCG;
1206 		/* rev0 hardware requires workarounds to support PG */
1207 		adev->pg_flags = 0;
1208 		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
1209 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
1210 				AMD_PG_SUPPORT_GFX_PIPELINE |
1211 				AMD_PG_SUPPORT_CP |
1212 				AMD_PG_SUPPORT_UVD |
1213 				AMD_PG_SUPPORT_VCE;
1214 		}
1215 		adev->external_rev_id = adev->rev_id + 0x1;
1216 		break;
1217 	case CHIP_STONEY:
1218 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1219 			AMD_CG_SUPPORT_GFX_MGCG |
1220 			AMD_CG_SUPPORT_GFX_MGLS |
1221 			AMD_CG_SUPPORT_GFX_RLC_LS |
1222 			AMD_CG_SUPPORT_GFX_CP_LS |
1223 			AMD_CG_SUPPORT_GFX_CGTS |
1224 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1225 			AMD_CG_SUPPORT_GFX_CGLS |
1226 			AMD_CG_SUPPORT_BIF_LS |
1227 			AMD_CG_SUPPORT_HDP_MGCG |
1228 			AMD_CG_SUPPORT_HDP_LS |
1229 			AMD_CG_SUPPORT_SDMA_MGCG |
1230 			AMD_CG_SUPPORT_SDMA_LS |
1231 			AMD_CG_SUPPORT_VCE_MGCG;
1232 		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1233 			AMD_PG_SUPPORT_GFX_SMG |
1234 			AMD_PG_SUPPORT_GFX_PIPELINE |
1235 			AMD_PG_SUPPORT_CP |
1236 			AMD_PG_SUPPORT_UVD |
1237 			AMD_PG_SUPPORT_VCE;
1238 		adev->external_rev_id = adev->rev_id + 0x61;
1239 		break;
1240 	default:
1241 		/* FIXME: not supported yet */
1242 		return -EINVAL;
1243 	}
1244 
1245 	if (amdgpu_sriov_vf(adev)) {
1246 		amdgpu_virt_init_setting(adev);
1247 		xgpu_vi_mailbox_set_irq_funcs(adev);
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static int vi_common_late_init(void *handle)
1254 {
1255 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1256 
1257 	if (amdgpu_sriov_vf(adev))
1258 		xgpu_vi_mailbox_get_irq(adev);
1259 
1260 	return 0;
1261 }
1262 
1263 static int vi_common_sw_init(void *handle)
1264 {
1265 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1266 
1267 	if (amdgpu_sriov_vf(adev))
1268 		xgpu_vi_mailbox_add_irq_id(adev);
1269 
1270 	return 0;
1271 }
1272 
1273 static int vi_common_sw_fini(void *handle)
1274 {
1275 	return 0;
1276 }
1277 
1278 static int vi_common_hw_init(void *handle)
1279 {
1280 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1281 
1282 	/* move the golden regs per IP block */
1283 	vi_init_golden_registers(adev);
1284 	/* enable pcie gen2/3 link */
1285 	vi_pcie_gen3_enable(adev);
1286 	/* enable aspm */
1287 	vi_program_aspm(adev);
1288 	/* enable the doorbell aperture */
1289 	vi_enable_doorbell_aperture(adev, true);
1290 
1291 	return 0;
1292 }
1293 
1294 static int vi_common_hw_fini(void *handle)
1295 {
1296 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1297 
1298 	/* disable the doorbell aperture */
1299 	vi_enable_doorbell_aperture(adev, false);
1300 
1301 	if (amdgpu_sriov_vf(adev))
1302 		xgpu_vi_mailbox_put_irq(adev);
1303 
1304 	return 0;
1305 }
1306 
1307 static int vi_common_suspend(void *handle)
1308 {
1309 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1310 
1311 	return vi_common_hw_fini(adev);
1312 }
1313 
1314 static int vi_common_resume(void *handle)
1315 {
1316 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1317 
1318 	return vi_common_hw_init(adev);
1319 }
1320 
1321 static bool vi_common_is_idle(void *handle)
1322 {
1323 	return true;
1324 }
1325 
1326 static int vi_common_wait_for_idle(void *handle)
1327 {
1328 	return 0;
1329 }
1330 
1331 static int vi_common_soft_reset(void *handle)
1332 {
1333 	return 0;
1334 }
1335 
1336 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1337 						   bool enable)
1338 {
1339 	uint32_t temp, data;
1340 
1341 	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1342 
1343 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1344 		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1345 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1346 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1347 	else
1348 		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1349 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1350 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1351 
1352 	if (temp != data)
1353 		WREG32_PCIE(ixPCIE_CNTL2, data);
1354 }
1355 
1356 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1357 						    bool enable)
1358 {
1359 	uint32_t temp, data;
1360 
1361 	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1362 
1363 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1364 		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1365 	else
1366 		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1367 
1368 	if (temp != data)
1369 		WREG32(mmHDP_HOST_PATH_CNTL, data);
1370 }
1371 
1372 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1373 				      bool enable)
1374 {
1375 	uint32_t temp, data;
1376 
1377 	temp = data = RREG32(mmHDP_MEM_POWER_LS);
1378 
1379 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1380 		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1381 	else
1382 		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1383 
1384 	if (temp != data)
1385 		WREG32(mmHDP_MEM_POWER_LS, data);
1386 }
1387 
1388 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1389 				      bool enable)
1390 {
1391 	uint32_t temp, data;
1392 
1393 	temp = data = RREG32(0x157a);
1394 
1395 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1396 		data |= 1;
1397 	else
1398 		data &= ~1;
1399 
1400 	if (temp != data)
1401 		WREG32(0x157a, data);
1402 }
1403 
1404 
1405 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1406 						    bool enable)
1407 {
1408 	uint32_t temp, data;
1409 
1410 	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1411 
1412 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1413 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1414 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1415 	else
1416 		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1417 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1418 
1419 	if (temp != data)
1420 		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1421 }
1422 
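/*
 * Translate the enabled CG flags into PP_CG_MSG_ID() requests and forward
 * them to the SMU through amdgpu_dpm_set_clockgating_by_smu();
 * vi_common_set_clockgating_state() routes Tonga/Polaris/VegaM through
 * this path.
 */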
1423 static int vi_common_set_clockgating_state_by_smu(void *handle,
1424 					   enum amd_clockgating_state state)
1425 {
1426 	uint32_t msg_id, pp_state = 0;
1427 	uint32_t pp_support_state = 0;
1428 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1429 
1430 	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1431 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1432 			pp_support_state = PP_STATE_SUPPORT_LS;
1433 			pp_state = PP_STATE_LS;
1434 		}
1435 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1436 			pp_support_state |= PP_STATE_SUPPORT_CG;
1437 			pp_state |= PP_STATE_CG;
1438 		}
1439 		if (state == AMD_CG_STATE_UNGATE)
1440 			pp_state = 0;
1441 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1442 			       PP_BLOCK_SYS_MC,
1443 			       pp_support_state,
1444 			       pp_state);
1445 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1446 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1447 	}
1448 
1449 	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1450 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1451 			pp_support_state = PP_STATE_SUPPORT_LS;
1452 			pp_state = PP_STATE_LS;
1453 		}
1454 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1455 			pp_support_state |= PP_STATE_SUPPORT_CG;
1456 			pp_state |= PP_STATE_CG;
1457 		}
1458 		if (state == AMD_CG_STATE_UNGATE)
1459 			pp_state = 0;
1460 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1461 			       PP_BLOCK_SYS_SDMA,
1462 			       pp_support_state,
1463 			       pp_state);
1464 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1465 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1466 	}
1467 
1468 	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1469 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1470 			pp_support_state = PP_STATE_SUPPORT_LS;
1471 			pp_state = PP_STATE_LS;
1472 		}
1473 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1474 			pp_support_state |= PP_STATE_SUPPORT_CG;
1475 			pp_state |= PP_STATE_CG;
1476 		}
1477 		if (state == AMD_CG_STATE_UNGATE)
1478 			pp_state = 0;
1479 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1480 			       PP_BLOCK_SYS_HDP,
1481 			       pp_support_state,
1482 			       pp_state);
1483 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1484 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1485 	}
1486 
1487 
1488 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1489 		if (state == AMD_CG_STATE_UNGATE)
1490 			pp_state = 0;
1491 		else
1492 			pp_state = PP_STATE_LS;
1493 
1494 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1495 			       PP_BLOCK_SYS_BIF,
1496 			       PP_STATE_SUPPORT_LS,
1497 			        pp_state);
1498 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1499 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1500 	}
1501 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1502 		if (state == AMD_CG_STATE_UNGATE)
1503 			pp_state = 0;
1504 		else
1505 			pp_state = PP_STATE_CG;
1506 
1507 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1508 			       PP_BLOCK_SYS_BIF,
1509 			       PP_STATE_SUPPORT_CG,
1510 			       pp_state);
1511 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1512 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1513 	}
1514 
1515 	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1516 
1517 		if (state == AMD_CG_STATE_UNGATE)
1518 			pp_state = 0;
1519 		else
1520 			pp_state = PP_STATE_LS;
1521 
1522 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1523 			       PP_BLOCK_SYS_DRM,
1524 			       PP_STATE_SUPPORT_LS,
1525 			       pp_state);
1526 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1527 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1528 	}
1529 
1530 	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1531 
1532 		if (state == AMD_CG_STATE_UNGATE)
1533 			pp_state = 0;
1534 		else
1535 			pp_state = PP_STATE_CG;
1536 
1537 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1538 			       PP_BLOCK_SYS_ROM,
1539 			       PP_STATE_SUPPORT_CG,
1540 			       pp_state);
1541 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1542 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1543 	}
1544 	return 0;
1545 }
1546 
1547 static int vi_common_set_clockgating_state(void *handle,
1548 					   enum amd_clockgating_state state)
1549 {
1550 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 
1552 	if (amdgpu_sriov_vf(adev))
1553 		return 0;
1554 
1555 	switch (adev->asic_type) {
1556 	case CHIP_FIJI:
1557 		vi_update_bif_medium_grain_light_sleep(adev,
1558 				state == AMD_CG_STATE_GATE);
1559 		vi_update_hdp_medium_grain_clock_gating(adev,
1560 				state == AMD_CG_STATE_GATE);
1561 		vi_update_hdp_light_sleep(adev,
1562 				state == AMD_CG_STATE_GATE);
1563 		vi_update_rom_medium_grain_clock_gating(adev,
1564 				state == AMD_CG_STATE_GATE);
1565 		break;
1566 	case CHIP_CARRIZO:
1567 	case CHIP_STONEY:
1568 		vi_update_bif_medium_grain_light_sleep(adev,
1569 				state == AMD_CG_STATE_GATE);
1570 		vi_update_hdp_medium_grain_clock_gating(adev,
1571 				state == AMD_CG_STATE_GATE);
1572 		vi_update_hdp_light_sleep(adev,
1573 				state == AMD_CG_STATE_GATE);
1574 		vi_update_drm_light_sleep(adev,
1575 				state == AMD_CG_STATE_GATE);
1576 		break;
1577 	case CHIP_TONGA:
1578 	case CHIP_POLARIS10:
1579 	case CHIP_POLARIS11:
1580 	case CHIP_POLARIS12:
1581 	case CHIP_VEGAM:
1582 		vi_common_set_clockgating_state_by_smu(adev, state);
1583 	default:
1584 		break;
1585 	}
1586 	return 0;
1587 }
1588 
1589 static int vi_common_set_powergating_state(void *handle,
1590 					    enum amd_powergating_state state)
1591 {
1592 	return 0;
1593 }
1594 
1595 static void vi_common_get_clockgating_state(void *handle, u32 *flags)
1596 {
1597 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1598 	int data;
1599 
1600 	if (amdgpu_sriov_vf(adev))
1601 		*flags = 0;
1602 
1603 	/* AMD_CG_SUPPORT_BIF_LS */
1604 	data = RREG32_PCIE(ixPCIE_CNTL2);
1605 	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
1606 		*flags |= AMD_CG_SUPPORT_BIF_LS;
1607 
1608 	/* AMD_CG_SUPPORT_HDP_LS */
1609 	data = RREG32(mmHDP_MEM_POWER_LS);
1610 	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1611 		*flags |= AMD_CG_SUPPORT_HDP_LS;
1612 
1613 	/* AMD_CG_SUPPORT_HDP_MGCG */
1614 	data = RREG32(mmHDP_HOST_PATH_CNTL);
1615 	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
1616 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
1617 
1618 	/* AMD_CG_SUPPORT_ROM_MGCG */
1619 	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1620 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1621 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
1622 }
1623 
1624 static const struct amd_ip_funcs vi_common_ip_funcs = {
1625 	.name = "vi_common",
1626 	.early_init = vi_common_early_init,
1627 	.late_init = vi_common_late_init,
1628 	.sw_init = vi_common_sw_init,
1629 	.sw_fini = vi_common_sw_fini,
1630 	.hw_init = vi_common_hw_init,
1631 	.hw_fini = vi_common_hw_fini,
1632 	.suspend = vi_common_suspend,
1633 	.resume = vi_common_resume,
1634 	.is_idle = vi_common_is_idle,
1635 	.wait_for_idle = vi_common_wait_for_idle,
1636 	.soft_reset = vi_common_soft_reset,
1637 	.set_clockgating_state = vi_common_set_clockgating_state,
1638 	.set_powergating_state = vi_common_set_powergating_state,
1639 	.get_clockgating_state = vi_common_get_clockgating_state,
1640 };
1641 
1642 static const struct amdgpu_ip_block_version vi_common_ip_block =
1643 {
1644 	.type = AMD_IP_BLOCK_TYPE_COMMON,
1645 	.major = 1,
1646 	.minor = 0,
1647 	.rev = 0,
1648 	.funcs = &vi_common_ip_funcs,
1649 };
1650 
1651 int vi_set_ip_blocks(struct amdgpu_device *adev)
1652 {
1653 	/* in early init stage, vbios code won't work */
1654 	vi_detect_hw_virtualization(adev);
1655 
1656 	if (amdgpu_sriov_vf(adev))
1657 		adev->virt.ops = &xgpu_vi_virt_ops;
1658 
1659 	switch (adev->asic_type) {
1660 	case CHIP_TOPAZ:
1661 		/* topaz has no DCE, UVD, VCE */
1662 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1663 		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1664 		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1665 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1666 		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
1667 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1668 		if (adev->enable_virtual_display)
1669 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1670 		break;
1671 	case CHIP_FIJI:
1672 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1673 		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1674 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1675 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1676 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1677 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1678 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1679 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1680 #if defined(CONFIG_DRM_AMD_DC)
1681 		else if (amdgpu_device_has_dc_support(adev))
1682 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1683 #endif
1684 		else
1685 			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
1686 		if (!amdgpu_sriov_vf(adev)) {
1687 			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1688 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1689 		}
1690 		break;
1691 	case CHIP_TONGA:
1692 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1693 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1694 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1695 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1696 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1697 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1698 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1699 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1700 #if defined(CONFIG_DRM_AMD_DC)
1701 		else if (amdgpu_device_has_dc_support(adev))
1702 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1703 #endif
1704 		else
1705 			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
1706 		if (!amdgpu_sriov_vf(adev)) {
1707 			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
1708 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1709 		}
1710 		break;
1711 	case CHIP_POLARIS10:
1712 	case CHIP_POLARIS11:
1713 	case CHIP_POLARIS12:
1714 	case CHIP_VEGAM:
1715 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1716 		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1717 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1718 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1719 		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
1720 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1721 		if (adev->enable_virtual_display)
1722 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1723 #if defined(CONFIG_DRM_AMD_DC)
1724 		else if (amdgpu_device_has_dc_support(adev))
1725 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1726 #endif
1727 		else
1728 			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
1729 		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
1730 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1731 		break;
1732 	case CHIP_CARRIZO:
1733 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1734 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1735 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1736 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1737 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1738 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1739 		if (adev->enable_virtual_display)
1740 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1741 #if defined(CONFIG_DRM_AMD_DC)
1742 		else if (amdgpu_device_has_dc_support(adev))
1743 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1744 #endif
1745 		else
1746 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1747 		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1748 		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
1749 #if defined(CONFIG_DRM_AMD_ACP)
1750 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
1751 #endif
1752 		break;
1753 	case CHIP_STONEY:
1754 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1755 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1756 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1757 		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
1758 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1759 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1760 		if (adev->enable_virtual_display)
1761 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1762 #if defined(CONFIG_DRM_AMD_DC)
1763 		else if (amdgpu_device_has_dc_support(adev))
1764 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1765 #endif
1766 		else
1767 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1768 		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
1769 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1770 #if defined(CONFIG_DRM_AMD_ACP)
1771 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
1772 #endif
1773 		break;
1774 	default:
1775 		/* FIXME: not supported yet */
1776 		return -EINVAL;
1777 	}
1778 
1779 	return 0;
1780 }
1781 
1782 void legacy_doorbell_index_init(struct amdgpu_device *adev)
1783 {
1784 	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
1785 	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
1786 	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
1787 	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
1788 	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
1789 	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
1790 	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
1791 	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
1792 	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
1793 	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
1794 	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
1795 	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
1796 	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
1797 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
1798 }
1799