/* drivers/gpu/drm/amd/amdgpu/vi.c (revision 160b8e75) */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

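/*
 * The remaining accessors below follow the same index/data pattern as
 * the PCIe pair above: take the aperture's spinlock, write the
 * register offset to an INDEX register, then access the matching DATA
 * register.  The dummy RREG32() calls in the PCIe accessors read the
 * registers back so each posted write is flushed before the next
 * access.
 */
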
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

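/*
 * vi_common_early_init() installs the MP0PUB-based cz_smc_* pair
 * above as adev->smc_rreg/smc_wreg on APUs (smu 8.0) and the
 * SMC_IND_INDEX_11-based vi_smc_* pair on dGPUs, so callers never
 * need to know which indirect SMC aperture the part uses.
 */
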
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

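/*
 * Golden register tables: each entry is a {register, AND mask, OR
 * value} triplet.  amdgpu_device_program_register_sequence() clears
 * the masked bits of the current register value and ORs in the new
 * value (an all-ones mask writes the value directly).
 */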
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* the APU VBIOS image is part of the system BIOS image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means PF and 1 means VF */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means IOV is disabled and 1 means it is enabled */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

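/*
 * Allowlist for vi_read_register(): only the offsets named here can
 * be read back through the read_register asic callback.  Entries
 * flagged true are GRBM-indexed, i.e. their value depends on the
 * selected SE/SH, so vi_get_register_value() either returns the
 * cached per-SE/SH config or brackets the read with
 * amdgpu_gfx_select_se_sh().
 */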
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

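/*
 * mmCONFIG_MEMSIZE reads back as 0xffffffff while the ASIC is still
 * in reset, so the poll above simply waits (up to adev->usec_timeout
 * microseconds) for the first read that returns real data before
 * re-enabling bus mastering.
 */
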
/**
 * vi_asic_reset - asic reset (VI)
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the ASIC via the PCI config reset above, with the
 * engine-hung flag set in the AtomBIOS scratch registers for
 * the duration of the reset.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

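/*
 * vi_set_uvd_clock() above asks the AtomBIOS for the divider that
 * yields the requested clock, programs only the divider field of the
 * given CNTL register, and then polls the matching STATUS register
 * (at 10 ms intervals, up to 1 s) until the clock reports stable.
 * The VCE clock code below follows the same pattern for ECLK.
 */
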
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

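/*
 * HDP flush/invalidate above: when no ring is given (or the ring
 * cannot emit register writes) the write is done directly via MMIO
 * and read back to make sure it has landed; otherwise it is emitted
 * into the ring so it executes in order with the other submitted
 * commands.
 */
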
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

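/*
 * Carrizo parts whose PCI revision ID falls in the CZ_REV_BRISTOL()
 * ranges are Bristol Ridge; unlike rev0 Carrizo they do not need the
 * rev0 powergating workarounds (see the CHIP_CARRIZO case below).
 */
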
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

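/*
 * The vi_update_*() clockgating helpers below all follow the same
 * read-modify-write pattern: read the current value, compute the
 * gated or ungated value from the cg_flags support bits, and only
 * write the register back if it actually changed.
 */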
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	/* 0x157a has no symbolic define in the headers included here;
	 * bit 0 of it controls DRM light sleep.
	 */
	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

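/*
 * On Tonga and Polaris the SMU owns system-block clockgating.  Each
 * PP_CG_MSG_ID() below packs a group, a block, the supported gating
 * features and the requested state into a single message that is
 * handed to the SMU via amdgpu_dpm_set_clockgating_by_smu().
 */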
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
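		/* fall through */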
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

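/*
 * IP blocks are added in initialization order: common first, then
 * GMC, IH and SMU, then the display block (virtual DCE for virtual
 * display or SR-IOV, DC when supported, native DCE otherwise), then
 * GFX and SDMA, and finally the UVD/VCE (and ACP) blocks.
 */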
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
1623