/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "amdgpu_vkms.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

#define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK	0x00000004L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK		0x00000008L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK		0x00000010L
#define ixPCIE_L1_PM_SUB_CNTL	0x378
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK	0x00000004L
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK	0x00000008L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK	0x00000001L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK	0x00000002L
#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK		0x00200000L
#define LINK_CAP	0x64
#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK	0x00040000L
#define ixCPM_CONTROL	0x1400118
#define ixPCIE_LC_CNTL7	0x100100BC
#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK	0x00000400L
#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT	0x00000007
#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT	0x00000009
#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK	0x01000000L
#define PCIE_L1_PM_SUB_CNTL	0x378
#define ASIC_IS_P22(asic_type, rid)	((asic_type >= CHIP_POLARIS10) && \
									(asic_type <= CHIP_POLARIS12) && \
									(rid >= 0x6E))
/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga, CZ, ST, Fiji */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
	.codec_array = tonga_video_codecs_encode_array,
};

/* Polaris */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
	.codec_array = polaris_video_codecs_encode_array,
};

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
	.codec_array = tonga_video_codecs_decode_array,
};

/* CZ, ST, Fiji, Polaris */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
	.codec_array = cz_video_codecs_decode_array,
};

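/**
 * vi_query_video_codecs - report the video codec capabilities
 *
 * @adev: amdgpu_device pointer
 * @encode: true for the encode table, false for the decode table
 * @codecs: filled with a pointer to the matching per-ASIC codec table
 *
 * Returns 0 on success, -EINVAL for ASICs without a table.
 */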
static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		if (encode)
			*codecs = &topaz_video_codecs_encode;
		else
			*codecs = &topaz_video_codecs_decode;
		return 0;
	case CHIP_TONGA:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &tonga_video_codecs_decode;
		return 0;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if (encode)
			*codecs = &polaris_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect register accessors: each pair programs an INDEX register and
 * then accesses the matching DATA register, under a spinlock so the
 * index/data sequence stays atomic.
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* MP0PUB index/data pair from smu_8_0_d.h, used for SMC access on APUs */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

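/*
 * Golden register settings. Each array is consumed by
 * amdgpu_device_program_register_sequence() as {offset, AND mask,
 * OR value} triplets.
 */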
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU) {
		switch (adev->asic_type) {
		case CHIP_STONEY:
			/* vbios says 48 MHz, but the actual frequency is 100 MHz */
			return 10000;
		default:
			return reference_clock;
		}
	}

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

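/**
 * vi_read_bios_from_rom - read the vbios image out of the serial ROM
 *
 * @adev: amdgpu_device pointer
 * @bios: destination buffer
 * @length_bytes: number of bytes to read
 *
 * Streams the ROM contents through the SMC ROM_INDEX/ROM_DATA register
 * pair.  Returns false on APUs, where the vbios is carried inside the
 * system BIOS image instead.
 */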
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

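/*
 * Allowlist of registers that userspace may query through
 * vi_read_register().  Entries flagged true are GRBM-indexed and are
 * answered from the cached gfx config instead of a raw MMIO read.
 */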
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;
	int r = -EINVAL;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			r = 0;
			break;
		}
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

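/*
 * BACO (Bus Active, Chip Off) is only implemented on the dGPUs listed
 * below; ask the DPM code whether it is actually usable, and report
 * false for everything else.
 */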
static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
				  amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	/* APUs don't have full asic reset */
	if (adev->flags & AMD_IS_APU)
		return 0;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

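/*
 * Program one UVD clock (vclk or dclk): look up the divider via
 * atombios, install it in the DFS control register, then poll the
 * status register until the new frequency has latched.
 */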
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

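/*
 * Program the VCE ecclk the same way: wait for any in-flight DFS
 * switch to finish, install the new divider, then poll the status
 * register again for completion.
 */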
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

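/*
 * Arm the L0s/L1 inactivity timers and allow the link controller to
 * enter L1; called once the rest of the ASPM programming is done.
 */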
static void vi_enable_aspm(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
			PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
			PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);
}

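/*
 * Full ASPM bring-up for Polaris dGPUs: detect L1 substate and CLKREQ#
 * support, reroute the clocks that must keep running while the link
 * powers down, then enable L0s/L1 entry via vi_enable_aspm().
 */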
static void vi_program_aspm(struct amdgpu_device *adev)
{
	u32 data, data1, orig;
	bool bL1SS = false;
	bool bClkReqSupport = true;

	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
		return;

	if (adev->flags & AMD_IS_APU ||
	    adev->asic_type < CHIP_POLARIS10)
		return;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
	data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
	data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
	data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_P_CNTL, data);

	data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
	pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
	if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
	    (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
		    PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
			PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
			PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
		bL1SS = true;
	} else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
	    PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
	    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
	    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
		bL1SS = true;
	}

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL6, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
	data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);

	pci_read_config_dword(adev->pdev, LINK_CAP, &data);
	if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
		bClkReqSupport = false;

	if (bClkReqSupport) {
		orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
		data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
		data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
				(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixTHM_CLK_CNTL, data);

		orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
		data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
			MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
		data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
				(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
		data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMISC_CLK_CTRL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
		data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
		data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);

		orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
		data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
		data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);

		orig = data = RREG32_PCIE(ixCPM_CONTROL);
		data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
				CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
		if (orig != data)
			WREG32_PCIE(ixCPM_CONTROL, data);

		orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
		data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
		data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
		if (orig != data)
			WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);

		orig = data = RREG32(mmBIF_CLK_CTRL);
		data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
		if (orig != data)
			WREG32(mmBIF_CLK_CTRL, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
		data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL7, data);

		orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
		data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_HW_DEBUG, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (bL1SS)
			data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL2, data);

	}

	vi_enable_aspm(adev);

	data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
	if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL, data);
	}

	if ((adev->asic_type == CHIP_POLARIS12 &&
	    !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
	    ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
		orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
		data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
	}
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

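/* On APUs the revision id comes from a fuse; dGPUs strap it into PCIE_EFUSE4 */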
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

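/* HDP flush/invalidate: emit on the ring if it can write registers, else MMIO */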
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

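/*
 * If the SMC firmware is already up (e.g. after a kexec or a driver
 * reload) the ASIC needs a reset before it can be reinitialized.
 */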
static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
	.query_video_codecs = &vi_query_video_codecs,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

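/*
 * Early init: hook up the indirect register accessors (APU vs dGPU SMC
 * paths), then derive the clock/power gating flags and external
 * revision id for each ASIC.
 */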
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

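/*
 * Per-block clockgating helpers: each one toggles a single light-sleep
 * or medium-grain gating feature, guarded by the corresponding
 * AMD_CG_SUPPORT_* flag.
 */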
1770 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1771 						   bool enable)
1772 {
1773 	uint32_t temp, data;
1774 
1775 	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1776 
1777 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1778 		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1779 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1780 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1781 	else
1782 		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1783 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1784 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1785 
1786 	if (temp != data)
1787 		WREG32_PCIE(ixPCIE_CNTL2, data);
1788 }
1789 
1790 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1791 						    bool enable)
1792 {
1793 	uint32_t temp, data;
1794 
1795 	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1796 
1797 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1798 		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1799 	else
1800 		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1801 
1802 	if (temp != data)
1803 		WREG32(mmHDP_HOST_PATH_CNTL, data);
1804 }
1805 
1806 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1807 				      bool enable)
1808 {
1809 	uint32_t temp, data;
1810 
1811 	temp = data = RREG32(mmHDP_MEM_POWER_LS);
1812 
1813 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1814 		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1815 	else
1816 		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1817 
1818 	if (temp != data)
1819 		WREG32(mmHDP_MEM_POWER_LS, data);
1820 }
1821 
1822 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1823 				      bool enable)
1824 {
1825 	uint32_t temp, data;
1826 
1827 	temp = data = RREG32(0x157a);
1828 
1829 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1830 		data |= 1;
1831 	else
1832 		data &= ~1;
1833 
1834 	if (temp != data)
1835 		WREG32(0x157a, data);
1836 }
1837 
1839 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1840 						    bool enable)
1841 {
1842 	uint32_t temp, data;
1843 
1844 	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1845 
1846 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1847 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1848 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1849 	else
1850 		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1851 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1852 
1853 	if (temp != data)
1854 		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1855 }
1856 
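/*
 * On Tonga/Polaris/VegaM the SMU owns system-block clockgating, so each
 * request below is packed into a PP_CG_MSG_ID() message (group, block,
 * the modes the driver supports, and the requested state) and handed to
 * amdgpu_dpm_set_clockgating_by_smu().  Gating MC with both LS and MGCG
 * supported, for example, amounts to:
 *
 *	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, PP_BLOCK_SYS_MC,
 *			      PP_STATE_SUPPORT_LS | PP_STATE_SUPPORT_CG,
 *			      PP_STATE_LS | PP_STATE_CG);
 *	amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 *
 * Ungating keeps the support mask but sends pp_state = 0.
 */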
1857 static int vi_common_set_clockgating_state_by_smu(void *handle,
1858 					   enum amd_clockgating_state state)
1859 {
1860 	uint32_t msg_id, pp_state = 0;
1861 	uint32_t pp_support_state = 0;
1862 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1863 
1864 	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1865 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1866 			pp_support_state = PP_STATE_SUPPORT_LS;
1867 			pp_state = PP_STATE_LS;
1868 		}
1869 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1870 			pp_support_state |= PP_STATE_SUPPORT_CG;
1871 			pp_state |= PP_STATE_CG;
1872 		}
1873 		if (state == AMD_CG_STATE_UNGATE)
1874 			pp_state = 0;
1875 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1876 			       PP_BLOCK_SYS_MC,
1877 			       pp_support_state,
1878 			       pp_state);
1879 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1880 	}
1881 
1882 	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		/* reset so SDMA does not inherit the MC support/state bits */
		pp_support_state = 0;
		pp_state = 0;
1883 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1884 			pp_support_state = PP_STATE_SUPPORT_LS;
1885 			pp_state = PP_STATE_LS;
1886 		}
1887 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1888 			pp_support_state |= PP_STATE_SUPPORT_CG;
1889 			pp_state |= PP_STATE_CG;
1890 		}
1891 		if (state == AMD_CG_STATE_UNGATE)
1892 			pp_state = 0;
1893 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1894 			       PP_BLOCK_SYS_SDMA,
1895 			       pp_support_state,
1896 			       pp_state);
1897 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1898 	}
1899 
1900 	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		/* reset so HDP does not inherit the SDMA support/state bits */
		pp_support_state = 0;
		pp_state = 0;
1901 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1902 			pp_support_state = PP_STATE_SUPPORT_LS;
1903 			pp_state = PP_STATE_LS;
1904 		}
1905 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1906 			pp_support_state |= PP_STATE_SUPPORT_CG;
1907 			pp_state |= PP_STATE_CG;
1908 		}
1909 		if (state == AMD_CG_STATE_UNGATE)
1910 			pp_state = 0;
1911 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1912 			       PP_BLOCK_SYS_HDP,
1913 			       pp_support_state,
1914 			       pp_state);
1915 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1916 	}
1917 
1919 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1920 		if (state == AMD_CG_STATE_UNGATE)
1921 			pp_state = 0;
1922 		else
1923 			pp_state = PP_STATE_LS;
1924 
1925 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1926 			       PP_BLOCK_SYS_BIF,
1927 			       PP_STATE_SUPPORT_LS,
1928 			       pp_state);
1929 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1930 	}
1931 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1932 		if (state == AMD_CG_STATE_UNGATE)
1933 			pp_state = 0;
1934 		else
1935 			pp_state = PP_STATE_CG;
1936 
1937 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1938 			       PP_BLOCK_SYS_BIF,
1939 			       PP_STATE_SUPPORT_CG,
1940 			       pp_state);
1941 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1942 	}
1943 
1944 	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1946 		if (state == AMD_CG_STATE_UNGATE)
1947 			pp_state = 0;
1948 		else
1949 			pp_state = PP_STATE_LS;
1950 
1951 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1952 			       PP_BLOCK_SYS_DRM,
1953 			       PP_STATE_SUPPORT_LS,
1954 			       pp_state);
1955 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1956 	}
1957 
1958 	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1960 		if (state == AMD_CG_STATE_UNGATE)
1961 			pp_state = 0;
1962 		else
1963 			pp_state = PP_STATE_CG;
1964 
1965 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1966 			       PP_BLOCK_SYS_ROM,
1967 			       PP_STATE_SUPPORT_CG,
1968 			       pp_state);
1969 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1970 	}
1971 	return 0;
1972 }
1973 
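/*
 * Fiji, Carrizo and Stoney drive the system-block gating registers
 * directly through the helpers above; Tonga and the Polaris parts route
 * the request through the SMU instead.  Under SR-IOV the host owns
 * clockgating, so VFs return early.
 */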
1974 static int vi_common_set_clockgating_state(void *handle,
1975 					   enum amd_clockgating_state state)
1976 {
1977 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1978 
1979 	if (amdgpu_sriov_vf(adev))
1980 		return 0;
1981 
1982 	switch (adev->asic_type) {
1983 	case CHIP_FIJI:
1984 		vi_update_bif_medium_grain_light_sleep(adev,
1985 				state == AMD_CG_STATE_GATE);
1986 		vi_update_hdp_medium_grain_clock_gating(adev,
1987 				state == AMD_CG_STATE_GATE);
1988 		vi_update_hdp_light_sleep(adev,
1989 				state == AMD_CG_STATE_GATE);
1990 		vi_update_rom_medium_grain_clock_gating(adev,
1991 				state == AMD_CG_STATE_GATE);
1992 		break;
1993 	case CHIP_CARRIZO:
1994 	case CHIP_STONEY:
1995 		vi_update_bif_medium_grain_light_sleep(adev,
1996 				state == AMD_CG_STATE_GATE);
1997 		vi_update_hdp_medium_grain_clock_gating(adev,
1998 				state == AMD_CG_STATE_GATE);
1999 		vi_update_hdp_light_sleep(adev,
2000 				state == AMD_CG_STATE_GATE);
2001 		vi_update_drm_light_sleep(adev,
2002 				state == AMD_CG_STATE_GATE);
2003 		break;
2004 	case CHIP_TONGA:
2005 	case CHIP_POLARIS10:
2006 	case CHIP_POLARIS11:
2007 	case CHIP_POLARIS12:
2008 	case CHIP_VEGAM:
2009 		vi_common_set_clockgating_state_by_smu(adev, state);
2010 		break;
2011 	default:
2012 		break;
2013 	}
2014 	return 0;
2015 }
2016 
2017 static int vi_common_set_powergating_state(void *handle,
2018 					    enum amd_powergating_state state)
2019 {
2020 	return 0;
2021 }
2022 
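/*
 * Report which clockgating features are actually live by reading the
 * enable bits back from the hardware rather than trusting cg_flags.
 */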
2023 static void vi_common_get_clockgating_state(void *handle, u64 *flags)
2024 {
2025 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2026 	uint32_t data;
2027 
2028 	if (amdgpu_sriov_vf(adev))
2029 		*flags = 0;
2030 
2031 	/* AMD_CG_SUPPORT_BIF_LS */
2032 	data = RREG32_PCIE(ixPCIE_CNTL2);
2033 	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
2034 		*flags |= AMD_CG_SUPPORT_BIF_LS;
2035 
2036 	/* AMD_CG_SUPPORT_HDP_LS */
2037 	data = RREG32(mmHDP_MEM_POWER_LS);
2038 	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
2039 		*flags |= AMD_CG_SUPPORT_HDP_LS;
2040 
2041 	/* AMD_CG_SUPPORT_HDP_MGCG */
2042 	data = RREG32(mmHDP_HOST_PATH_CNTL);
2043 	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
2044 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
2045 
2046 	/* AMD_CG_SUPPORT_ROM_MGCG */
2047 	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
2048 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
2049 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
2050 }
2051 
2052 static const struct amd_ip_funcs vi_common_ip_funcs = {
2053 	.name = "vi_common",
2054 	.early_init = vi_common_early_init,
2055 	.late_init = vi_common_late_init,
2056 	.sw_init = vi_common_sw_init,
2057 	.sw_fini = vi_common_sw_fini,
2058 	.hw_init = vi_common_hw_init,
2059 	.hw_fini = vi_common_hw_fini,
2060 	.suspend = vi_common_suspend,
2061 	.resume = vi_common_resume,
2062 	.is_idle = vi_common_is_idle,
2063 	.wait_for_idle = vi_common_wait_for_idle,
2064 	.soft_reset = vi_common_soft_reset,
2065 	.set_clockgating_state = vi_common_set_clockgating_state,
2066 	.set_powergating_state = vi_common_set_powergating_state,
2067 	.get_clockgating_state = vi_common_get_clockgating_state,
2068 };
2069 
2070 static const struct amdgpu_ip_block_version vi_common_ip_block =
2071 {
2072 	.type = AMD_IP_BLOCK_TYPE_COMMON,
2073 	.major = 1,
2074 	.minor = 0,
2075 	.rev = 0,
2076 	.funcs = &vi_common_ip_funcs,
2077 };
2078 
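/* Install the XGPU VI mailbox ops; called during early init for SR-IOV VFs. */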
2079 void vi_set_virt_ops(struct amdgpu_device *adev)
2080 {
2081 	adev->virt.ops = &xgpu_vi_virt_ops;
2082 }
2083 
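/*
 * IP blocks are added in dependency order: common first, then GMC and IH
 * ahead of the engine blocks (GFX, SDMA) and the SMU, with display and
 * multimedia (UVD/VCE, ACP) last.  Exactly one display path is chosen:
 * virtual display (VKMS), DC when compiled in and supported, or the
 * legacy DCE block for the ASIC.
 */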
2084 int vi_set_ip_blocks(struct amdgpu_device *adev)
2085 {
2086 	amdgpu_device_set_sriov_virtual_display(adev);
2087 
2088 	switch (adev->asic_type) {
2089 	case CHIP_TOPAZ:
2090 		/* topaz has no DCE, UVD, VCE */
2091 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2092 		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
2093 		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
2094 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2095 		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
2096 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2097 		if (adev->enable_virtual_display)
2098 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2099 		break;
2100 	case CHIP_FIJI:
2101 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2102 		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
2103 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
2104 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2105 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2106 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2107 		if (adev->enable_virtual_display)
2108 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2109 #if defined(CONFIG_DRM_AMD_DC)
2110 		else if (amdgpu_device_has_dc_support(adev))
2111 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2112 #endif
2113 		else
2114 			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
2115 		if (!amdgpu_sriov_vf(adev)) {
2116 			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
2117 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
2118 		}
2119 		break;
2120 	case CHIP_TONGA:
2121 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2122 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
2123 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
2124 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2125 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2126 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2127 		if (adev->enable_virtual_display)
2128 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2129 #if defined(CONFIG_DRM_AMD_DC)
2130 		else if (amdgpu_device_has_dc_support(adev))
2131 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2132 #endif
2133 		else
2134 			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
2135 		if (!amdgpu_sriov_vf(adev)) {
2136 			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
2137 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
2138 		}
2139 		break;
2140 	case CHIP_POLARIS10:
2141 	case CHIP_POLARIS11:
2142 	case CHIP_POLARIS12:
2143 	case CHIP_VEGAM:
2144 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2145 		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
2146 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
2147 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2148 		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
2149 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2150 		if (adev->enable_virtual_display)
2151 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2152 #if defined(CONFIG_DRM_AMD_DC)
2153 		else if (amdgpu_device_has_dc_support(adev))
2154 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2155 #endif
2156 		else
2157 			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
2158 		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
2159 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
2160 		break;
2161 	case CHIP_CARRIZO:
2162 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2163 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
2164 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
2165 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2166 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2167 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2168 		if (adev->enable_virtual_display)
2169 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2170 #if defined(CONFIG_DRM_AMD_DC)
2171 		else if (amdgpu_device_has_dc_support(adev))
2172 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2173 #endif
2174 		else
2175 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
2176 		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
2177 		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
2178 #if defined(CONFIG_DRM_AMD_ACP)
2179 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
2180 #endif
2181 		break;
2182 	case CHIP_STONEY:
2183 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2184 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
2185 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
2186 		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
2187 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2188 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2189 		if (adev->enable_virtual_display)
2190 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2191 #if defined(CONFIG_DRM_AMD_DC)
2192 		else if (amdgpu_device_has_dc_support(adev))
2193 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2194 #endif
2195 		else
2196 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
2197 		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
2198 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
2199 #if defined(CONFIG_DRM_AMD_ACP)
2200 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
2201 #endif
2202 		break;
2203 	default:
2204 		/* FIXME: not supported yet */
2205 		return -EINVAL;
2206 	}
2207 
2208 	return 0;
2209 }
2210 
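/**
 * legacy_doorbell_index_init - init doorbell offsets for a device
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell indices for legacy (pre-soc15) asics; the assignments
 * mirror the fixed AMDGPU_DOORBELL_* enum one to one.
 */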
2211 void legacy_doorbell_index_init(struct amdgpu_device *adev)
2212 {
2213 	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
2214 	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
2215 	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
2216 	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
2217 	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
2218 	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
2219 	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
2220 	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
2221 	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
2222 	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
2223 	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
2224 	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
2225 	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
2226 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
2227 }
2228