/* drivers/gpu/drm/amd/amdgpu/vi.c (revision a8fe58ce) */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

/*
 * Indirect registers accessor
 */
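/*
 * These helpers implement the usual index/data pair: write the register
 * offset to an INDEX register, then read or write the DATA register.
 * The (void)RREG32(mmPCIE_INDEX) read-back after writing the index is a
 * posting read that ensures the index write has landed before the data
 * access.  Illustrative use (not driver code; SOME_BIT is hypothetical):
 *
 *	u32 val = adev->pcie_rreg(adev, offset);
 *	adev->pcie_wreg(adev, offset, val | SOME_BIT);
 *
 * The per-aperture spinlocks serialize the index/data sequence against
 * concurrent users of the same pair.
 */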
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

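/*
 * On APUs (Carrizo/Stoney) the SMC is reached through the MP0PUB
 * index/data pair defined above rather than SMC_IND_INDEX_0/DATA_0;
 * vi_common_early_init() picks the matching accessor at runtime.
 */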
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

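/*
 * Golden register tables are flat arrays of {offset, and_mask, or_mask}
 * triples.  amdgpu_program_register_sequence() walks them three entries
 * at a time, doing a read-modify-write that clears the and_mask bits and
 * sets the or_mask bits (with an and_mask of 0xffffffff the or_mask is
 * written verbatim).  Sketch of the consumer loop (illustrative only):
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		tmp = RREG32(regs[i]);
 *		tmp &= ~regs[i + 1];
 *		tmp |= regs[i + 2];
 *		WREG32(regs[i], tmp);
 *	}
 */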
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
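
/*
 * Typical calling pattern (illustrative, not driver code): select an
 * instance, program its registers, then restore default banking so
 * later accesses hit instance 0:
 *
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *
 * Callers are expected to hold adev->srbm_mutex around the sequence.
 */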

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

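/*
 * Allow-lists for the register read path of the amdgpu INFO ioctl
 * (AMDGPU_INFO_READ_MMR_REG), reached through vi_read_register() below.
 * Each amdgpu_allowed_register_entry is { reg_offset, untouched,
 * grbm_indexed }: "untouched" entries report 0 without reading the
 * hardware, and "grbm_indexed" entries are read under GRBM SE/SH
 * banking via vi_read_indexed_register().
 */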
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
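	/* while in reset, MMIO reads come back as all ones */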
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	vi_set_bios_scratch_engine_hung(adev, true);

	vi_gpu_pci_config_reset(adev);

	vi_set_bios_scratch_engine_hung(adev, false);

	return 0;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

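	/* poll up to ~1 second (100 * 10ms) for the new divider to take effect */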
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

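/*
 * Per-ASIC IP block lists.  amdgpu brings the blocks up in array order
 * (and tears them down in reverse), so COMMON, GMC and IH must come
 * before the engines that depend on them -- hence "ORDER MATTERS!".
 */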
/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};

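/*
 * vi_set_ip_blocks - attach the per-ASIC IP block list
 *
 * Points adev->ip_blocks at the table matching adev->asic_type during
 * early device init.  Returns 0 on success, -EINVAL for ASICs this
 * file does not handle.
 */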
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		break;
	case CHIP_FIJI:
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		break;
	case CHIP_TONGA:
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

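/*
 * On APUs the revision comes from an SMC fuse word; the mask 0x00001E00
 * selects bits [12:9], so e.g. a raw value of 0x0A00 yields rev_id
 * (0x0A00 & 0x1E00) >> 9 = 5.  dGPUs instead report the revision
 * through the PCIE_EFUSE4 strap.
 */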
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_cu_info = &gfx_v8_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->has_uvd = false;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		/* Disable UVD pg */
		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static void vi_common_print_status(void *handle)
{
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

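/*
 * The fiji_update_* helpers below share one pattern: snapshot the
 * register (temp = data = RREG32...), toggle the gating bits in data,
 * and write back only when the value actually changed, which avoids
 * redundant and potentially expensive indirect register writes.
 */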
static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable)
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable)
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable)
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable)
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.print_status = vi_common_print_status,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};
