/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"

/*
 * Indirect register accessors
 *
 * Several VI register spaces (PCIE, SMC, UVD context, DIDT) are reached
 * through an index/data register pair; each accessor takes the matching
 * spinlock so the index cannot change under a concurrent caller.
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, reg);
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, reg);
	WREG32(mmSMC_IND_DATA_0, v);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, reg);
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, reg);
	WREG32(mmMP0PUB_IND_DATA, v);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
	WREG32(mmUVD_CTX_DATA, v);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);
	WREG32(mmDIDT_IND_DATA, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

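/*
 * Golden register tables.  Each entry appears to be consumed as a
 * {register, AND mask, OR value} triplet by
 * amdgpu_program_register_sequence(): the register is read, the masked
 * bits are cleared and the new value is ORed in.
 */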
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

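/**
 * vi_init_golden_registers - program the golden registers
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the ASIC-specific MGCG/CGCG init sequence above for the
 * detected ASIC, under the GRBM index mutex (VI).
 */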
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

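/**
 * vi_read_disabled_bios - read the VBIOS with the display hardware quiesced
 *
 * @adev: amdgpu_device pointer
 *
 * Temporarily enables the ROM, disables VGA mode and overrides the ROM
 * clock so the BIOS image can be fetched, then restores every register
 * it touched.  Returns true if the BIOS was read successfully.
 */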
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

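/**
 * vi_read_bios_from_rom - read the VBIOS through the SMC ROM port
 *
 * @adev: amdgpu_device pointer
 * @bios: destination buffer
 * @length_bytes: number of bytes to read
 *
 * Reads the VBIOS image dword by dword through the ROM_INDEX/ROM_DATA
 * SMC indirect registers.  Not usable on APUs, where the VBIOS lives
 * inside the system BIOS image.  Returns true on success.
 */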
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of the system BIOS image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

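/*
 * Whitelists of registers that vi_read_register() (the .read_register
 * asic callback) may return.  Entries flagged "untouched" report 0
 * instead of reading hardware; entries flagged "grbm_indexed" are read
 * through the GRBM SE/SH index.
 */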
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

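/**
 * vi_read_register - read a whitelisted register
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address (0xffffffff for broadcast)
 * @sh_num: shader array to address (0xffffffff for broadcast)
 * @reg_offset: register to read
 * @value: where to store the value read
 *
 * Checks the per-ASIC table first, then the common VI table.
 * Returns 0 on success, -EINVAL if the register is not whitelisted.
 */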
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(mmGRBM_STATUS));
	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(mmGRBM_STATUS2));
	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE0));
	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE1));
	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE2));
	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE3));
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));
	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
	if (adev->sdma.num_instances > 1) {
		dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
			RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
	}
	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT1));
	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT2));
	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT3));
	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
		 RREG32(mmCP_CPF_BUSY_STAT));
	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPF_STALLED_STAT1));
	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPC_STALLED_STAT1));
	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}

/**
 * vi_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by vi_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		reset_mask |= AMDGPU_RESET_GFX;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_RLC;

	if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
		   GRBM_STATUS2__CPC_BUSY_MASK |
		   GRBM_STATUS2__CPG_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	/* SRBM_STATUS2 */
	tmp = RREG32(mmSRBM_STATUS2);
	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA;

	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_IH;

	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_SEM;

	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		reset_mask |= AMDGPU_RESET_GRBM;

	if (adev->asic_type != CHIP_TOPAZ) {
		if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
			   SRBM_STATUS__UVD_BUSY_MASK))
			reset_mask |= AMDGPU_RESET_UVD;
	}

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_VMC;

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_MC;

	/* SDMA0_STATUS_REG */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA;

	/* SDMA1_STATUS_REG */
	if (adev->sdma.num_instances > 1) {
		tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			reset_mask |= AMDGPU_RESET_DMA1;
	}
#if 0
	/* VCE_STATUS */
	if (adev->asic_type != CHIP_TOPAZ) {
		tmp = RREG32(mmVCE_STATUS);
		if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
			reset_mask |= AMDGPU_RESET_VCE;
		if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
			reset_mask |= AMDGPU_RESET_VCE1;
	}

	if (adev->asic_type != CHIP_TOPAZ) {
		if (amdgpu_display_is_display_hung(adev))
			reset_mask |= AMDGPU_RESET_DISPLAY;
	}
#endif

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & AMDGPU_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~AMDGPU_RESET_MC;
	}

	return reset_mask;
}

/**
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
	struct amdgpu_mode_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	vi_print_gpu_status_regs(adev);
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable CG/PG */

	/* stop the rlc */
	//XXX
	//gfx_v8_0_rlc_stop(adev);

	/* Disable GFX parsing/prefetching */
	tmp = RREG32(mmCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	WREG32(mmCP_ME_CNTL, tmp);

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	if (reset_mask & AMDGPU_RESET_DMA) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & AMDGPU_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (reset_mask & AMDGPU_RESET_CP) {
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	}

	if (reset_mask & AMDGPU_RESET_DMA)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

	if (reset_mask & AMDGPU_RESET_DMA1)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

	if (reset_mask & AMDGPU_RESET_DISPLAY)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

	if (reset_mask & AMDGPU_RESET_RLC)
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (reset_mask & AMDGPU_RESET_SEM)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	if (reset_mask & AMDGPU_RESET_IH)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

	if (reset_mask & AMDGPU_RESET_GRBM)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

	if (reset_mask & AMDGPU_RESET_VMC)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (reset_mask & AMDGPU_RESET_UVD)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (reset_mask & AMDGPU_RESET_VCE)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

	if (reset_mask & AMDGPU_RESET_VCE1)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

	if (!(adev->flags & AMD_IS_APU)) {
		if (reset_mask & AMDGPU_RESET_MC)
			srbm_soft_reset =
				REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	gmc_v8_0_mc_resume(adev, &save);
	udelay(50);

	vi_print_gpu_status_regs(adev);
}

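/**
 * vi_gpu_pci_config_reset - reset the GPU through PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Halts the CP and SDMA engines, stops memory access, triggers a PCI
 * config reset and then waits for the ASIC to come back (CONFIG_MEMSIZE
 * becomes readable again).
 */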
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp, i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */

	/* Disable GFX parsing/prefetching */
	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

	/* Disable MEC parsing/prefetching */
	WREG32(mmCP_MEC_CNTL,
			CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

	/* sdma0 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

	/* sdma1 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	//XXX
	//gfx_v8_0_rlc_stop(adev);

	udelay(50);

	/* disable mem access */
	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

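/**
 * vi_set_bios_scratch_engine_hung - flag the GPU as hung in BIOS scratch
 *
 * @adev: amdgpu_device pointer
 * @hung: true to set the engine-hung flag, false to clear it
 *
 * Sets or clears ATOM_S3_ASIC_GUI_ENGINE_HUNG in BIOS scratch register 3
 * around a GPU reset (see vi_asic_reset() below).
 */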
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	u32 reset_mask;

	reset_mask = vi_gpu_check_soft_reset(adev);

	if (reset_mask)
		vi_set_bios_scratch_engine_hung(adev, true);

	/* try soft reset */
	vi_gpu_soft_reset(adev, reset_mask);

	reset_mask = vi_gpu_check_soft_reset(adev);

	/* try pci config reset */
	if (reset_mask && amdgpu_hard_reset)
		vi_gpu_pci_config_reset(adev);

	reset_mask = vi_gpu_check_soft_reset(adev);

	if (!reset_mask)
		vi_set_bios_scratch_engine_hung(adev, false);

	return 0;
}

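/**
 * vi_set_uvd_clock - program one UVD clock (VCLK or DCLK)
 *
 * @adev: amdgpu_device pointer
 * @clock: target clock frequency
 * @cntl_reg: SMC control register holding the clock divider
 * @status_reg: SMC status register to poll for completion
 *
 * Looks up the atom clock dividers for @clock, programs the post
 * divider and polls the status register until the clock reports
 * stable.  Returns 0 on success, -ETIMEDOUT if it never settles.
 */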
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	return vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

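/*
 * IP block tables.  amdgpu brings the IP blocks up, resumes them and
 * tears them down in array order, so each table lists the blocks in the
 * bring-up order required by that ASIC.
 */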
/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

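/**
 * vi_set_ip_blocks - register the IP block table for the ASIC
 *
 * @adev: amdgpu_device pointer
 *
 * Points adev->ip_blocks at the table matching adev->asic_type.
 * Returns 0 on success, -EINVAL for unsupported ASICs.
 */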
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		break;
	case CHIP_FIJI:
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		break;
	case CHIP_TONGA:
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

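/**
 * vi_get_rev_id - read the internal revision id
 *
 * @adev: amdgpu_device pointer
 *
 * APUs expose the revision in a fuse read through the SMC; dGPUs
 * expose it in the PCIE_EFUSE4 strap.
 */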
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_cu_info = &gfx_v8_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};

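/**
 * vi_common_early_init - set up register accessors and per-ASIC flags
 *
 * @handle: amdgpu_device pointer
 *
 * Installs the indirect register accessors and asic callbacks, reads
 * the revision id and fills in the clock/power gating flags and the
 * external revision id for the detected ASIC.
 */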
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->has_uvd = false;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->has_uvd = true;
		adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
				AMDGPU_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		/* Disable UVD pg */
		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static void vi_common_print_status(void *handle)
{
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

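/*
 * Fiji clockgating helpers.  Each follows the same read-modify-write
 * pattern: read the register, flip the gating bits for the requested
 * state and write the result back only if the value actually changed.
 */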
static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable)
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable)
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable)
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable)
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.print_status = vi_common_print_status,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};