xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/soc15.c (revision 165f2d28)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_atombios.h"
30 #include "amdgpu_ih.h"
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
33 #include "amdgpu_ucode.h"
34 #include "amdgpu_psp.h"
35 #include "atom.h"
36 #include "amd_pcie.h"
37 
38 #include "uvd/uvd_7_0_offset.h"
39 #include "gc/gc_9_0_offset.h"
40 #include "gc/gc_9_0_sh_mask.h"
41 #include "sdma0/sdma0_4_0_offset.h"
42 #include "sdma1/sdma1_4_0_offset.h"
43 #include "hdp/hdp_4_0_offset.h"
44 #include "hdp/hdp_4_0_sh_mask.h"
45 #include "smuio/smuio_9_0_offset.h"
46 #include "smuio/smuio_9_0_sh_mask.h"
47 #include "nbio/nbio_7_0_default.h"
48 #include "nbio/nbio_7_0_offset.h"
49 #include "nbio/nbio_7_0_sh_mask.h"
50 #include "nbio/nbio_7_0_smn.h"
51 #include "mp/mp_9_0_offset.h"
52 
53 #include "soc15.h"
54 #include "soc15_common.h"
55 #include "gfx_v9_0.h"
56 #include "gmc_v9_0.h"
57 #include "gfxhub_v1_0.h"
58 #include "mmhub_v1_0.h"
59 #include "df_v1_7.h"
60 #include "df_v3_6.h"
61 #include "nbio_v6_1.h"
62 #include "nbio_v7_0.h"
63 #include "nbio_v7_4.h"
64 #include "vega10_ih.h"
65 #include "sdma_v4_0.h"
66 #include "uvd_v7_0.h"
67 #include "vce_v4_0.h"
68 #include "vcn_v1_0.h"
69 #include "vcn_v2_0.h"
70 #include "jpeg_v2_0.h"
71 #include "vcn_v2_5.h"
72 #include "jpeg_v2_5.h"
73 #include "dce_virtual.h"
74 #include "mxgpu_ai.h"
75 #include "amdgpu_smu.h"
76 #include "amdgpu_ras.h"
77 #include "amdgpu_xgmi.h"
78 #include <uapi/linux/kfd_ioctl.h>
79 
80 #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
81 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
82 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
83 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
84 
85 /* for Vega20 register name change */
86 #define mmHDP_MEM_POWER_CTRL	0x00d4
87 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
88 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
89 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
90 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK		0x00020000L
91 #define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
92 
93 /* for Vega20/Arcturus register offset change */
94 #define	mmROM_INDEX_VG20				0x00e4
95 #define	mmROM_INDEX_VG20_BASE_IDX			0
96 #define	mmROM_DATA_VG20					0x00e5
97 #define	mmROM_DATA_VG20_BASE_IDX			0
98 
99 /*
100  * Indirect register accessors
101  */
102 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
103 {
104 	unsigned long flags, address, data;
105 	u32 r;
106 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
107 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
108 
109 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
110 	WREG32(address, reg);
111 	(void)RREG32(address);
112 	r = RREG32(data);
113 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
114 	return r;
115 }
116 
117 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
118 {
119 	unsigned long flags, address, data;
120 
121 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
122 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
123 
124 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
125 	WREG32(address, reg);
126 	(void)RREG32(address);
127 	WREG32(data, v);
128 	(void)RREG32(data);
129 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
130 }
131 
132 static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
133 {
134 	unsigned long flags, address, data;
135 	u64 r;
136 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
137 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
138 
139 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
140 	/* read low 32 bit */
141 	WREG32(address, reg);
142 	(void)RREG32(address);
143 	r = RREG32(data);
144 
145 	/* read high 32 bit */
146 	WREG32(address, reg + 4);
147 	(void)RREG32(address);
148 	r |= ((u64)RREG32(data) << 32);
149 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
150 	return r;
151 }
152 
153 static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
154 {
155 	unsigned long flags, address, data;
156 
157 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
158 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
159 
160 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
161 	/* write low 32 bit */
162 	WREG32(address, reg);
163 	(void)RREG32(address);
164 	WREG32(data, (u32)(v & 0xffffffffULL));
165 	(void)RREG32(data);
166 
167 	/* write high 32 bit */
168 	WREG32(address, reg + 4);
169 	(void)RREG32(address);
170 	WREG32(data, (u32)(v >> 32));
171 	(void)RREG32(data);
172 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
173 }
174 
175 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
176 {
177 	unsigned long flags, address, data;
178 	u32 r;
179 
180 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
181 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
182 
183 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
184 	WREG32(address, ((reg) & 0x1ff));
185 	r = RREG32(data);
186 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
187 	return r;
188 }
189 
190 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
191 {
192 	unsigned long flags, address, data;
193 
194 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
195 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
196 
197 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
198 	WREG32(address, ((reg) & 0x1ff));
199 	WREG32(data, (v));
200 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
201 }
202 
203 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
204 {
205 	unsigned long flags, address, data;
206 	u32 r;
207 
208 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
209 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
210 
211 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
212 	WREG32(address, (reg));
213 	r = RREG32(data);
214 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
215 	return r;
216 }
217 
218 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
219 {
220 	unsigned long flags, address, data;
221 
222 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
223 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
224 
225 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
226 	WREG32(address, (reg));
227 	WREG32(data, (v));
228 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
229 }
230 
231 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
232 {
233 	unsigned long flags;
234 	u32 r;
235 
236 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
237 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
238 	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
239 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
240 	return r;
241 }
242 
243 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
244 {
245 	unsigned long flags;
246 
247 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
248 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
249 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
250 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
251 }
252 
253 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
254 {
255 	unsigned long flags;
256 	u32 r;
257 
258 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
259 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
260 	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
261 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
262 	return r;
263 }
264 
265 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
266 {
267 	unsigned long flags;
268 
269 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
270 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
271 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
272 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
273 }
274 
275 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
276 {
277 	return adev->nbio.funcs->get_memsize(adev);
278 }
279 
280 static u32 soc15_get_xclk(struct amdgpu_device *adev)
281 {
282 	u32 reference_clock = adev->clock.spll.reference_freq;
283 
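	/* Raven's xclk is the SPLL reference clock divided by 4 */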
284 	if (adev->asic_type == CHIP_RAVEN)
285 		return reference_clock / 4;
286 
287 	return reference_clock;
288 }
289 
290 
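/* select the ME/pipe/queue and VMID that subsequent GRBM-indexed register accesses will target */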
291 void soc15_grbm_select(struct amdgpu_device *adev,
292 		     u32 me, u32 pipe, u32 queue, u32 vmid)
293 {
294 	u32 grbm_gfx_cntl = 0;
295 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
296 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
297 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
298 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
299 
300 	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
301 }
302 
303 static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
304 {
305 	/* todo */
306 }
307 
308 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
309 {
310 	/* todo */
311 	return false;
312 }
313 
314 static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
315 				     u8 *bios, u32 length_bytes)
316 {
317 	u32 *dw_ptr;
318 	u32 i, length_dw;
319 	uint32_t rom_index_offset;
320 	uint32_t rom_data_offset;
321 
322 	if (bios == NULL)
323 		return false;
324 	if (length_bytes == 0)
325 		return false;
326 	/* the APU VBIOS image is part of the system BIOS image */
327 	if (adev->flags & AMD_IS_APU)
328 		return false;
329 
330 	dw_ptr = (u32 *)bios;
331 	length_dw = ALIGN(length_bytes, 4) / 4;
332 
333 	switch (adev->asic_type) {
334 	case CHIP_VEGA20:
335 	case CHIP_ARCTURUS:
336 		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
337 		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
338 		break;
339 	default:
340 		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
341 		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
342 		break;
343 	}
344 
345 	/* set rom index to 0 */
346 	WREG32(rom_index_offset, 0);
347 	/* read out the rom data */
348 	for (i = 0; i < length_dw; i++)
349 		dw_ptr[i] = RREG32(rom_data_offset);
350 
351 	return true;
352 }
353 
354 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
355 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
356 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
357 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
358 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
359 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
360 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
361 	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
362 	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
363 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
364 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
365 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
366 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
367 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
368 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
369 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
370 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
371 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
372 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
373 	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
374 	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
375 };
376 
377 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
378 					 u32 sh_num, u32 reg_offset)
379 {
380 	uint32_t val;
381 
382 	mutex_lock(&adev->grbm_idx_mutex);
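	/* se_num/sh_num of 0xffffffff means no specific SE/SH is selected */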
383 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
384 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
385 
386 	val = RREG32(reg_offset);
387 
388 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
389 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
390 	mutex_unlock(&adev->grbm_idx_mutex);
391 	return val;
392 }
393 
394 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
395 					 bool indexed, u32 se_num,
396 					 u32 sh_num, u32 reg_offset)
397 {
398 	if (indexed) {
399 		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
400 	} else {
401 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
402 			return adev->gfx.config.gb_addr_config;
403 		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
404 			return adev->gfx.config.db_debug2;
405 		return RREG32(reg_offset);
406 	}
407 }
408 
409 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
410 			    u32 sh_num, u32 reg_offset, u32 *value)
411 {
412 	uint32_t i;
413 	struct soc15_allowed_register_entry  *en;
414 
415 	*value = 0;
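	/* only registers in soc15_allowed_read_registers may be read through this interface */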
416 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
417 		en = &soc15_allowed_read_registers[i];
418 		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
419 					+ en->reg_offset))
420 			continue;
421 
422 		*value = soc15_get_register_value(adev,
423 						  soc15_allowed_read_registers[i].grbm_indexed,
424 						  se_num, sh_num, reg_offset);
425 		return 0;
426 	}
427 	return -EINVAL;
428 }
429 
430 
431 /**
432  * soc15_program_register_sequence - program an array of registers.
433  *
434  * @adev: amdgpu_device pointer
435  * @regs: pointer to the register array
436  * @array_size: size of the register array
437  *
438  * Programs an array of registers with AND and OR masks.
439  * This is a helper for setting golden registers.
440  */
441 
442 void soc15_program_register_sequence(struct amdgpu_device *adev,
443 					     const struct soc15_reg_golden *regs,
444 					     const u32 array_size)
445 {
446 	const struct soc15_reg_golden *entry;
447 	u32 tmp, reg;
448 	int i;
449 
450 	for (i = 0; i < array_size; ++i) {
451 		entry = &regs[i];
452 		reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
453 
454 		if (entry->and_mask == 0xffffffff) {
455 			tmp = entry->or_mask;
456 		} else {
457 			tmp = RREG32(reg);
458 			tmp &= ~(entry->and_mask);
459 			tmp |= (entry->or_mask & entry->and_mask);
460 		}
461 
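		/* a few GC registers are written through the RLC-aware WREG32_RLC path */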
462 		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
463 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
464 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
465 			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
466 			WREG32_RLC(reg, tmp);
467 		else
468 			WREG32(reg, tmp);
469 
470 	}
471 
472 }
473 
474 static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
475 {
476 	u32 i;
477 	int ret = 0;
478 
479 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
480 
481 	dev_info(adev->dev, "GPU mode1 reset\n");
482 
483 	/* disable BM */
484 	pci_clear_master(adev->pdev);
485 
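	/* save PCI config space so it can be restored after the reset */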
486 	pci_save_state(adev->pdev);
487 
488 	ret = psp_gpu_reset(adev);
489 	if (ret)
490 		dev_err(adev->dev, "GPU mode1 reset failed\n");
491 
492 	pci_restore_state(adev->pdev);
493 
494 	/* wait for asic to come out of reset */
495 	for (i = 0; i < adev->usec_timeout; i++) {
496 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
497 
498 		if (memsize != 0xffffffff)
499 			break;
500 		udelay(1);
501 	}
502 
503 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
504 
505 	return ret;
506 }
507 
508 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
509 {
510 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
511 	int ret = 0;
512 
513 	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
514 	if (ras && ras->supported)
515 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
516 
517 	ret = amdgpu_dpm_baco_reset(adev);
518 	if (ret)
519 		return ret;
520 
521 	/* re-enable doorbell interrupt after BACO exit */
522 	if (ras && ras->supported)
523 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
524 
525 	return 0;
526 }
527 
528 static enum amd_reset_method
529 soc15_asic_reset_method(struct amdgpu_device *adev)
530 {
531 	bool baco_reset = false;
532 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
533 
534 	switch (adev->asic_type) {
535 	case CHIP_RAVEN:
536 	case CHIP_RENOIR:
537 		return AMD_RESET_METHOD_MODE2;
538 	case CHIP_VEGA10:
539 	case CHIP_VEGA12:
540 	case CHIP_ARCTURUS:
541 		baco_reset = amdgpu_dpm_is_baco_supported(adev);
542 		break;
543 	case CHIP_VEGA20:
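		/* only consider BACO when the PSP sOS firmware is new enough */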
544 		if (adev->psp.sos_fw_version >= 0x80067)
545 			baco_reset = amdgpu_dpm_is_baco_supported(adev);
546 
547 		/*
548 		 * 1. PMFW version > 0x284300: all cases use BACO
549 		 * 2. PMFW version <= 0x284300: only an sGPU without RAS uses BACO
550 		 */
551 		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
552 			baco_reset = false;
553 		break;
554 	default:
555 		break;
556 	}
557 
558 	if (baco_reset)
559 		return AMD_RESET_METHOD_BACO;
560 	else
561 		return AMD_RESET_METHOD_MODE1;
562 }
563 
564 static int soc15_asic_reset(struct amdgpu_device *adev)
565 {
566 	/* original raven doesn't have full asic reset */
567 	if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
568 		return 0;
569 
570 	switch (soc15_asic_reset_method(adev)) {
571 	case AMD_RESET_METHOD_BACO:
572 		return soc15_asic_baco_reset(adev);
573 	case AMD_RESET_METHOD_MODE2:
574 		return amdgpu_dpm_mode2_reset(adev);
575 	default:
576 		return soc15_asic_mode1_reset(adev);
577 	}
578 }
579 
580 static bool soc15_supports_baco(struct amdgpu_device *adev)
581 {
582 	switch (adev->asic_type) {
583 	case CHIP_VEGA10:
584 	case CHIP_VEGA12:
585 	case CHIP_ARCTURUS:
586 		return amdgpu_dpm_is_baco_supported(adev);
587 	case CHIP_VEGA20:
588 		if (adev->psp.sos_fw_version >= 0x80067)
589 			return amdgpu_dpm_is_baco_supported(adev);
590 		return false;
591 	default:
592 		return false;
593 	}
594 }
595 
596 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
597 			u32 cntl_reg, u32 status_reg)
598 {
599 	return 0;
600 }*/
601 
602 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
603 {
604 	/*int r;
605 
606 	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
607 	if (r)
608 		return r;
609 
610 	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
611 	*/
612 	return 0;
613 }
614 
615 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
616 {
617 	/* todo */
618 
619 	return 0;
620 }
621 
622 static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
623 {
624 	if (pci_is_root_bus(adev->pdev->bus))
625 		return;
626 
627 	if (amdgpu_pcie_gen2 == 0)
628 		return;
629 
630 	if (adev->flags & AMD_IS_APU)
631 		return;
632 
633 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
634 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
635 		return;
636 
637 	/* todo */
638 }
639 
640 static void soc15_program_aspm(struct amdgpu_device *adev)
641 {
642 
643 	if (amdgpu_aspm == 0)
644 		return;
645 
646 	/* todo */
647 }
648 
649 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
650 					   bool enable)
651 {
652 	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
653 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
654 }
655 
656 static const struct amdgpu_ip_block_version vega10_common_ip_block =
657 {
658 	.type = AMD_IP_BLOCK_TYPE_COMMON,
659 	.major = 2,
660 	.minor = 0,
661 	.rev = 0,
662 	.funcs = &soc15_common_ip_funcs,
663 };
664 
665 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
666 {
667 	return adev->nbio.funcs->get_rev_id(adev);
668 }
669 
670 int soc15_set_ip_blocks(struct amdgpu_device *adev)
671 {
672 	/* Set IP register base before any HW register access */
673 	switch (adev->asic_type) {
674 	case CHIP_VEGA10:
675 	case CHIP_VEGA12:
676 	case CHIP_RAVEN:
677 	case CHIP_RENOIR:
678 		vega10_reg_base_init(adev);
679 		break;
680 	case CHIP_VEGA20:
681 		vega20_reg_base_init(adev);
682 		break;
683 	case CHIP_ARCTURUS:
684 		arct_reg_base_init(adev);
685 		break;
686 	default:
687 		return -EINVAL;
688 	}
689 
690 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
691 		adev->gmc.xgmi.supported = true;
692 
693 	if (adev->flags & AMD_IS_APU) {
694 		adev->nbio.funcs = &nbio_v7_0_funcs;
695 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
696 	} else if (adev->asic_type == CHIP_VEGA20 ||
697 		   adev->asic_type == CHIP_ARCTURUS) {
698 		adev->nbio.funcs = &nbio_v7_4_funcs;
699 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
700 	} else {
701 		adev->nbio.funcs = &nbio_v6_1_funcs;
702 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
703 	}
704 
705 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
706 		adev->df.funcs = &df_v3_6_funcs;
707 	else
708 		adev->df.funcs = &df_v1_7_funcs;
709 
710 	adev->rev_id = soc15_get_rev_id(adev);
711 	adev->nbio.funcs->detect_hw_virt(adev);
712 
713 	if (amdgpu_sriov_vf(adev))
714 		adev->virt.ops = &xgpu_ai_virt_ops;
715 
716 	switch (adev->asic_type) {
717 	case CHIP_VEGA10:
718 	case CHIP_VEGA12:
719 	case CHIP_VEGA20:
720 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
721 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
722 
723 		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
724 		if (amdgpu_sriov_vf(adev)) {
725 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
726 				if (adev->asic_type == CHIP_VEGA20)
727 					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
728 				else
729 					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
730 			}
731 			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
732 		} else {
733 			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
734 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
735 				if (adev->asic_type == CHIP_VEGA20)
736 					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
737 				else
738 					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
739 			}
740 		}
741 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
742 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
743 		if (is_support_sw_smu(adev)) {
744 			if (!amdgpu_sriov_vf(adev))
745 				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
746 		} else {
747 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
748 		}
749 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
750 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
751 #if defined(CONFIG_DRM_AMD_DC)
752 		else if (amdgpu_device_has_dc_support(adev))
753 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
754 #endif
755 		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
756 			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
757 			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
758 		}
759 		break;
760 	case CHIP_RAVEN:
761 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
762 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
763 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
764 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
765 			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
766 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
767 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
768 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
769 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
770 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
771 #if defined(CONFIG_DRM_AMD_DC)
772 		else if (amdgpu_device_has_dc_support(adev))
773 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
774 #endif
775 		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
776 		break;
777 	case CHIP_ARCTURUS:
778 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
779 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
780 
781 		if (amdgpu_sriov_vf(adev)) {
782 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
783 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
784 			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
785 		} else {
786 			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
787 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
788 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
789 		}
790 
791 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
792 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
793 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
794 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
795 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
796 
797 		if (amdgpu_sriov_vf(adev)) {
798 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
799 				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
800 		} else {
801 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
802 		}
803 		if (!amdgpu_sriov_vf(adev))
804 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
805 		break;
806 	case CHIP_RENOIR:
807 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
808 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
809 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
810 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
811 			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
812 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
813 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
814 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
815 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
816 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
817 #if defined(CONFIG_DRM_AMD_DC)
818 		else if (amdgpu_device_has_dc_support(adev))
819 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
820 #endif
821 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
822 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
823 		break;
824 	default:
825 		return -EINVAL;
826 	}
827 
828 	return 0;
829 }
830 
831 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
832 {
833 	adev->nbio.funcs->hdp_flush(adev, ring);
834 }
835 
836 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
837 				 struct amdgpu_ring *ring)
838 {
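	/* invalidate via MMIO when no ring (or no wreg support) is available, otherwise emit the register write on the ring */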
839 	if (!ring || !ring->funcs->emit_wreg)
840 		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
841 	else
842 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
843 			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
844 }
845 
846 static bool soc15_need_full_reset(struct amdgpu_device *adev)
847 {
848 	/* change this when we implement soft reset */
849 	return true;
850 }
851 
852 static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
853 {
854 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
855 		return;
856 	/* read back the HDP RAS counter to reset it to 0 */
857 	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
858 }
859 
860 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
861 				 uint64_t *count1)
862 {
863 	uint32_t perfctr = 0;
864 	uint64_t cnt0_of, cnt1_of;
865 	int tmp;
866 
867 	/* This reports 0 on APUs, so return to avoid writing/reading registers
868 	 * that may or may not be different from their GPU counterparts
869 	 */
870 	if (adev->flags & AMD_IS_APU)
871 		return;
872 
873 	/* Set the 2 events that we wish to watch, defined above */
874 	/* Reg 40 is # received msgs */
875 	/* Reg 104 is # of posted requests sent */
876 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
877 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
878 
879 	/* Write to enable desired perf counters */
880 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
881 	/* Zero out and enable the perf counters
882 	 * Write 0x5:
883 	 * Bit 0 = Start all counters(1)
884 	 * Bit 2 = Global counter reset enable(1)
885 	 */
886 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
887 
888 	msleep(1000);
889 
890 	/* Load the shadow and disable the perf counters
891 	 * Write 0x2:
892 	 * Bit 0 = Stop counters(0)
893 	 * Bit 1 = Load the shadow counters(1)
894 	 */
895 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
896 
897 	/* Read register values to get any >32bit overflow */
898 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
899 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
900 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
901 
902 	/* Get the values and add the overflow */
903 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
904 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
905 }
906 
907 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
908 				 uint64_t *count1)
909 {
910 	uint32_t perfctr = 0;
911 	uint64_t cnt0_of, cnt1_of;
912 	int tmp;
913 
914 	/* This reports 0 on APUs, so return to avoid writing/reading registers
915 	 * that may or may not be different from their GPU counterparts
916 	 */
917 	if (adev->flags & AMD_IS_APU)
918 		return;
919 
920 	/* Set the 2 events that we wish to watch, defined above */
921 	/* Reg 40 is # received msgs */
922 	/* Reg 108 is # of posted requests sent on VG20 */
923 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
924 				EVENT0_SEL, 40);
925 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
926 				EVENT1_SEL, 108);
927 
928 	/* Write to enable desired perf counters */
929 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
930 	/* Zero out and enable the perf counters
931 	 * Write 0x5:
932 	 * Bit 0 = Start all counters(1)
933 	 * Bit 2 = Global counter reset enable(1)
934 	 */
935 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
936 
937 	msleep(1000);
938 
939 	/* Load the shadow and disable the perf counters
940 	 * Write 0x2:
941 	 * Bit 0 = Stop counters(0)
942 	 * Bit 1 = Load the shadow counters(1)
943 	 */
944 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
945 
946 	/* Read register values to get any >32bit overflow */
947 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
948 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
949 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
950 
951 	/* Get the values and add the overflow */
952 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
953 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
954 }
955 
956 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
957 {
958 	u32 sol_reg;
959 
960 	/* Just return false for soc15 GPUs.  Reset does not seem to
961 	 * be necessary.
962 	 */
963 	if (!amdgpu_passthrough(adev))
964 		return false;
965 
966 	if (adev->flags & AMD_IS_APU)
967 		return false;
968 
969 	/* Check sOS sign of life register to confirm sys driver and sOS
970 	 * have already been loaded.
971 	 */
972 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
973 	if (sol_reg)
974 		return true;
975 
976 	return false;
977 }
978 
979 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
980 {
981 	uint64_t nak_r, nak_g;
982 
983 	/* Get the number of NAKs received and generated */
984 	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
985 	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
986 
987 	/* Add the total number of NAKs, i.e. the number of replays */
988 	return (nak_r + nak_g);
989 }
990 
991 static const struct amdgpu_asic_funcs soc15_asic_funcs =
992 {
993 	.read_disabled_bios = &soc15_read_disabled_bios,
994 	.read_bios_from_rom = &soc15_read_bios_from_rom,
995 	.read_register = &soc15_read_register,
996 	.reset = &soc15_asic_reset,
997 	.reset_method = &soc15_asic_reset_method,
998 	.set_vga_state = &soc15_vga_set_state,
999 	.get_xclk = &soc15_get_xclk,
1000 	.set_uvd_clocks = &soc15_set_uvd_clocks,
1001 	.set_vce_clocks = &soc15_set_vce_clocks,
1002 	.get_config_memsize = &soc15_get_config_memsize,
1003 	.flush_hdp = &soc15_flush_hdp,
1004 	.invalidate_hdp = &soc15_invalidate_hdp,
1005 	.need_full_reset = &soc15_need_full_reset,
1006 	.init_doorbell_index = &vega10_doorbell_index_init,
1007 	.get_pcie_usage = &soc15_get_pcie_usage,
1008 	.need_reset_on_init = &soc15_need_reset_on_init,
1009 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
1010 	.supports_baco = &soc15_supports_baco,
1011 };
1012 
1013 static const struct amdgpu_asic_funcs vega20_asic_funcs =
1014 {
1015 	.read_disabled_bios = &soc15_read_disabled_bios,
1016 	.read_bios_from_rom = &soc15_read_bios_from_rom,
1017 	.read_register = &soc15_read_register,
1018 	.reset = &soc15_asic_reset,
1019 	.reset_method = &soc15_asic_reset_method,
1020 	.set_vga_state = &soc15_vga_set_state,
1021 	.get_xclk = &soc15_get_xclk,
1022 	.set_uvd_clocks = &soc15_set_uvd_clocks,
1023 	.set_vce_clocks = &soc15_set_vce_clocks,
1024 	.get_config_memsize = &soc15_get_config_memsize,
1025 	.flush_hdp = &soc15_flush_hdp,
1026 	.invalidate_hdp = &soc15_invalidate_hdp,
1027 	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
1028 	.need_full_reset = &soc15_need_full_reset,
1029 	.init_doorbell_index = &vega20_doorbell_index_init,
1030 	.get_pcie_usage = &vega20_get_pcie_usage,
1031 	.need_reset_on_init = &soc15_need_reset_on_init,
1032 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
1033 	.supports_baco = &soc15_supports_baco,
1034 };
1035 
1036 static int soc15_common_early_init(void *handle)
1037 {
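/* hole at the end of the first 512KB of the register BAR; HDP registers are remapped here so they can be exposed to user space */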
1038 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
1039 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1040 
1041 	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
1042 	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
1043 	adev->smc_rreg = NULL;
1044 	adev->smc_wreg = NULL;
1045 	adev->pcie_rreg = &soc15_pcie_rreg;
1046 	adev->pcie_wreg = &soc15_pcie_wreg;
1047 	adev->pcie_rreg64 = &soc15_pcie_rreg64;
1048 	adev->pcie_wreg64 = &soc15_pcie_wreg64;
1049 	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
1050 	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
1051 	adev->didt_rreg = &soc15_didt_rreg;
1052 	adev->didt_wreg = &soc15_didt_wreg;
1053 	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
1054 	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
1055 	adev->se_cac_rreg = &soc15_se_cac_rreg;
1056 	adev->se_cac_wreg = &soc15_se_cac_wreg;
1057 
1058 
1059 	adev->external_rev_id = 0xFF;
1060 	switch (adev->asic_type) {
1061 	case CHIP_VEGA10:
1062 		adev->asic_funcs = &soc15_asic_funcs;
1063 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1064 			AMD_CG_SUPPORT_GFX_MGLS |
1065 			AMD_CG_SUPPORT_GFX_RLC_LS |
1066 			AMD_CG_SUPPORT_GFX_CP_LS |
1067 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1068 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1069 			AMD_CG_SUPPORT_GFX_CGCG |
1070 			AMD_CG_SUPPORT_GFX_CGLS |
1071 			AMD_CG_SUPPORT_BIF_MGCG |
1072 			AMD_CG_SUPPORT_BIF_LS |
1073 			AMD_CG_SUPPORT_HDP_LS |
1074 			AMD_CG_SUPPORT_DRM_MGCG |
1075 			AMD_CG_SUPPORT_DRM_LS |
1076 			AMD_CG_SUPPORT_ROM_MGCG |
1077 			AMD_CG_SUPPORT_DF_MGCG |
1078 			AMD_CG_SUPPORT_SDMA_MGCG |
1079 			AMD_CG_SUPPORT_SDMA_LS |
1080 			AMD_CG_SUPPORT_MC_MGCG |
1081 			AMD_CG_SUPPORT_MC_LS;
1082 		adev->pg_flags = 0;
1083 		adev->external_rev_id = 0x1;
1084 		break;
1085 	case CHIP_VEGA12:
1086 		adev->asic_funcs = &soc15_asic_funcs;
1087 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1088 			AMD_CG_SUPPORT_GFX_MGLS |
1089 			AMD_CG_SUPPORT_GFX_CGCG |
1090 			AMD_CG_SUPPORT_GFX_CGLS |
1091 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1092 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1093 			AMD_CG_SUPPORT_GFX_CP_LS |
1094 			AMD_CG_SUPPORT_MC_LS |
1095 			AMD_CG_SUPPORT_MC_MGCG |
1096 			AMD_CG_SUPPORT_SDMA_MGCG |
1097 			AMD_CG_SUPPORT_SDMA_LS |
1098 			AMD_CG_SUPPORT_BIF_MGCG |
1099 			AMD_CG_SUPPORT_BIF_LS |
1100 			AMD_CG_SUPPORT_HDP_MGCG |
1101 			AMD_CG_SUPPORT_HDP_LS |
1102 			AMD_CG_SUPPORT_ROM_MGCG |
1103 			AMD_CG_SUPPORT_VCE_MGCG |
1104 			AMD_CG_SUPPORT_UVD_MGCG;
1105 		adev->pg_flags = 0;
1106 		adev->external_rev_id = adev->rev_id + 0x14;
1107 		break;
1108 	case CHIP_VEGA20:
1109 		adev->asic_funcs = &vega20_asic_funcs;
1110 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1111 			AMD_CG_SUPPORT_GFX_MGLS |
1112 			AMD_CG_SUPPORT_GFX_CGCG |
1113 			AMD_CG_SUPPORT_GFX_CGLS |
1114 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1115 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1116 			AMD_CG_SUPPORT_GFX_CP_LS |
1117 			AMD_CG_SUPPORT_MC_LS |
1118 			AMD_CG_SUPPORT_MC_MGCG |
1119 			AMD_CG_SUPPORT_SDMA_MGCG |
1120 			AMD_CG_SUPPORT_SDMA_LS |
1121 			AMD_CG_SUPPORT_BIF_MGCG |
1122 			AMD_CG_SUPPORT_BIF_LS |
1123 			AMD_CG_SUPPORT_HDP_MGCG |
1124 			AMD_CG_SUPPORT_HDP_LS |
1125 			AMD_CG_SUPPORT_ROM_MGCG |
1126 			AMD_CG_SUPPORT_VCE_MGCG |
1127 			AMD_CG_SUPPORT_UVD_MGCG;
1128 		adev->pg_flags = 0;
1129 		adev->external_rev_id = adev->rev_id + 0x28;
1130 		break;
1131 	case CHIP_RAVEN:
1132 		adev->asic_funcs = &soc15_asic_funcs;
1133 		if (adev->rev_id >= 0x8)
1134 			adev->external_rev_id = adev->rev_id + 0x79;
1135 		else if (adev->pdev->device == 0x15d8)
1136 			adev->external_rev_id = adev->rev_id + 0x41;
1137 		else if (adev->rev_id == 1)
1138 			adev->external_rev_id = adev->rev_id + 0x20;
1139 		else
1140 			adev->external_rev_id = adev->rev_id + 0x01;
1141 
1142 		if (adev->rev_id >= 0x8) {
1143 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1144 				AMD_CG_SUPPORT_GFX_MGLS |
1145 				AMD_CG_SUPPORT_GFX_CP_LS |
1146 				AMD_CG_SUPPORT_GFX_3D_CGCG |
1147 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1148 				AMD_CG_SUPPORT_GFX_CGCG |
1149 				AMD_CG_SUPPORT_GFX_CGLS |
1150 				AMD_CG_SUPPORT_BIF_LS |
1151 				AMD_CG_SUPPORT_HDP_LS |
1152 				AMD_CG_SUPPORT_ROM_MGCG |
1153 				AMD_CG_SUPPORT_MC_MGCG |
1154 				AMD_CG_SUPPORT_MC_LS |
1155 				AMD_CG_SUPPORT_SDMA_MGCG |
1156 				AMD_CG_SUPPORT_SDMA_LS |
1157 				AMD_CG_SUPPORT_VCN_MGCG;
1158 
1159 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1160 		} else if (adev->pdev->device == 0x15d8) {
1161 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1162 				AMD_CG_SUPPORT_GFX_MGLS |
1163 				AMD_CG_SUPPORT_GFX_CP_LS |
1164 				AMD_CG_SUPPORT_GFX_3D_CGCG |
1165 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1166 				AMD_CG_SUPPORT_GFX_CGCG |
1167 				AMD_CG_SUPPORT_GFX_CGLS |
1168 				AMD_CG_SUPPORT_BIF_LS |
1169 				AMD_CG_SUPPORT_HDP_LS |
1170 				AMD_CG_SUPPORT_ROM_MGCG |
1171 				AMD_CG_SUPPORT_MC_MGCG |
1172 				AMD_CG_SUPPORT_MC_LS |
1173 				AMD_CG_SUPPORT_SDMA_MGCG |
1174 				AMD_CG_SUPPORT_SDMA_LS;
1175 
1176 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1177 				AMD_PG_SUPPORT_MMHUB |
1178 				AMD_PG_SUPPORT_VCN |
1179 				AMD_PG_SUPPORT_VCN_DPG;
1180 		} else {
1181 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1182 				AMD_CG_SUPPORT_GFX_MGLS |
1183 				AMD_CG_SUPPORT_GFX_RLC_LS |
1184 				AMD_CG_SUPPORT_GFX_CP_LS |
1185 				AMD_CG_SUPPORT_GFX_3D_CGCG |
1186 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1187 				AMD_CG_SUPPORT_GFX_CGCG |
1188 				AMD_CG_SUPPORT_GFX_CGLS |
1189 				AMD_CG_SUPPORT_BIF_MGCG |
1190 				AMD_CG_SUPPORT_BIF_LS |
1191 				AMD_CG_SUPPORT_HDP_MGCG |
1192 				AMD_CG_SUPPORT_HDP_LS |
1193 				AMD_CG_SUPPORT_DRM_MGCG |
1194 				AMD_CG_SUPPORT_DRM_LS |
1195 				AMD_CG_SUPPORT_ROM_MGCG |
1196 				AMD_CG_SUPPORT_MC_MGCG |
1197 				AMD_CG_SUPPORT_MC_LS |
1198 				AMD_CG_SUPPORT_SDMA_MGCG |
1199 				AMD_CG_SUPPORT_SDMA_LS |
1200 				AMD_CG_SUPPORT_VCN_MGCG;
1201 
1202 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1203 		}
1204 		break;
1205 	case CHIP_ARCTURUS:
1206 		adev->asic_funcs = &vega20_asic_funcs;
1207 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1208 			AMD_CG_SUPPORT_GFX_MGLS |
1209 			AMD_CG_SUPPORT_GFX_CGCG |
1210 			AMD_CG_SUPPORT_GFX_CGLS |
1211 			AMD_CG_SUPPORT_GFX_CP_LS |
1212 			AMD_CG_SUPPORT_HDP_MGCG |
1213 			AMD_CG_SUPPORT_HDP_LS |
1214 			AMD_CG_SUPPORT_SDMA_MGCG |
1215 			AMD_CG_SUPPORT_SDMA_LS |
1216 			AMD_CG_SUPPORT_MC_MGCG |
1217 			AMD_CG_SUPPORT_MC_LS |
1218 			AMD_CG_SUPPORT_IH_CG |
1219 			AMD_CG_SUPPORT_VCN_MGCG |
1220 			AMD_CG_SUPPORT_JPEG_MGCG;
1221 		adev->pg_flags = 0;
1222 		adev->external_rev_id = adev->rev_id + 0x32;
1223 		break;
1224 	case CHIP_RENOIR:
1225 		adev->asic_funcs = &soc15_asic_funcs;
1226 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1227 				 AMD_CG_SUPPORT_GFX_MGLS |
1228 				 AMD_CG_SUPPORT_GFX_3D_CGCG |
1229 				 AMD_CG_SUPPORT_GFX_3D_CGLS |
1230 				 AMD_CG_SUPPORT_GFX_CGCG |
1231 				 AMD_CG_SUPPORT_GFX_CGLS |
1232 				 AMD_CG_SUPPORT_GFX_CP_LS |
1233 				 AMD_CG_SUPPORT_MC_MGCG |
1234 				 AMD_CG_SUPPORT_MC_LS |
1235 				 AMD_CG_SUPPORT_SDMA_MGCG |
1236 				 AMD_CG_SUPPORT_SDMA_LS |
1237 				 AMD_CG_SUPPORT_BIF_LS |
1238 				 AMD_CG_SUPPORT_HDP_LS |
1239 				 AMD_CG_SUPPORT_ROM_MGCG |
1240 				 AMD_CG_SUPPORT_VCN_MGCG |
1241 				 AMD_CG_SUPPORT_JPEG_MGCG |
1242 				 AMD_CG_SUPPORT_IH_CG |
1243 				 AMD_CG_SUPPORT_ATHUB_LS |
1244 				 AMD_CG_SUPPORT_ATHUB_MGCG |
1245 				 AMD_CG_SUPPORT_DF_MGCG;
1246 		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1247 				 AMD_PG_SUPPORT_VCN |
1248 				 AMD_PG_SUPPORT_JPEG |
1249 				 AMD_PG_SUPPORT_VCN_DPG;
1250 		adev->external_rev_id = adev->rev_id + 0x91;
1251 		break;
1252 	default:
1253 		/* FIXME: not supported yet */
1254 		return -EINVAL;
1255 	}
1256 
1257 	if (amdgpu_sriov_vf(adev)) {
1258 		amdgpu_virt_init_setting(adev);
1259 		xgpu_ai_mailbox_set_irq_funcs(adev);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static int soc15_common_late_init(void *handle)
1266 {
1267 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1268 	int r = 0;
1269 
1270 	if (amdgpu_sriov_vf(adev))
1271 		xgpu_ai_mailbox_get_irq(adev);
1272 
1273 	if (adev->asic_funcs &&
1274 	    adev->asic_funcs->reset_hdp_ras_error_count)
1275 		adev->asic_funcs->reset_hdp_ras_error_count(adev);
1276 
1277 	if (adev->nbio.funcs->ras_late_init)
1278 		r = adev->nbio.funcs->ras_late_init(adev);
1279 
1280 	return r;
1281 }
1282 
1283 static int soc15_common_sw_init(void *handle)
1284 {
1285 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1286 
1287 	if (amdgpu_sriov_vf(adev))
1288 		xgpu_ai_mailbox_add_irq_id(adev);
1289 
1290 	adev->df.funcs->sw_init(adev);
1291 
1292 	return 0;
1293 }
1294 
1295 static int soc15_common_sw_fini(void *handle)
1296 {
1297 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1298 
1299 	amdgpu_nbio_ras_fini(adev);
1300 	adev->df.funcs->sw_fini(adev);
1301 	return 0;
1302 }
1303 
1304 static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1305 {
1306 	int i;
1307 	struct amdgpu_ring *ring;
1308 
1309 	/* SDMA/IH doorbell ranges are programmed by the hypervisor */
1310 	if (!amdgpu_sriov_vf(adev)) {
1311 		for (i = 0; i < adev->sdma.num_instances; i++) {
1312 			ring = &adev->sdma.instance[i].ring;
1313 			adev->nbio.funcs->sdma_doorbell_range(adev, i,
1314 				ring->use_doorbell, ring->doorbell_index,
1315 				adev->doorbell_index.sdma_doorbell_range);
1316 		}
1317 
1318 		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1319 						adev->irq.ih.doorbell_index);
1320 	}
1321 }
1322 
1323 static int soc15_common_hw_init(void *handle)
1324 {
1325 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1326 
1327 	/* enable pcie gen2/3 link */
1328 	soc15_pcie_gen3_enable(adev);
1329 	/* enable aspm */
1330 	soc15_program_aspm(adev);
1331 	/* setup nbio registers */
1332 	adev->nbio.funcs->init_registers(adev);
1333 	/* remap HDP registers to a hole in MMIO space,
1334 	 * in order to expose those registers
1335 	 * to process space
1336 	 */
1337 	if (adev->nbio.funcs->remap_hdp_registers)
1338 		adev->nbio.funcs->remap_hdp_registers(adev);
1339 
1340 	/* enable the doorbell aperture */
1341 	soc15_enable_doorbell_aperture(adev, true);
1342 	/* HW doorbell routing policy: doorbell writes not
1343 	 * in the SDMA/IH/MM/ACV ranges will be routed to CP,
1344 	 * so we need to init the SDMA/IH/MM/ACV doorbell ranges
1345 	 * prior to CP IP block init and ring test.
1346 	 */
1347 	soc15_doorbell_range_init(adev);
1348 
1349 	return 0;
1350 }
1351 
1352 static int soc15_common_hw_fini(void *handle)
1353 {
1354 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1355 
1356 	/* disable the doorbell aperture */
1357 	soc15_enable_doorbell_aperture(adev, false);
1358 	if (amdgpu_sriov_vf(adev))
1359 		xgpu_ai_mailbox_put_irq(adev);
1360 
1361 	if (adev->nbio.ras_if &&
1362 	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1363 		if (adev->nbio.funcs->init_ras_controller_interrupt)
1364 			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1365 		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
1366 			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1367 	}
1368 
1369 	return 0;
1370 }
1371 
1372 static int soc15_common_suspend(void *handle)
1373 {
1374 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1375 
1376 	return soc15_common_hw_fini(adev);
1377 }
1378 
1379 static int soc15_common_resume(void *handle)
1380 {
1381 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1382 
1383 	return soc15_common_hw_init(adev);
1384 }
1385 
1386 static bool soc15_common_is_idle(void *handle)
1387 {
1388 	return true;
1389 }
1390 
1391 static int soc15_common_wait_for_idle(void *handle)
1392 {
1393 	return 0;
1394 }
1395 
1396 static int soc15_common_soft_reset(void *handle)
1397 {
1398 	return 0;
1399 }
1400 
1401 static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
1402 {
1403 	uint32_t def, data;
1404 
1405 	if (adev->asic_type == CHIP_VEGA20 ||
1406 		adev->asic_type == CHIP_ARCTURUS) {
1407 		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
1408 
1409 		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1410 			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1411 				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1412 				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1413 				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
1414 		else
1415 			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1416 				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1417 				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1418 				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
1419 
1420 		if (def != data)
1421 			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
1422 	} else {
1423 		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1424 
1425 		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1426 			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1427 		else
1428 			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1429 
1430 		if (def != data)
1431 			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1432 	}
1433 }
1434 
1435 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1436 {
1437 	uint32_t def, data;
1438 
1439 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1440 
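	/* clearing bits 31:24 of MP0_MISC_CGTT_CTRL0 enables DRM medium grain clock gating; setting them disables it */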
1441 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1442 		data &= ~(0x01000000 |
1443 			  0x02000000 |
1444 			  0x04000000 |
1445 			  0x08000000 |
1446 			  0x10000000 |
1447 			  0x20000000 |
1448 			  0x40000000 |
1449 			  0x80000000);
1450 	else
1451 		data |= (0x01000000 |
1452 			 0x02000000 |
1453 			 0x04000000 |
1454 			 0x08000000 |
1455 			 0x10000000 |
1456 			 0x20000000 |
1457 			 0x40000000 |
1458 			 0x80000000);
1459 
1460 	if (def != data)
1461 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1462 }
1463 
1464 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1465 {
1466 	uint32_t def, data;
1467 
1468 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1469 
1470 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1471 		data |= 1;
1472 	else
1473 		data &= ~1;
1474 
1475 	if (def != data)
1476 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1477 }
1478 
1479 static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1480 						       bool enable)
1481 {
1482 	uint32_t def, data;
1483 
1484 	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1485 
1486 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1487 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1488 			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1489 	else
1490 		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1491 			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1492 
1493 	if (def != data)
1494 		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
1495 }
1496 
1497 static int soc15_common_set_clockgating_state(void *handle,
1498 					    enum amd_clockgating_state state)
1499 {
1500 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1501 
1502 	if (amdgpu_sriov_vf(adev))
1503 		return 0;
1504 
1505 	switch (adev->asic_type) {
1506 	case CHIP_VEGA10:
1507 	case CHIP_VEGA12:
1508 	case CHIP_VEGA20:
1509 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1510 				state == AMD_CG_STATE_GATE);
1511 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1512 				state == AMD_CG_STATE_GATE);
1513 		soc15_update_hdp_light_sleep(adev,
1514 				state == AMD_CG_STATE_GATE);
1515 		soc15_update_drm_clock_gating(adev,
1516 				state == AMD_CG_STATE_GATE);
1517 		soc15_update_drm_light_sleep(adev,
1518 				state == AMD_CG_STATE_GATE);
1519 		soc15_update_rom_medium_grain_clock_gating(adev,
1520 				state == AMD_CG_STATE_GATE);
1521 		adev->df.funcs->update_medium_grain_clock_gating(adev,
1522 				state == AMD_CG_STATE_GATE);
1523 		break;
1524 	case CHIP_RAVEN:
1525 	case CHIP_RENOIR:
1526 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1527 				state == AMD_CG_STATE_GATE);
1528 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1529 				state == AMD_CG_STATE_GATE);
1530 		soc15_update_hdp_light_sleep(adev,
1531 				state == AMD_CG_STATE_GATE);
1532 		soc15_update_drm_clock_gating(adev,
1533 				state == AMD_CG_STATE_GATE);
1534 		soc15_update_drm_light_sleep(adev,
1535 				state == AMD_CG_STATE_GATE);
1536 		soc15_update_rom_medium_grain_clock_gating(adev,
1537 				state == AMD_CG_STATE_GATE);
1538 		break;
1539 	case CHIP_ARCTURUS:
1540 		soc15_update_hdp_light_sleep(adev,
1541 				state == AMD_CG_STATE_GATE);
1542 		break;
1543 	default:
1544 		break;
1545 	}
1546 	return 0;
1547 }
1548 
1549 static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
1550 {
1551 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1552 	int data;
1553 
1554 	if (amdgpu_sriov_vf(adev))
1555 		*flags = 0;
1556 
1557 	adev->nbio.funcs->get_clockgating_state(adev, flags);
1558 
1559 	/* AMD_CG_SUPPORT_HDP_LS */
1560 	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1561 	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1562 		*flags |= AMD_CG_SUPPORT_HDP_LS;
1563 
1564 	/* AMD_CG_SUPPORT_DRM_MGCG */
1565 	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1566 	if (!(data & 0x01000000))
1567 		*flags |= AMD_CG_SUPPORT_DRM_MGCG;
1568 
1569 	/* AMD_CG_SUPPORT_DRM_LS */
1570 	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1571 	if (data & 0x1)
1572 		*flags |= AMD_CG_SUPPORT_DRM_LS;
1573 
1574 	/* AMD_CG_SUPPORT_ROM_MGCG */
1575 	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1576 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1577 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
1578 
1579 	adev->df.funcs->get_clockgating_state(adev, flags);
1580 }
1581 
1582 static int soc15_common_set_powergating_state(void *handle,
1583 					    enum amd_powergating_state state)
1584 {
1585 	/* todo */
1586 	return 0;
1587 }
1588 
1589 const struct amd_ip_funcs soc15_common_ip_funcs = {
1590 	.name = "soc15_common",
1591 	.early_init = soc15_common_early_init,
1592 	.late_init = soc15_common_late_init,
1593 	.sw_init = soc15_common_sw_init,
1594 	.sw_fini = soc15_common_sw_fini,
1595 	.hw_init = soc15_common_hw_init,
1596 	.hw_fini = soc15_common_hw_fini,
1597 	.suspend = soc15_common_suspend,
1598 	.resume = soc15_common_resume,
1599 	.is_idle = soc15_common_is_idle,
1600 	.wait_for_idle = soc15_common_wait_for_idle,
1601 	.soft_reset = soc15_common_soft_reset,
1602 	.set_clockgating_state = soc15_common_set_clockgating_state,
1603 	.set_powergating_state = soc15_common_set_powergating_state,
1604 	.get_clockgating_state = soc15_common_get_clockgating_state,
1605 };
1606