/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL	0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK		0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0

/* for Vega20/arcturus register offset change */
#define	mmROM_INDEX_VG20				0x00e4
#define	mmROM_INDEX_VG20_BASE_IDX			0
#define	mmROM_DATA_VG20					0x00e5
#define	mmROM_DATA_VG20_BASE_IDX			0

/*
 * Indirect register accessors
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

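/*
 * UVD context, DIDT, GC CAC and SE CAC registers are likewise accessed
 * indirectly through index/data register pairs; each pair is serialized by
 * its own spinlock so the index write and the data access cannot interleave.
 */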
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

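/*
 * Raven's SPLL reference clock runs at 4x the actual xclk, so scale it
 * down; the other SOC15 parts report the xclk directly.
 */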
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}

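/*
 * Select the micro engine (ME), pipe, queue and VMID that subsequent
 * GRBM register accesses are routed to.
 */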
void soc15_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

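/*
 * Read the discrete VBIOS image a dword at a time through the SMUIO
 * ROM_INDEX/ROM_DATA pair.  Vega20 and Arcturus moved these registers,
 * hence the _VG20 offsets defined above.
 */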
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
		break;
	default:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
		break;
	}

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

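/*
 * Registers that may be read back through the read_register ASIC callback
 * (e.g. from the AMDGPU_INFO register read ioctl).
 */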
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

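/*
 * Read a GRBM-indexed register: steer GRBM to the requested SE/SH first
 * (0xffffffff means broadcast), then restore broadcast mode afterwards.
 */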
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
			reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
					     const struct soc15_reg_golden *regs,
					     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}

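/*
 * Mode1 reset resets the whole ASIC through PSP.  Bus mastering is disabled
 * and the PCI config space is cached across the reset and restored after it,
 * then the memsize register is polled until the ASIC comes back.
 */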
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF getting stuck during RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

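/*
 * Pick the reset method: honor an explicit amdgpu_reset_method module
 * parameter, use mode2 on the APUs, and prefer BACO where the firmware
 * supports it, falling back to mode1 otherwise.
 */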
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return soc15_asic_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do IP discovery here for Renoir,
		 * it doesn't support SR-IOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for SR-IOV before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}

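/*
 * Register the per-ASIC IP blocks in hardware init order.  Note the
 * SR-IOV cases below, where PSP has to be added ahead of IH.
 */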
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
{
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
		return;
	/* read back the HDP RAS counter to reset it to 0 */
	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs.  Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
};

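/*
 * Early init wires up the indirect register accessors and derives the
 * per-ASIC clockgating/powergating feature flags and external revision id.
 */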
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x1636)
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				 AMD_CG_SUPPORT_GFX_MGLS |
				 AMD_CG_SUPPORT_GFX_3D_CGCG |
				 AMD_CG_SUPPORT_GFX_3D_CGLS |
				 AMD_CG_SUPPORT_GFX_CGCG |
				 AMD_CG_SUPPORT_GFX_CGLS |
				 AMD_CG_SUPPORT_GFX_CP_LS |
				 AMD_CG_SUPPORT_MC_MGCG |
				 AMD_CG_SUPPORT_MC_LS |
				 AMD_CG_SUPPORT_SDMA_MGCG |
				 AMD_CG_SUPPORT_SDMA_LS |
				 AMD_CG_SUPPORT_BIF_LS |
				 AMD_CG_SUPPORT_HDP_LS |
				 AMD_CG_SUPPORT_ROM_MGCG |
				 AMD_CG_SUPPORT_VCN_MGCG |
				 AMD_CG_SUPPORT_JPEG_MGCG |
				 AMD_CG_SUPPORT_IH_CG |
				 AMD_CG_SUPPORT_ATHUB_LS |
				 AMD_CG_SUPPORT_ATHUB_MGCG |
				 AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				 AMD_PG_SUPPORT_VCN |
				 AMD_PG_SUPPORT_JPEG |
				 AMD_PG_SUPPORT_VCN_DPG;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->asic_funcs &&
	    adev->asic_funcs->reset_hdp_ras_error_count)
		adev->asic_funcs->reset_hdp_ras_error_count(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}

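/*
 * Program the SDMA and IH doorbell ranges into the NBIO.  Under SR-IOV the
 * hypervisor owns these ranges, so nothing is done there.
 */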
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* SDMA/IH doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space
	 * so that those registers can be exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not in the
	 * SDMA/IH/MM/ACV ranges are routed to CP, so the SDMA/IH/MM/ACV
	 * doorbell ranges must be initialized prior to CP IP block init
	 * and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

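/*
 * Vega20, Arcturus and Renoir control HDP memory light sleep through the
 * per-memory HDP_MEM_POWER_CTRL register; earlier SOC15 parts use the
 * single LS_ENABLE bit in HDP_MEM_POWER_LS.
 */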
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS ||
	    adev->asic_type == CHIP_RENOIR) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}

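/*
 * No register defines exist for the MP0_MISC_CGTT_CTRL0 fields, so raw
 * masks for bits 31:24 are used below: clearing them enables DRM medium
 * grain clock gating, setting them disables it.
 */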
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};