xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/soc15.c (revision 1d789535a03679e5ce0b56a0d32a5e44596dfcdb)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include <drm/amdgpu_drm.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_atombios.h"
32 #include "amdgpu_ih.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "amdgpu_ucode.h"
36 #include "amdgpu_psp.h"
37 #include "atom.h"
38 #include "amd_pcie.h"
39 
40 #include "uvd/uvd_7_0_offset.h"
41 #include "gc/gc_9_0_offset.h"
42 #include "gc/gc_9_0_sh_mask.h"
43 #include "sdma0/sdma0_4_0_offset.h"
44 #include "sdma1/sdma1_4_0_offset.h"
45 #include "nbio/nbio_7_0_default.h"
46 #include "nbio/nbio_7_0_offset.h"
47 #include "nbio/nbio_7_0_sh_mask.h"
48 #include "nbio/nbio_7_0_smn.h"
49 #include "mp/mp_9_0_offset.h"
50 
51 #include "soc15.h"
52 #include "soc15_common.h"
53 #include "gfx_v9_0.h"
54 #include "gmc_v9_0.h"
55 #include "gfxhub_v1_0.h"
56 #include "mmhub_v1_0.h"
57 #include "df_v1_7.h"
58 #include "df_v3_6.h"
59 #include "nbio_v6_1.h"
60 #include "nbio_v7_0.h"
61 #include "nbio_v7_4.h"
62 #include "hdp_v4_0.h"
63 #include "vega10_ih.h"
64 #include "vega20_ih.h"
65 #include "navi10_ih.h"
66 #include "sdma_v4_0.h"
67 #include "uvd_v7_0.h"
68 #include "vce_v4_0.h"
69 #include "vcn_v1_0.h"
70 #include "vcn_v2_0.h"
71 #include "jpeg_v2_0.h"
72 #include "vcn_v2_5.h"
73 #include "jpeg_v2_5.h"
74 #include "smuio_v9_0.h"
75 #include "smuio_v11_0.h"
76 #include "smuio_v13_0.h"
77 #include "amdgpu_vkms.h"
78 #include "mxgpu_ai.h"
79 #include "amdgpu_ras.h"
80 #include "amdgpu_xgmi.h"
81 #include <uapi/linux/kfd_ioctl.h>
82 
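/* MP0 MISC clock-gating control registers used by the DRM clock gating and light sleep helpers below */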
83 #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
84 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
85 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
86 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
87 
88 static const struct amd_ip_funcs soc15_common_ip_funcs;
89 
90 /* Vega, Raven, Arcturus */
91 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
92 {
93 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
94 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
95 };
96 
97 static const struct amdgpu_video_codecs vega_video_codecs_encode =
98 {
99 	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
100 	.codec_array = vega_video_codecs_encode_array,
101 };
102 
103 /* Vega */
104 static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
105 {
106 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
107 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
108 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
109 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
110 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
111 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
112 };
113 
114 static const struct amdgpu_video_codecs vega_video_codecs_decode =
115 {
116 	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
117 	.codec_array = vega_video_codecs_decode_array,
118 };
119 
120 /* Raven */
121 static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
122 {
123 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
124 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
125 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
126 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
127 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
128 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
129 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
130 };
131 
132 static const struct amdgpu_video_codecs rv_video_codecs_decode =
133 {
134 	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
135 	.codec_array = rv_video_codecs_decode_array,
136 };
137 
138 /* Renoir, Arcturus */
139 static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
140 {
141 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
142 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
143 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
144 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
145 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
146 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
147 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
148 };
149 
150 static const struct amdgpu_video_codecs rn_video_codecs_decode =
151 {
152 	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
153 	.codec_array = rn_video_codecs_decode_array,
154 };
155 
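/*
 * A non-zero VCE IP version indicates a VCE-based part, which uses the Vega
 * encode/decode tables; otherwise the UVD/VCN IP version selects between the
 * Raven and Renoir/Arcturus decode tables (the encode caps are shared).
 */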
156 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
157 				    const struct amdgpu_video_codecs **codecs)
158 {
159 	if (adev->ip_versions[VCE_HWIP][0]) {
160 		switch (adev->ip_versions[VCE_HWIP][0]) {
161 		case IP_VERSION(4, 0, 0):
162 		case IP_VERSION(4, 1, 0):
163 			if (encode)
164 				*codecs = &vega_video_codecs_encode;
165 			else
166 				*codecs = &vega_video_codecs_decode;
167 			return 0;
168 		default:
169 			return -EINVAL;
170 		}
171 	} else {
172 		switch (adev->ip_versions[UVD_HWIP][0]) {
173 		case IP_VERSION(1, 0, 0):
174 		case IP_VERSION(1, 0, 1):
175 			if (encode)
176 				*codecs = &vega_video_codecs_encode;
177 			else
178 				*codecs = &rv_video_codecs_decode;
179 			return 0;
180 		case IP_VERSION(2, 5, 0):
181 		case IP_VERSION(2, 6, 0):
182 		case IP_VERSION(2, 2, 0):
183 			if (encode)
184 				*codecs = &vega_video_codecs_encode;
185 			else
186 				*codecs = &rn_video_codecs_decode;
187 			return 0;
188 		default:
189 			return -EINVAL;
190 		}
191 	}
192 }
193 
194 /*
195  * Indirect register accessors
196  */
197 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
198 {
199 	unsigned long address, data;
200 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
201 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
202 
203 	return amdgpu_device_indirect_rreg(adev, address, data, reg);
204 }
205 
206 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
207 {
208 	unsigned long address, data;
209 
210 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
211 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
212 
213 	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
214 }
215 
216 static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
217 {
218 	unsigned long address, data;
219 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
220 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
221 
222 	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
223 }
224 
225 static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
226 {
227 	unsigned long address, data;
228 
229 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
230 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
231 
232 	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
233 }
234 
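/*
 * UVD context registers are accessed through the UVD_CTX_INDEX/UVD_CTX_DATA
 * index/data pair; the 9-bit index write and the data access are serialized
 * with uvd_ctx_idx_lock.
 */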
235 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
236 {
237 	unsigned long flags, address, data;
238 	u32 r;
239 
240 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
241 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
242 
243 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
244 	WREG32(address, ((reg) & 0x1ff));
245 	r = RREG32(data);
246 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
247 	return r;
248 }
249 
250 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
251 {
252 	unsigned long flags, address, data;
253 
254 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
255 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
256 
257 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
258 	WREG32(address, ((reg) & 0x1ff));
259 	WREG32(data, (v));
260 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
261 }
262 
263 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
264 {
265 	unsigned long flags, address, data;
266 	u32 r;
267 
268 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
269 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
270 
271 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
272 	WREG32(address, (reg));
273 	r = RREG32(data);
274 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
275 	return r;
276 }
277 
278 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
279 {
280 	unsigned long flags, address, data;
281 
282 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
283 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
284 
285 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
286 	WREG32(address, (reg));
287 	WREG32(data, (v));
288 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
289 }
290 
291 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
292 {
293 	unsigned long flags;
294 	u32 r;
295 
296 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
297 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
298 	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
299 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
300 	return r;
301 }
302 
303 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
304 {
305 	unsigned long flags;
306 
307 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
308 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
309 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
310 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
311 }
312 
313 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
314 {
315 	unsigned long flags;
316 	u32 r;
317 
318 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
319 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
320 	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
321 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
322 	return r;
323 }
324 
325 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
326 {
327 	unsigned long flags;
328 
329 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
330 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
331 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
332 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
333 }
334 
335 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
336 {
337 	return adev->nbio.funcs->get_memsize(adev);
338 }
339 
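/*
 * Core reference clock (xclk): MP1 v12.0.x parts report a fixed 10000
 * (100 MHz in 10 kHz units), MP1 v10.0.x parts divide the SPLL reference
 * clock by 4, everything else returns the SPLL reference clock as-is.
 */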
340 static u32 soc15_get_xclk(struct amdgpu_device *adev)
341 {
342 	u32 reference_clock = adev->clock.spll.reference_freq;
343 
344 	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
345 	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
346 		return 10000;
347 	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
348 	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
349 		return reference_clock / 4;
350 
351 	return reference_clock;
352 }
353 
354 
355 void soc15_grbm_select(struct amdgpu_device *adev,
356 		     u32 me, u32 pipe, u32 queue, u32 vmid)
357 {
358 	u32 grbm_gfx_cntl = 0;
359 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
360 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
361 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
362 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
363 
364 	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
365 }
366 
367 static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
368 {
369 	/* todo */
370 }
371 
372 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
373 {
374 	/* todo */
375 	return false;
376 }
377 
378 static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
379 				     u8 *bios, u32 length_bytes)
380 {
381 	u32 *dw_ptr;
382 	u32 i, length_dw;
383 	uint32_t rom_index_offset;
384 	uint32_t rom_data_offset;
385 
386 	if (bios == NULL)
387 		return false;
388 	if (length_bytes == 0)
389 		return false;
390 	/* APU vbios image is part of the system bios (sbios) image */
391 	if (adev->flags & AMD_IS_APU)
392 		return false;
393 
394 	dw_ptr = (u32 *)bios;
395 	length_dw = ALIGN(length_bytes, 4) / 4;
396 
397 	rom_index_offset =
398 		adev->smuio.funcs->get_rom_index_offset(adev);
399 	rom_data_offset =
400 		adev->smuio.funcs->get_rom_data_offset(adev);
401 
402 	/* set rom index to 0 */
403 	WREG32(rom_index_offset, 0);
404 	/* read out the rom data */
405 	for (i = 0; i < length_dw; i++)
406 		dw_ptr[i] = RREG32(rom_data_offset);
407 
408 	return true;
409 }
410 
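/* Whitelist of registers that the read_register ASIC callback will service */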
411 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
412 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
413 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
414 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
415 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
416 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
417 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
418 	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
419 	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
420 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
421 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
422 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
423 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
424 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
425 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
426 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
427 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
428 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
429 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
430 	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
431 	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
432 };
433 
434 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
435 					 u32 sh_num, u32 reg_offset)
436 {
437 	uint32_t val;
438 
439 	mutex_lock(&adev->grbm_idx_mutex);
440 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
441 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
442 
443 	val = RREG32(reg_offset);
444 
445 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
446 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
447 	mutex_unlock(&adev->grbm_idx_mutex);
448 	return val;
449 }
450 
451 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
452 					 bool indexed, u32 se_num,
453 					 u32 sh_num, u32 reg_offset)
454 {
455 	if (indexed) {
456 		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
457 	} else {
458 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
459 			return adev->gfx.config.gb_addr_config;
460 		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
461 			return adev->gfx.config.db_debug2;
462 		return RREG32(reg_offset);
463 	}
464 }
465 
466 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
467 			    u32 sh_num, u32 reg_offset, u32 *value)
468 {
469 	uint32_t i;
470 	struct soc15_allowed_register_entry  *en;
471 
472 	*value = 0;
473 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
474 		en = &soc15_allowed_read_registers[i];
475 		if (adev->reg_offset[en->hwip][en->inst] &&
476 			reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
477 					+ en->reg_offset))
478 			continue;
479 
480 		*value = soc15_get_register_value(adev,
481 						  soc15_allowed_read_registers[i].grbm_indexed,
482 						  se_num, sh_num, reg_offset);
483 		return 0;
484 	}
485 	return -EINVAL;
486 }
487 
488 
489 /**
490  * soc15_program_register_sequence - program an array of registers.
491  *
492  * @adev: amdgpu_device pointer
493  * @regs: pointer to the register array
494  * @array_size: size of the register array
495  *
496  * Programs an array of registers with AND and OR masks.
497  * This is a helper for setting golden registers.
498  */
499 
500 void soc15_program_register_sequence(struct amdgpu_device *adev,
501 					     const struct soc15_reg_golden *regs,
502 					     const u32 array_size)
503 {
504 	const struct soc15_reg_golden *entry;
505 	u32 tmp, reg;
506 	int i;
507 
508 	for (i = 0; i < array_size; ++i) {
509 		entry = &regs[i];
510 		reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
511 
512 		if (entry->and_mask == 0xffffffff) {
513 			tmp = entry->or_mask;
514 		} else {
515 			tmp = (entry->hwip == GC_HWIP) ?
516 				RREG32_SOC15_IP(GC, reg) : RREG32(reg);
517 
518 			tmp &= ~(entry->and_mask);
519 			tmp |= (entry->or_mask & entry->and_mask);
520 		}
521 
522 		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
523 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
524 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
525 			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
526 			WREG32_RLC(reg, tmp);
527 		else
528 			(entry->hwip == GC_HWIP) ?
529 				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
530 
531 	}
532 
533 }
534 
535 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
536 {
537 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
538 	int ret = 0;
539 
540 	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
541 	if (ras && adev->ras_enabled)
542 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
543 
544 	ret = amdgpu_dpm_baco_reset(adev);
545 	if (ret)
546 		return ret;
547 
548 	/* re-enable doorbell interrupt after BACO exit */
549 	if (ras && adev->ras_enabled)
550 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
551 
552 	return 0;
553 }
554 
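/*
 * Pick the reset method: an explicit amdgpu_reset_method module parameter is
 * honoured (forced to MODE2 when the GPU is XGMI-connected to the CPU),
 * otherwise the choice is made per MP1 version, preferring BACO where the
 * firmware supports it and falling back to MODE1.
 */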
555 static enum amd_reset_method
556 soc15_asic_reset_method(struct amdgpu_device *adev)
557 {
558 	bool baco_reset = false;
559 	bool connected_to_cpu = false;
560 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
561 
562 	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
563 		connected_to_cpu = true;
564 
565 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
566 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
567 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
568 	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
569 		/* If connected to cpu, driver only supports mode2 */
570 		if (connected_to_cpu)
571 			return AMD_RESET_METHOD_MODE2;
572 		return amdgpu_reset_method;
573 	}
574 
575 	if (amdgpu_reset_method != -1)
576 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
577 				  amdgpu_reset_method);
578 
579 	switch (adev->ip_versions[MP1_HWIP][0]) {
580 	case IP_VERSION(10, 0, 0):
581 	case IP_VERSION(10, 0, 1):
582 	case IP_VERSION(12, 0, 0):
583 	case IP_VERSION(12, 0, 1):
584 		return AMD_RESET_METHOD_MODE2;
585 	case IP_VERSION(9, 0, 0):
586 	case IP_VERSION(11, 0, 2):
587 		if (adev->asic_type == CHIP_VEGA20) {
588 			if (adev->psp.sos.fw_version >= 0x80067)
589 				baco_reset = amdgpu_dpm_is_baco_supported(adev);
590 			/*
591 			 * 1. PMFW version > 0x284300: all cases use baco
592 			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
593 			 */
594 			if (ras && adev->ras_enabled &&
595 			    adev->pm.fw_version <= 0x283400)
596 				baco_reset = false;
597 		} else {
598 			baco_reset = amdgpu_dpm_is_baco_supported(adev);
599 		}
600 		break;
601 	case IP_VERSION(13, 0, 2):
602 		/*
603 		 * 1. connected to cpu: driver issues mode2 reset
604 		 * 2. discrete gpu: driver issues mode1 reset
605 		 */
606 		if (connected_to_cpu)
607 			return AMD_RESET_METHOD_MODE2;
608 		break;
609 	default:
610 		break;
611 	}
612 
613 	if (baco_reset)
614 		return AMD_RESET_METHOD_BACO;
615 	else
616 		return AMD_RESET_METHOD_MODE1;
617 }
618 
619 static int soc15_asic_reset(struct amdgpu_device *adev)
620 {
621 	/* original raven doesn't have full asic reset */
622 	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
623 	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
624 		return 0;
625 
626 	switch (soc15_asic_reset_method(adev)) {
627 	case AMD_RESET_METHOD_PCI:
628 		dev_info(adev->dev, "PCI reset\n");
629 		return amdgpu_device_pci_reset(adev);
630 	case AMD_RESET_METHOD_BACO:
631 		dev_info(adev->dev, "BACO reset\n");
632 		return soc15_asic_baco_reset(adev);
633 	case AMD_RESET_METHOD_MODE2:
634 		dev_info(adev->dev, "MODE2 reset\n");
635 		return amdgpu_dpm_mode2_reset(adev);
636 	default:
637 		dev_info(adev->dev, "MODE1 reset\n");
638 		return amdgpu_device_mode1_reset(adev);
639 	}
640 }
641 
642 static bool soc15_supports_baco(struct amdgpu_device *adev)
643 {
644 	switch (adev->ip_versions[MP1_HWIP][0]) {
645 	case IP_VERSION(9, 0, 0):
646 	case IP_VERSION(11, 0, 2):
647 		if (adev->asic_type == CHIP_VEGA20) {
648 			if (adev->psp.sos.fw_version >= 0x80067)
649 				return amdgpu_dpm_is_baco_supported(adev);
650 			return false;
651 		} else {
652 			return amdgpu_dpm_is_baco_supported(adev);
653 		}
654 		break;
655 	default:
656 		return false;
657 	}
658 }
659 
660 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
661 			u32 cntl_reg, u32 status_reg)
662 {
663 	return 0;
664 }*/
665 
666 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
667 {
668 	/*int r;
669 
670 	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
671 	if (r)
672 		return r;
673 
674 	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
675 	*/
676 	return 0;
677 }
678 
679 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
680 {
681 	/* todo */
682 
683 	return 0;
684 }
685 
686 static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
687 {
688 	if (pci_is_root_bus(adev->pdev->bus))
689 		return;
690 
691 	if (amdgpu_pcie_gen2 == 0)
692 		return;
693 
694 	if (adev->flags & AMD_IS_APU)
695 		return;
696 
697 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
698 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
699 		return;
700 
701 	/* todo */
702 }
703 
704 static void soc15_program_aspm(struct amdgpu_device *adev)
705 {
706 	if (!amdgpu_aspm)
707 		return;
708 
709 	if (!(adev->flags & AMD_IS_APU) &&
710 	    (adev->nbio.funcs->program_aspm))
711 		adev->nbio.funcs->program_aspm(adev);
712 }
713 
714 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
715 					   bool enable)
716 {
717 	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
718 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
719 }
720 
721 const struct amdgpu_ip_block_version vega10_common_ip_block =
722 {
723 	.type = AMD_IP_BLOCK_TYPE_COMMON,
724 	.major = 2,
725 	.minor = 0,
726 	.rev = 0,
727 	.funcs = &soc15_common_ip_funcs,
728 };
729 
730 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
731 {
732 	return adev->nbio.funcs->get_rev_id(adev);
733 }
734 
735 static void soc15_reg_base_init(struct amdgpu_device *adev)
736 {
737 	int r;
738 
739 	/* Set IP register base before any HW register access */
740 	switch (adev->asic_type) {
741 	case CHIP_VEGA10:
742 	case CHIP_VEGA12:
743 	case CHIP_RAVEN:
744 		vega10_reg_base_init(adev);
745 		break;
746 	case CHIP_RENOIR:
747 		/* It's safe to do ip discovery here for Renoir,
748 		 * since it doesn't support SRIOV. */
749 		if (amdgpu_discovery) {
750 			r = amdgpu_discovery_reg_base_init(adev);
751 			if (r == 0)
752 				break;
753 			DRM_WARN("failed to init reg base from ip discovery table, "
754 				 "fallback to legacy init method\n");
755 		}
756 		vega10_reg_base_init(adev);
757 		break;
758 	case CHIP_VEGA20:
759 		vega20_reg_base_init(adev);
760 		break;
761 	case CHIP_ARCTURUS:
762 		arct_reg_base_init(adev);
763 		break;
764 	case CHIP_ALDEBARAN:
765 		aldebaran_reg_base_init(adev);
766 		break;
767 	default:
768 		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
769 		break;
770 	}
771 }
772 
773 void soc15_set_virt_ops(struct amdgpu_device *adev)
774 {
775 	adev->virt.ops = &xgpu_ai_virt_ops;
776 
777 	/* init soc15 reg base early enough so we can
778 	 * request full access for sriov before
779 	 * set_ip_blocks. */
780 	soc15_reg_base_init(adev);
781 }
782 
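/*
 * Register the per-ASIC IP blocks: common, GMC, IH and PSP (PSP is added
 * before IH under SR-IOV), followed by GFX, SDMA, power management, display
 * and multimedia blocks as appropriate for each chip.
 */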
783 int soc15_set_ip_blocks(struct amdgpu_device *adev)
784 {
785 	/* for bare metal case */
786 	if (!amdgpu_sriov_vf(adev))
787 		soc15_reg_base_init(adev);
788 
789 	if (adev->flags & AMD_IS_APU) {
790 		adev->nbio.funcs = &nbio_v7_0_funcs;
791 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
792 	} else if (adev->asic_type == CHIP_VEGA20 ||
793 		   adev->asic_type == CHIP_ARCTURUS ||
794 		   adev->asic_type == CHIP_ALDEBARAN) {
795 		adev->nbio.funcs = &nbio_v7_4_funcs;
796 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
797 	} else {
798 		adev->nbio.funcs = &nbio_v6_1_funcs;
799 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
800 	}
801 	adev->hdp.funcs = &hdp_v4_0_funcs;
802 
803 	if (adev->asic_type == CHIP_VEGA20 ||
804 	    adev->asic_type == CHIP_ARCTURUS ||
805 	    adev->asic_type == CHIP_ALDEBARAN)
806 		adev->df.funcs = &df_v3_6_funcs;
807 	else
808 		adev->df.funcs = &df_v1_7_funcs;
809 
810 	if (adev->asic_type == CHIP_VEGA20 ||
811 	    adev->asic_type == CHIP_ARCTURUS)
812 		adev->smuio.funcs = &smuio_v11_0_funcs;
813 	else if (adev->asic_type == CHIP_ALDEBARAN)
814 		adev->smuio.funcs = &smuio_v13_0_funcs;
815 	else
816 		adev->smuio.funcs = &smuio_v9_0_funcs;
817 
818 	adev->rev_id = soc15_get_rev_id(adev);
819 
820 	switch (adev->asic_type) {
821 	case CHIP_VEGA10:
822 	case CHIP_VEGA12:
823 	case CHIP_VEGA20:
824 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
825 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
826 
827 		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
828 		if (amdgpu_sriov_vf(adev)) {
829 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
830 				if (adev->asic_type == CHIP_VEGA20)
831 					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
832 				else
833 					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
834 			}
835 			if (adev->asic_type == CHIP_VEGA20)
836 				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
837 			else
838 				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
839 		} else {
840 			if (adev->asic_type == CHIP_VEGA20)
841 				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
842 			else
843 				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
844 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
845 				if (adev->asic_type == CHIP_VEGA20)
846 					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
847 				else
848 					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
849 			}
850 		}
851 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
852 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
853 		if (is_support_sw_smu(adev)) {
854 			if (!amdgpu_sriov_vf(adev))
855 				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
856 		} else {
857 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
858 		}
859 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
860 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
861 #if defined(CONFIG_DRM_AMD_DC)
862 		else if (amdgpu_device_has_dc_support(adev))
863 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
864 #endif
865 		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
866 			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
867 			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
868 		}
869 		break;
870 	case CHIP_RAVEN:
871 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
872 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
873 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
874 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
875 			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
876 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
877 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
878 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
879 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
880 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
881 #if defined(CONFIG_DRM_AMD_DC)
882 		else if (amdgpu_device_has_dc_support(adev))
883 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
884 #endif
885 		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
886 		break;
887 	case CHIP_ARCTURUS:
888 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
889 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
890 
891 		if (amdgpu_sriov_vf(adev)) {
892 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
893 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
894 			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
895 		} else {
896 			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
897 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
898 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
899 		}
900 
901 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
902 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
903 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
904 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
905 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
906 
907 		if (amdgpu_sriov_vf(adev)) {
908 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
909 				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
910 		} else {
911 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
912 		}
913 		if (!amdgpu_sriov_vf(adev))
914 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
915 		break;
916 	case CHIP_RENOIR:
917 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
918 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
919 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
920 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
921 			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
922 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
923 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
924 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
925 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
926 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
927 #if defined(CONFIG_DRM_AMD_DC)
928 		else if (amdgpu_device_has_dc_support(adev))
929 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
930 #endif
931 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
932 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
933 		break;
934 	case CHIP_ALDEBARAN:
935 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
936 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
937 
938 		if (amdgpu_sriov_vf(adev)) {
939 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
940 				amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
941 			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
942 		} else {
943 			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
944 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
945 				amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
946 		}
947 
948 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
949 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
950 
951 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
952 		amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
953 		amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
954 		break;
955 	default:
956 		return -EINVAL;
957 	}
958 
959 	return 0;
960 }
961 
962 static bool soc15_need_full_reset(struct amdgpu_device *adev)
963 {
964 	/* change this when we implement soft reset */
965 	return true;
966 }
967 
968 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
969 				 uint64_t *count1)
970 {
971 	uint32_t perfctr = 0;
972 	uint64_t cnt0_of, cnt1_of;
973 	int tmp;
974 
975 	/* This reports 0 on APUs, so return to avoid writing/reading registers
976 	 * that may or may not be different from their GPU counterparts
977 	 */
978 	if (adev->flags & AMD_IS_APU)
979 		return;
980 
981 	/* Set the 2 events that we wish to watch, defined above */
982 	/* Reg 40 is # received msgs */
983 	/* Reg 104 is # of posted requests sent */
984 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
985 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
986 
987 	/* Write to enable desired perf counters */
988 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
989 	/* Zero out and enable the perf counters
990 	 * Write 0x5:
991 	 * Bit 0 = Start all counters(1)
992 	 * Bit 2 = Global counter reset enable(1)
993 	 */
994 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
995 
996 	msleep(1000);
997 
998 	/* Load the shadow and disable the perf counters
999 	 * Write 0x2:
1000 	 * Bit 0 = Stop counters(0)
1001 	 * Bit 1 = Load the shadow counters(1)
1002 	 */
1003 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
1004 
1005 	/* Read register values to get any >32bit overflow */
1006 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
1007 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1008 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1009 
1010 	/* Get the values and add the overflow */
1011 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1012 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1013 }
1014 
1015 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1016 				 uint64_t *count1)
1017 {
1018 	uint32_t perfctr = 0;
1019 	uint64_t cnt0_of, cnt1_of;
1020 	int tmp;
1021 
1022 	/* This reports 0 on APUs, so return to avoid writing/reading registers
1023 	 * that may or may not be different from their GPU counterparts
1024 	 */
1025 	if (adev->flags & AMD_IS_APU)
1026 		return;
1027 
1028 	/* Set the 2 events that we wish to watch, defined above */
1029 	/* Reg 40 is # received msgs */
1030 	/* Reg 108 is # of posted requests sent on VG20 */
1031 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
1032 				EVENT0_SEL, 40);
1033 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
1034 				EVENT1_SEL, 108);
1035 
1036 	/* Write to enable desired perf counters */
1037 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
1038 	/* Zero out and enable the perf counters
1039 	 * Write 0x5:
1040 	 * Bit 0 = Start all counters(1)
1041 	 * Bit 2 = Global counter reset enable(1)
1042 	 */
1043 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
1044 
1045 	msleep(1000);
1046 
1047 	/* Load the shadow and disable the perf counters
1048 	 * Write 0x2:
1049 	 * Bit 0 = Stop counters(0)
1050 	 * Bit 1 = Load the shadow counters(1)
1051 	 */
1052 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
1053 
1054 	/* Read register values to get any >32bit overflow */
1055 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
1056 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
1057 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
1058 
1059 	/* Get the values and add the overflow */
1060 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
1061 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
1062 }
1063 
1064 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
1065 {
1066 	u32 sol_reg;
1067 
1068 	/* Just return false for soc15 GPUs.  Reset does not seem to
1069 	 * be necessary.
1070 	 */
1071 	if (!amdgpu_passthrough(adev))
1072 		return false;
1073 
1074 	if (adev->flags & AMD_IS_APU)
1075 		return false;
1076 
1077 	/* Check sOS sign of life register to confirm sys driver and sOS
1078 	 * have already been loaded.
1079 	 */
1080 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
1081 	if (sol_reg)
1082 		return true;
1083 
1084 	return false;
1085 }
1086 
1087 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
1088 {
1089 	uint64_t nak_r, nak_g;
1090 
1091 	/* Get the number of NAKs received and generated */
1092 	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
1093 	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
1094 
1095 	/* Add the total number of NAKs, i.e. the number of replays */
1096 	return (nak_r + nak_g);
1097 }
1098 
1099 static void soc15_pre_asic_init(struct amdgpu_device *adev)
1100 {
1101 	gmc_v9_0_restore_registers(adev);
1102 }
1103 
1104 static const struct amdgpu_asic_funcs soc15_asic_funcs =
1105 {
1106 	.read_disabled_bios = &soc15_read_disabled_bios,
1107 	.read_bios_from_rom = &soc15_read_bios_from_rom,
1108 	.read_register = &soc15_read_register,
1109 	.reset = &soc15_asic_reset,
1110 	.reset_method = &soc15_asic_reset_method,
1111 	.set_vga_state = &soc15_vga_set_state,
1112 	.get_xclk = &soc15_get_xclk,
1113 	.set_uvd_clocks = &soc15_set_uvd_clocks,
1114 	.set_vce_clocks = &soc15_set_vce_clocks,
1115 	.get_config_memsize = &soc15_get_config_memsize,
1116 	.need_full_reset = &soc15_need_full_reset,
1117 	.init_doorbell_index = &vega10_doorbell_index_init,
1118 	.get_pcie_usage = &soc15_get_pcie_usage,
1119 	.need_reset_on_init = &soc15_need_reset_on_init,
1120 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
1121 	.supports_baco = &soc15_supports_baco,
1122 	.pre_asic_init = &soc15_pre_asic_init,
1123 	.query_video_codecs = &soc15_query_video_codecs,
1124 };
1125 
1126 static const struct amdgpu_asic_funcs vega20_asic_funcs =
1127 {
1128 	.read_disabled_bios = &soc15_read_disabled_bios,
1129 	.read_bios_from_rom = &soc15_read_bios_from_rom,
1130 	.read_register = &soc15_read_register,
1131 	.reset = &soc15_asic_reset,
1132 	.reset_method = &soc15_asic_reset_method,
1133 	.set_vga_state = &soc15_vga_set_state,
1134 	.get_xclk = &soc15_get_xclk,
1135 	.set_uvd_clocks = &soc15_set_uvd_clocks,
1136 	.set_vce_clocks = &soc15_set_vce_clocks,
1137 	.get_config_memsize = &soc15_get_config_memsize,
1138 	.need_full_reset = &soc15_need_full_reset,
1139 	.init_doorbell_index = &vega20_doorbell_index_init,
1140 	.get_pcie_usage = &vega20_get_pcie_usage,
1141 	.need_reset_on_init = &soc15_need_reset_on_init,
1142 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
1143 	.supports_baco = &soc15_supports_baco,
1144 	.pre_asic_init = &soc15_pre_asic_init,
1145 	.query_video_codecs = &soc15_query_video_codecs,
1146 };
1147 
1148 static int soc15_common_early_init(void *handle)
1149 {
1150 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
1151 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1152 
1153 	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
1154 	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
1155 	adev->smc_rreg = NULL;
1156 	adev->smc_wreg = NULL;
1157 	adev->pcie_rreg = &soc15_pcie_rreg;
1158 	adev->pcie_wreg = &soc15_pcie_wreg;
1159 	adev->pcie_rreg64 = &soc15_pcie_rreg64;
1160 	adev->pcie_wreg64 = &soc15_pcie_wreg64;
1161 	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
1162 	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
1163 	adev->didt_rreg = &soc15_didt_rreg;
1164 	adev->didt_wreg = &soc15_didt_wreg;
1165 	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
1166 	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
1167 	adev->se_cac_rreg = &soc15_se_cac_rreg;
1168 	adev->se_cac_wreg = &soc15_se_cac_wreg;
1169 
1170 	adev->rev_id = soc15_get_rev_id(adev);
1171 	adev->external_rev_id = 0xFF;
1172 	/* TODO: split the CG and PG flags based on the IP version to which
1173 	 * they apply.
1174 	 */
1175 	switch (adev->ip_versions[GC_HWIP][0]) {
1176 	case IP_VERSION(9, 0, 1):
1177 		adev->asic_funcs = &soc15_asic_funcs;
1178 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1179 			AMD_CG_SUPPORT_GFX_MGLS |
1180 			AMD_CG_SUPPORT_GFX_RLC_LS |
1181 			AMD_CG_SUPPORT_GFX_CP_LS |
1182 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1183 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1184 			AMD_CG_SUPPORT_GFX_CGCG |
1185 			AMD_CG_SUPPORT_GFX_CGLS |
1186 			AMD_CG_SUPPORT_BIF_MGCG |
1187 			AMD_CG_SUPPORT_BIF_LS |
1188 			AMD_CG_SUPPORT_HDP_LS |
1189 			AMD_CG_SUPPORT_DRM_MGCG |
1190 			AMD_CG_SUPPORT_DRM_LS |
1191 			AMD_CG_SUPPORT_ROM_MGCG |
1192 			AMD_CG_SUPPORT_DF_MGCG |
1193 			AMD_CG_SUPPORT_SDMA_MGCG |
1194 			AMD_CG_SUPPORT_SDMA_LS |
1195 			AMD_CG_SUPPORT_MC_MGCG |
1196 			AMD_CG_SUPPORT_MC_LS;
1197 		adev->pg_flags = 0;
1198 		adev->external_rev_id = 0x1;
1199 		break;
1200 	case IP_VERSION(9, 2, 1):
1201 		adev->asic_funcs = &soc15_asic_funcs;
1202 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1203 			AMD_CG_SUPPORT_GFX_MGLS |
1204 			AMD_CG_SUPPORT_GFX_CGCG |
1205 			AMD_CG_SUPPORT_GFX_CGLS |
1206 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1207 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1208 			AMD_CG_SUPPORT_GFX_CP_LS |
1209 			AMD_CG_SUPPORT_MC_LS |
1210 			AMD_CG_SUPPORT_MC_MGCG |
1211 			AMD_CG_SUPPORT_SDMA_MGCG |
1212 			AMD_CG_SUPPORT_SDMA_LS |
1213 			AMD_CG_SUPPORT_BIF_MGCG |
1214 			AMD_CG_SUPPORT_BIF_LS |
1215 			AMD_CG_SUPPORT_HDP_MGCG |
1216 			AMD_CG_SUPPORT_HDP_LS |
1217 			AMD_CG_SUPPORT_ROM_MGCG |
1218 			AMD_CG_SUPPORT_VCE_MGCG |
1219 			AMD_CG_SUPPORT_UVD_MGCG;
1220 		adev->pg_flags = 0;
1221 		adev->external_rev_id = adev->rev_id + 0x14;
1222 		break;
1223 	case IP_VERSION(9, 4, 0):
1224 		adev->asic_funcs = &vega20_asic_funcs;
1225 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1226 			AMD_CG_SUPPORT_GFX_MGLS |
1227 			AMD_CG_SUPPORT_GFX_CGCG |
1228 			AMD_CG_SUPPORT_GFX_CGLS |
1229 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1230 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1231 			AMD_CG_SUPPORT_GFX_CP_LS |
1232 			AMD_CG_SUPPORT_MC_LS |
1233 			AMD_CG_SUPPORT_MC_MGCG |
1234 			AMD_CG_SUPPORT_SDMA_MGCG |
1235 			AMD_CG_SUPPORT_SDMA_LS |
1236 			AMD_CG_SUPPORT_BIF_MGCG |
1237 			AMD_CG_SUPPORT_BIF_LS |
1238 			AMD_CG_SUPPORT_HDP_MGCG |
1239 			AMD_CG_SUPPORT_HDP_LS |
1240 			AMD_CG_SUPPORT_ROM_MGCG |
1241 			AMD_CG_SUPPORT_VCE_MGCG |
1242 			AMD_CG_SUPPORT_UVD_MGCG;
1243 		adev->pg_flags = 0;
1244 		adev->external_rev_id = adev->rev_id + 0x28;
1245 		break;
1246 	case IP_VERSION(9, 1, 0):
1247 	case IP_VERSION(9, 2, 2):
1248 		adev->asic_funcs = &soc15_asic_funcs;
1249 
1250 		if (adev->rev_id >= 0x8)
1251 			adev->apu_flags |= AMD_APU_IS_RAVEN2;
1252 
1253 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1254 			adev->external_rev_id = adev->rev_id + 0x79;
1255 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1256 			adev->external_rev_id = adev->rev_id + 0x41;
1257 		else if (adev->rev_id == 1)
1258 			adev->external_rev_id = adev->rev_id + 0x20;
1259 		else
1260 			adev->external_rev_id = adev->rev_id + 0x01;
1261 
1262 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1263 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1264 				AMD_CG_SUPPORT_GFX_MGLS |
1265 				AMD_CG_SUPPORT_GFX_CP_LS |
1266 				AMD_CG_SUPPORT_GFX_3D_CGCG |
1267 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1268 				AMD_CG_SUPPORT_GFX_CGCG |
1269 				AMD_CG_SUPPORT_GFX_CGLS |
1270 				AMD_CG_SUPPORT_BIF_LS |
1271 				AMD_CG_SUPPORT_HDP_LS |
1272 				AMD_CG_SUPPORT_MC_MGCG |
1273 				AMD_CG_SUPPORT_MC_LS |
1274 				AMD_CG_SUPPORT_SDMA_MGCG |
1275 				AMD_CG_SUPPORT_SDMA_LS |
1276 				AMD_CG_SUPPORT_VCN_MGCG;
1277 
1278 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1279 		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1280 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1281 				AMD_CG_SUPPORT_GFX_MGLS |
1282 				AMD_CG_SUPPORT_GFX_CP_LS |
1283 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1284 				AMD_CG_SUPPORT_GFX_CGCG |
1285 				AMD_CG_SUPPORT_GFX_CGLS |
1286 				AMD_CG_SUPPORT_BIF_LS |
1287 				AMD_CG_SUPPORT_HDP_LS |
1288 				AMD_CG_SUPPORT_MC_MGCG |
1289 				AMD_CG_SUPPORT_MC_LS |
1290 				AMD_CG_SUPPORT_SDMA_MGCG |
1291 				AMD_CG_SUPPORT_SDMA_LS |
1292 				AMD_CG_SUPPORT_VCN_MGCG;
1293 
1294 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1295 				AMD_PG_SUPPORT_MMHUB |
1296 				AMD_PG_SUPPORT_VCN;
1297 		} else {
1298 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1299 				AMD_CG_SUPPORT_GFX_MGLS |
1300 				AMD_CG_SUPPORT_GFX_RLC_LS |
1301 				AMD_CG_SUPPORT_GFX_CP_LS |
1302 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1303 				AMD_CG_SUPPORT_GFX_CGCG |
1304 				AMD_CG_SUPPORT_GFX_CGLS |
1305 				AMD_CG_SUPPORT_BIF_MGCG |
1306 				AMD_CG_SUPPORT_BIF_LS |
1307 				AMD_CG_SUPPORT_HDP_MGCG |
1308 				AMD_CG_SUPPORT_HDP_LS |
1309 				AMD_CG_SUPPORT_DRM_MGCG |
1310 				AMD_CG_SUPPORT_DRM_LS |
1311 				AMD_CG_SUPPORT_MC_MGCG |
1312 				AMD_CG_SUPPORT_MC_LS |
1313 				AMD_CG_SUPPORT_SDMA_MGCG |
1314 				AMD_CG_SUPPORT_SDMA_LS |
1315 				AMD_CG_SUPPORT_VCN_MGCG;
1316 
1317 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1318 		}
1319 		break;
1320 	case IP_VERSION(9, 4, 1):
1321 		adev->asic_funcs = &vega20_asic_funcs;
1322 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1323 			AMD_CG_SUPPORT_GFX_MGLS |
1324 			AMD_CG_SUPPORT_GFX_CGCG |
1325 			AMD_CG_SUPPORT_GFX_CGLS |
1326 			AMD_CG_SUPPORT_GFX_CP_LS |
1327 			AMD_CG_SUPPORT_HDP_MGCG |
1328 			AMD_CG_SUPPORT_HDP_LS |
1329 			AMD_CG_SUPPORT_SDMA_MGCG |
1330 			AMD_CG_SUPPORT_SDMA_LS |
1331 			AMD_CG_SUPPORT_MC_MGCG |
1332 			AMD_CG_SUPPORT_MC_LS |
1333 			AMD_CG_SUPPORT_IH_CG |
1334 			AMD_CG_SUPPORT_VCN_MGCG |
1335 			AMD_CG_SUPPORT_JPEG_MGCG;
1336 		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1337 		adev->external_rev_id = adev->rev_id + 0x32;
1338 		break;
1339 	case IP_VERSION(9, 3, 0):
1340 		adev->asic_funcs = &soc15_asic_funcs;
1341 
1342 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1343 			adev->external_rev_id = adev->rev_id + 0x91;
1344 		else
1345 			adev->external_rev_id = adev->rev_id + 0xa1;
1346 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1347 				 AMD_CG_SUPPORT_GFX_MGLS |
1348 				 AMD_CG_SUPPORT_GFX_3D_CGCG |
1349 				 AMD_CG_SUPPORT_GFX_3D_CGLS |
1350 				 AMD_CG_SUPPORT_GFX_CGCG |
1351 				 AMD_CG_SUPPORT_GFX_CGLS |
1352 				 AMD_CG_SUPPORT_GFX_CP_LS |
1353 				 AMD_CG_SUPPORT_MC_MGCG |
1354 				 AMD_CG_SUPPORT_MC_LS |
1355 				 AMD_CG_SUPPORT_SDMA_MGCG |
1356 				 AMD_CG_SUPPORT_SDMA_LS |
1357 				 AMD_CG_SUPPORT_BIF_LS |
1358 				 AMD_CG_SUPPORT_HDP_LS |
1359 				 AMD_CG_SUPPORT_VCN_MGCG |
1360 				 AMD_CG_SUPPORT_JPEG_MGCG |
1361 				 AMD_CG_SUPPORT_IH_CG |
1362 				 AMD_CG_SUPPORT_ATHUB_LS |
1363 				 AMD_CG_SUPPORT_ATHUB_MGCG |
1364 				 AMD_CG_SUPPORT_DF_MGCG;
1365 		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1366 				 AMD_PG_SUPPORT_VCN |
1367 				 AMD_PG_SUPPORT_JPEG |
1368 				 AMD_PG_SUPPORT_VCN_DPG;
1369 		break;
1370 	case IP_VERSION(9, 4, 2):
1371 		adev->asic_funcs = &vega20_asic_funcs;
1372 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1373 			AMD_CG_SUPPORT_GFX_MGLS |
1374 			AMD_CG_SUPPORT_GFX_CP_LS |
1375 			AMD_CG_SUPPORT_HDP_LS |
1376 			AMD_CG_SUPPORT_SDMA_MGCG |
1377 			AMD_CG_SUPPORT_SDMA_LS |
1378 			AMD_CG_SUPPORT_IH_CG |
1379 			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
1380 		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
1381 		adev->external_rev_id = adev->rev_id + 0x3c;
1382 		break;
1383 	default:
1384 		/* FIXME: not supported yet */
1385 		return -EINVAL;
1386 	}
1387 
1388 	if (amdgpu_sriov_vf(adev)) {
1389 		amdgpu_virt_init_setting(adev);
1390 		xgpu_ai_mailbox_set_irq_funcs(adev);
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 static int soc15_common_late_init(void *handle)
1397 {
1398 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1399 	int r = 0;
1400 
1401 	if (amdgpu_sriov_vf(adev))
1402 		xgpu_ai_mailbox_get_irq(adev);
1403 
1404 	if (adev->nbio.ras_funcs &&
1405 	    adev->nbio.ras_funcs->ras_late_init)
1406 		r = adev->nbio.ras_funcs->ras_late_init(adev);
1407 
1408 	return r;
1409 }
1410 
1411 static int soc15_common_sw_init(void *handle)
1412 {
1413 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1414 
1415 	if (amdgpu_sriov_vf(adev))
1416 		xgpu_ai_mailbox_add_irq_id(adev);
1417 
1418 	adev->df.funcs->sw_init(adev);
1419 
1420 	return 0;
1421 }
1422 
1423 static int soc15_common_sw_fini(void *handle)
1424 {
1425 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1426 
1427 	if (adev->nbio.ras_funcs &&
1428 	    adev->nbio.ras_funcs->ras_fini)
1429 		adev->nbio.ras_funcs->ras_fini(adev);
1430 	adev->df.funcs->sw_fini(adev);
1431 	return 0;
1432 }
1433 
1434 static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1435 {
1436 	int i;
1437 	struct amdgpu_ring *ring;
1438 
1439 	/* sdma/ih doorbell ranges are programmed by the hypervisor */
1440 	if (!amdgpu_sriov_vf(adev)) {
1441 		for (i = 0; i < adev->sdma.num_instances; i++) {
1442 			ring = &adev->sdma.instance[i].ring;
1443 			adev->nbio.funcs->sdma_doorbell_range(adev, i,
1444 				ring->use_doorbell, ring->doorbell_index,
1445 				adev->doorbell_index.sdma_doorbell_range);
1446 		}
1447 
1448 		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1449 						adev->irq.ih.doorbell_index);
1450 	}
1451 }
1452 
1453 static int soc15_common_hw_init(void *handle)
1454 {
1455 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1456 
1457 	/* enable pcie gen2/3 link */
1458 	soc15_pcie_gen3_enable(adev);
1459 	/* enable aspm */
1460 	soc15_program_aspm(adev);
1461 	/* setup nbio registers */
1462 	adev->nbio.funcs->init_registers(adev);
1463 	/* remap HDP registers to a hole in mmio space,
1464 	 * in order to expose those registers
1465 	 * to process space
1466 	 */
1467 	if (adev->nbio.funcs->remap_hdp_registers)
1468 		adev->nbio.funcs->remap_hdp_registers(adev);
1469 
1470 	/* enable the doorbell aperture */
1471 	soc15_enable_doorbell_aperture(adev, true);
1472 	/* HW doorbell routing policy: doorbell writes not
1473 	 * in the SDMA/IH/MM/ACV ranges will be routed to CP. So
1474 	 * we need to init the SDMA/IH/MM/ACV doorbell ranges prior
1475 	 * to CP ip block init and ring test.
1476 	 */
1477 	soc15_doorbell_range_init(adev);
1478 
1479 	return 0;
1480 }
1481 
1482 static int soc15_common_hw_fini(void *handle)
1483 {
1484 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1485 
1486 	/* disable the doorbell aperture */
1487 	soc15_enable_doorbell_aperture(adev, false);
1488 	if (amdgpu_sriov_vf(adev))
1489 		xgpu_ai_mailbox_put_irq(adev);
1490 
1491 	if (adev->nbio.ras_if &&
1492 	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1493 		if (adev->nbio.ras_funcs &&
1494 		    adev->nbio.ras_funcs->init_ras_controller_interrupt)
1495 			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1496 		if (adev->nbio.ras_funcs &&
1497 		    adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
1498 			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1499 	}
1500 
1501 	return 0;
1502 }
1503 
1504 static int soc15_common_suspend(void *handle)
1505 {
1506 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1507 
1508 	return soc15_common_hw_fini(adev);
1509 }
1510 
1511 static int soc15_common_resume(void *handle)
1512 {
1513 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1514 
1515 	return soc15_common_hw_init(adev);
1516 }
1517 
1518 static bool soc15_common_is_idle(void *handle)
1519 {
1520 	return true;
1521 }
1522 
1523 static int soc15_common_wait_for_idle(void *handle)
1524 {
1525 	return 0;
1526 }
1527 
1528 static int soc15_common_soft_reset(void *handle)
1529 {
1530 	return 0;
1531 }
1532 
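/*
 * DRM block medium grain clock gating is enabled by clearing bits 24..31 of
 * MP0_MISC_CGTT_CTRL0 and disabled by setting them; DRM light sleep is
 * controlled by bit 0 of MP0_MISC_LIGHT_SLEEP_CTRL.
 */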
1533 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1534 {
1535 	uint32_t def, data;
1536 
1537 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1538 
1539 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1540 		data &= ~(0x01000000 |
1541 			  0x02000000 |
1542 			  0x04000000 |
1543 			  0x08000000 |
1544 			  0x10000000 |
1545 			  0x20000000 |
1546 			  0x40000000 |
1547 			  0x80000000);
1548 	else
1549 		data |= (0x01000000 |
1550 			 0x02000000 |
1551 			 0x04000000 |
1552 			 0x08000000 |
1553 			 0x10000000 |
1554 			 0x20000000 |
1555 			 0x40000000 |
1556 			 0x80000000);
1557 
1558 	if (def != data)
1559 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1560 }
1561 
1562 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1563 {
1564 	uint32_t def, data;
1565 
1566 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1567 
1568 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1569 		data |= 1;
1570 	else
1571 		data &= ~1;
1572 
1573 	if (def != data)
1574 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1575 }
1576 
1577 static int soc15_common_set_clockgating_state(void *handle,
1578 					    enum amd_clockgating_state state)
1579 {
1580 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1581 
1582 	if (amdgpu_sriov_vf(adev))
1583 		return 0;
1584 
1585 	switch (adev->ip_versions[NBIO_HWIP][0]) {
1586 	case IP_VERSION(6, 1, 0):
1587 	case IP_VERSION(6, 2, 0):
1588 	case IP_VERSION(7, 4, 0):
1589 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1590 				state == AMD_CG_STATE_GATE);
1591 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1592 				state == AMD_CG_STATE_GATE);
1593 		adev->hdp.funcs->update_clock_gating(adev,
1594 				state == AMD_CG_STATE_GATE);
1595 		soc15_update_drm_clock_gating(adev,
1596 				state == AMD_CG_STATE_GATE);
1597 		soc15_update_drm_light_sleep(adev,
1598 				state == AMD_CG_STATE_GATE);
1599 		adev->smuio.funcs->update_rom_clock_gating(adev,
1600 				state == AMD_CG_STATE_GATE);
1601 		adev->df.funcs->update_medium_grain_clock_gating(adev,
1602 				state == AMD_CG_STATE_GATE);
1603 		break;
1604 	case IP_VERSION(7, 0, 0):
1605 	case IP_VERSION(7, 0, 1):
1606 	case IP_VERSION(2, 5, 0):
1607 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1608 				state == AMD_CG_STATE_GATE);
1609 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1610 				state == AMD_CG_STATE_GATE);
1611 		adev->hdp.funcs->update_clock_gating(adev,
1612 				state == AMD_CG_STATE_GATE);
1613 		soc15_update_drm_clock_gating(adev,
1614 				state == AMD_CG_STATE_GATE);
1615 		soc15_update_drm_light_sleep(adev,
1616 				state == AMD_CG_STATE_GATE);
1617 		break;
1618 	case IP_VERSION(7, 4, 1):
1619 	case IP_VERSION(7, 4, 4):
1620 		adev->hdp.funcs->update_clock_gating(adev,
1621 				state == AMD_CG_STATE_GATE);
1622 		break;
1623 	default:
1624 		break;
1625 	}
1626 	return 0;
1627 }
1628 
1629 static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
1630 {
1631 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1632 	int data;
1633 
1634 	if (amdgpu_sriov_vf(adev))
1635 		*flags = 0;
1636 
1637 	adev->nbio.funcs->get_clockgating_state(adev, flags);
1638 
1639 	adev->hdp.funcs->get_clock_gating_state(adev, flags);
1640 
1641 	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
1642 
1643 		/* AMD_CG_SUPPORT_DRM_MGCG */
1644 		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1645 		if (!(data & 0x01000000))
1646 			*flags |= AMD_CG_SUPPORT_DRM_MGCG;
1647 
1648 		/* AMD_CG_SUPPORT_DRM_LS */
1649 		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1650 		if (data & 0x1)
1651 			*flags |= AMD_CG_SUPPORT_DRM_LS;
1652 	}
1653 
1654 	/* AMD_CG_SUPPORT_ROM_MGCG */
1655 	adev->smuio.funcs->get_clock_gating_state(adev, flags);
1656 
1657 	adev->df.funcs->get_clockgating_state(adev, flags);
1658 }
1659 
1660 static int soc15_common_set_powergating_state(void *handle,
1661 					    enum amd_powergating_state state)
1662 {
1663 	/* todo */
1664 	return 0;
1665 }
1666 
1667 static const struct amd_ip_funcs soc15_common_ip_funcs = {
1668 	.name = "soc15_common",
1669 	.early_init = soc15_common_early_init,
1670 	.late_init = soc15_common_late_init,
1671 	.sw_init = soc15_common_sw_init,
1672 	.sw_fini = soc15_common_sw_fini,
1673 	.hw_init = soc15_common_hw_init,
1674 	.hw_fini = soc15_common_hw_fini,
1675 	.suspend = soc15_common_suspend,
1676 	.resume = soc15_common_resume,
1677 	.is_idle = soc15_common_is_idle,
1678 	.wait_for_idle = soc15_common_wait_for_idle,
1679 	.soft_reset = soc15_common_soft_reset,
1680 	.set_clockgating_state = soc15_common_set_clockgating_state,
1681 	.set_powergating_state = soc15_common_set_powergating_state,
1682 	.get_clockgating_state = soc15_common_get_clockgating_state,
1683 };
1684