/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_9.h"
#include "amdgpu_ras.h"

#include "nbio/nbio_7_9_0_offset.h"
#include "nbio/nbio_7_9_0_sh_mask.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

#define NPS_MODE_MASK 0x000000FFL

/* Core 0 Port 0 counter */
#define smnPCIEP_NAK_COUNTER 0x1A340218

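/*
 * Remap the HDP memory/register flush control registers into the fixed
 * KFD MMIO remap window so they can be reached through the mapped
 * rmmio_remap aperture.
 */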
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

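/* Return the ATI revision ID from the dev0 function 0 strap register. */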
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
	tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0);

	return tmp;
}

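/* Enable or disable framebuffer read/write access through the BIF. */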
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
			BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}

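/* Return the framebuffer size reported by the RCC config register (in MB). */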
static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}

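/*
 * Program the doorbell range and S2A routing for one SDMA instance.
 * The instance's position within its AID selects which DOORBELL0_CTRL
 * entry and S2A doorbell entry are used; the range entries are offset
 * per AID and the S2A entries are written on that AID's NBIO instance.
 */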
static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
			bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 doorbell_range = 0, doorbell_ctrl = 0;
	int aid_id, dev_inst;

	dev_inst = GET_INST(SDMA0, instance);
	aid_id = adev->sdma.instance[instance].aid_id;

	if (!use_doorbell)
		return;

	doorbell_range =
		REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
			BIF_DOORBELL0_RANGE_OFFSET_ENTRY, doorbell_index);
	doorbell_range =
		REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
			BIF_DOORBELL0_RANGE_SIZE_ENTRY, doorbell_size);
	doorbell_ctrl =
		REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
			S2A_DOORBELL_PORT1_ENABLE, 1);
	doorbell_ctrl =
		REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
			S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size);

	switch (dev_inst % adev->sdma.num_inst_per_aid) {
	case 0:
		WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1,
			4 * aid_id, doorbell_range);

		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWID, 0xe);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xe);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
					0x1);
		WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_1_CTRL,
			aid_id, doorbell_ctrl);
		break;
	case 1:
		WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2,
			4 * aid_id, doorbell_range);

		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWID, 0x8);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x8);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
					0x2);
		WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_2_CTRL,
			aid_id, doorbell_ctrl);
		break;
	case 2:
		WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3,
			4 * aid_id, doorbell_range);

		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWID, 0x9);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x9);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
					0x8);
		WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_5_CTRL,
			aid_id, doorbell_ctrl);
		break;
	case 3:
		WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4,
			4 * aid_id, doorbell_range);

		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWID, 0xa);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xa);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
					S2A_DOORBELL_ENTRY_1_CTRL,
					S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
					0x9);
		WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_6_CTRL,
			aid_id, doorbell_ctrl);
		break;
	default:
		break;
	}
}

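/*
 * Program the VCN doorbell range and S2A routing.  The VCN instance
 * number doubles as the AID index; when enabled, doorbell range entry 17
 * (offset by the AID index) and S2A entry 4 on that AID are set up, and
 * both ranges are shrunk to zero when doorbells are disabled.
 */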
static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 doorbell_range = 0, doorbell_ctrl = 0;
	u32 aid_id = instance;

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
				DOORBELL0_CTRL_ENTRY_0,
				BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
				doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
				DOORBELL0_CTRL_ENTRY_0,
				BIF_DOORBELL0_RANGE_SIZE_ENTRY,
				0x9);
		if (aid_id)
			doorbell_range = REG_SET_FIELD(doorbell_range,
					DOORBELL0_CTRL_ENTRY_0,
					DOORBELL0_FENCE_ENABLE_ENTRY,
					0x4);

		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_ENABLE, 1);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_AWID, 0x4);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);

		WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17,
					aid_id, doorbell_range);
		WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL,
				aid_id, doorbell_ctrl);
	} else {
		doorbell_range = REG_SET_FIELD(doorbell_range,
				DOORBELL0_CTRL_ENTRY_0,
				BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0);
		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_RANGE_SIZE, 0);

		WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17,
					aid_id, doorbell_range);
		WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL,
				aid_id, doorbell_ctrl);
	}
}

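/* Enable or disable the doorbell aperture for the physical function. */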
static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	/* Enable to allow doorbell pass thru on pre-silicon bare-metal */
	WREG32_SOC15(NBIO, 0, regBIFC_DOORBELL_ACCESS_EN_PF, 0xfffff);
	WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
			BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

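/*
 * Set up the doorbell self-ring GPA aperture: when enabled, point it at
 * the doorbell BAR base so the device can target its own doorbells;
 * otherwise clear the aperture control register.
 */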
static void nbio_v7_9_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

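/*
 * Program the IH doorbell range (entry 0) and its S2A routing (entry 3),
 * or shrink both ranges to zero when the IH ring does not use doorbells.
 */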
static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = 0, ih_doorbell_ctrl = 0;

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
				DOORBELL0_CTRL_ENTRY_0,
				BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
				doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
				DOORBELL0_CTRL_ENTRY_0,
				BIF_DOORBELL0_RANGE_SIZE_ENTRY,
				0x8);

		ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_ENABLE, 1);
		ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_AWID, 0);
		ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_RANGE_OFFSET, 0);
		ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8);
		ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0);
	} else {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
				DOORBELL0_CTRL_ENTRY_0,
				BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0);
		ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
				S2A_DOORBELL_ENTRY_1_CTRL,
				S2A_DOORBELL_PORT1_RANGE_SIZE, 0);
	}

	WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_0, ih_doorbell_range);
	WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, ih_doorbell_ctrl);
}

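/*
 * NBIO v7.9 does not toggle clock gating or light sleep from the driver;
 * these callbacks are intentionally empty.
 */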
static void nbio_v7_9_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
}

static void nbio_v7_9_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
}

static void nbio_v7_9_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
}

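/*
 * Basic interrupt controller setup: point INTERRUPT_CNTL2 at the dummy
 * page and keep the dummy-read override and non-snooped IH requests
 * disabled.
 */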
static void nbio_v7_9_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl =
		REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl =
		REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}

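/*
 * Register offset helpers consumed by common amdgpu code: the HDP flush
 * request/done registers and the indirect PCIE index/data pair.
 */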
static u32 nbio_v7_9_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_9_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_9_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2);
}

static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
}

static u32 nbio_v7_9_get_pcie_index_hi_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2_HI);
}

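/*
 * Per-client reference/mask bits used when polling for HDP flush
 * completion; SDMA instances 2-7 are reported via the reserved engine bits.
 */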
const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
	.ref_and_mask_sdma2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
	.ref_and_mask_sdma3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

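/*
 * Enable or disable doorbell interrupts.  Note the register field has
 * inverted polarity (DOORBELL_INTERRUPT_DISABLE).
 */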
static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15_PREREG(NBIO, 0, BIF_BX0_BIF_DOORBELL_INT_CNTL,
			      DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

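/* Return the current compute partition mode reported by the BIF. */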
static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
{
	u32 tmp, px;

	tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS);
	px = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS,
			   PARTITION_MODE);

	return px;
}

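/*
 * Return the current NPS (memory partition) mode.  The status register
 * reports the mode as a one-hot bit, so ffs() converts it to a mode
 * number; the optional capability register lists the supported modes.
 */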
static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
					       u32 *supp_modes)
{
	u32 tmp;

	tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
	tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);

	if (supp_modes) {
		*supp_modes =
			RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_CAP);
	}

	return ffs(tmp);
}

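/*
 * One-time register init: fence doorbells for XCCs that are not present,
 * program the GFX interrupt monitor mask, and switch doorbell fencing on
 * the secondary AIDs to SHUB slave mode.
 */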
static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
{
	u32 inst_mask;
	int i;

	WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
		0xff & ~(adev->gfx.xcc_mask));

	WREG32_SOC15(NBIO, 0, regBIFC_GFX_INT_MONITOR_MASK, 0x7ff);

	inst_mask = adev->aid_mask & ~1U;
	for_each_inst(i, inst_mask) {
		WREG32_SOC15_EXT(NBIO, i, regXCC_DOORBELL_FENCE, i,
			XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
	}
}

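/*
 * Report the PCIe replay count as the sum of NAKs received and NAKs
 * generated on core 0 port 0; APUs have no such counter and return 0.
 */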
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
{
	u32 val, nak_r, nak_g;

	if (adev->flags & AMD_IS_APU)
		return 0;

	/* Get the number of NAKs received and generated */
	val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
	nak_r = val & 0xFFFF;
	nak_g = val >> 16;

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

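/* NBIO v7.9 callback table used by the rest of the amdgpu driver. */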
const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_9_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_9_get_pcie_data_offset,
	.get_pcie_index_hi_offset = nbio_v7_9_get_pcie_index_hi_offset,
	.get_rev_id = nbio_v7_9_get_rev_id,
	.mc_access_enable = nbio_v7_9_mc_access_enable,
	.get_memsize = nbio_v7_9_get_memsize,
	.sdma_doorbell_range = nbio_v7_9_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_9_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_9_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_9_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_9_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_9_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_9_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_9_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_9_get_clockgating_state,
	.ih_control = nbio_v7_9_ih_control,
	.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
	.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
	.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
	.init_registers = nbio_v7_9_init_registers,
	.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
};