/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c

/*
 * These are the nbio v7_4_1 register masks. They are temporarily defined
 * here since the nbio v7_4_1 header is incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L

#define mmBIF_MMSCH1_DOORBELL_RANGE                     0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX            2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT        0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT          0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK          0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK            0x001F0000L

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status);

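/* Remap the HDP memory- and register-flush control registers into the
 * fixed per-device MMIO page, so that HDP flushes can be issued through
 * the KFD remap aperture.
 */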
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

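/* Flush HDP: write the remapped HDP_MEM_FLUSH_CNTL register directly, or
 * emit the write on the given ring when it supports emit_wreg.
 */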
static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

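/* Program the doorbell range aperture for one SDMA instance; the register
 * layout for instances 2..7 is not contiguous with instances 0..1 (see the
 * comment below).
 */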
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
			bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2)
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	else
		/*
		 * The register addresses of SDMA2~7 are not consecutive
		 * with those of SDMA0~1, so an extra 4-dword offset is needed:
		 *
		 *   BIF_SDMA0_DOORBELL_RANGE:  0x3bc0
		 *   BIF_SDMA1_DOORBELL_RANGE:  0x3bc4
		 *   BIF_SDMA2_DOORBELL_RANGE:  0x3bd8
		 */
		reg = instance + 0x4 +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

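/* Program the MMSCH doorbell range used by VCN for the given instance;
 * instance 1 uses the BIF_MMSCH1_DOORBELL_RANGE register defined above.
 */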
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg;
	u32 doorbell_range;

	if (instance)
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

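/* Enable the doorbell self-ring aperture and program its GPA base; when
 * disabling, the aperture control register is simply cleared.
 */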
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

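/* Program the IH doorbell range aperture (SIZE field set to 4 when enabled). */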
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	//TODO: Add support for v7.4
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

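/* Set up interrupt control: program the dummy-read page address and the
 * dummy-read / non-snoop behavior for the IH ring.
 */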
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

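/* HDP flush "done" reference masks; SDMA instances 2..7 map to the
 * RSVD_ENG bits defined at the top of this file.
 */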
const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
}

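/* Handle a RAS controller interrupt when the BIF ring is disabled: clear
 * the latched status, update the error counts and kick off a GPU reset,
 * since ras_controller_intr indicates an NBIF error.
 */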
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_CNTLR_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		/*
		 * clear the error status after ras_controller_intr, per the
		 * hw team, and count the ue number for later queries
		 */
		nbio_v7_4_query_ras_error_count(adev, &err_data);

		/* log the error counters and print them for awareness */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;

		if (err_data.ce_count)
			dev_info(adev->dev, "%ld correctable hardware errors detected in %s block, no user action is needed.\n",
				 obj->err_data.ce_count,
				 adev->nbio.ras_if->name);

		if (err_data.ue_count)
			dev_info(adev->dev, "%ld uncorrectable hardware errors detected in %s block\n",
				 obj->err_data.ue_count,
				 adev->nbio.ras_if->name);

		dev_info(adev->dev, "RAS controller interrupt triggered by NBIF error\n");

		/* ras_controller_intr is dedicated to nbif ras errors,
		 * not the global interrupt for sync flood
		 */
		amdgpu_ras_reset_gpu(adev);
	}
}

static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}

static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in the psp bl when
	 * it tries to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases,
	 * respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. There is no chance the
	 * process function will be invoked, so just leave it as a dummy one.
	 */
	return 0;
}

static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* The ras_err_event_athub_irq enablement should be done in the psp bl
	 * when it tries to enable the ras feature. The driver only needs to
	 * set the correct interrupt vector for the bare-metal and sriov use
	 * cases, respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. There is no chance the
	 * process function will be invoked, so just leave it as a dummy one.
	 */
	return 0;
}

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};

static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_controller_irq.funcs =
		&nbio_v7_4_ras_controller_irq_funcs;
	adev->nbio.ras_controller_irq.num_types = 1;

	/* register the ras controller interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
			      &adev->nbio.ras_controller_irq);

	return r;
}

static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbio_v7_4_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register the ras err event athub interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030

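/* Query the correctable/uncorrectable parity error counts from the RAS
 * global status register and clear the latched status bits once they have
 * been recorded.
 */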
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi, parity_sts;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				ParityErrNonFatal);
	parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
		/* clear error status register */
		WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (fatal)
			/* clear parity fatal error indication field */
			WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
				    parity_sts);

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
					IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}

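/* Enable or disable the doorbell interrupt by clearing or setting the
 * DOORBELL_INTERRUPT_DISABLE field.
 */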
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
		       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.hdp_flush = nbio_v7_4_hdp_flush,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
	.ras_late_init = amdgpu_nbio_ras_late_init,
};