/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c

/*
 * These are the nbio v7_4_1 register masks. Temporarily define them here
 * since the nbio v7_4_1 header is incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L

#define mmBIF_MMSCH1_DOORBELL_RANGE                     0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX            2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT        0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT          0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK          0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK            0x001F0000L

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status);

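/*
 * Remap the HDP MEM/REG flush control registers into the fixed mmio remap
 * window so they can be reached at the KFD_MMIO_REMAP_* offsets.
 */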
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

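/*
 * Program the doorbell range for one SDMA instance: set the offset and size
 * when doorbells are in use, or a zero size to disable the range.
 */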
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
			bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2)
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	else
		/*
		 * The register addresses of SDMA2~7 are not consecutive
		 * with SDMA0~1, so an extra offset of 4 dwords is needed.
		 *
		 *   BIF_SDMA0_DOORBELL_RANGE:  0x3bc0
		 *   BIF_SDMA1_DOORBELL_RANGE:  0x3bc4
		 *   BIF_SDMA2_DOORBELL_RANGE:  0x3bd8
		 */
		reg = instance + 0x4 +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

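/*
 * Program the MMSCH doorbell range for a VCN instance: MMSCH0 or MMSCH1 is
 * selected by the instance argument, with a fixed size of 8 when enabled.
 */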
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg;
	u32 doorbell_range;

	if (instance)
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

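/*
 * Program the IH (interrupt handler) doorbell range: set the offset and a
 * fixed size of 4 when doorbells are in use, or a zero size to disable it.
 */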
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}


static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	//TODO: Add support for v7.4
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

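/*
 * Set up interrupt control: point INTERRUPT_CNTL2 at the dummy page and
 * leave the dummy-read override and non-snoop request bits cleared.
 */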
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{

}

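/*
 * Handle a RAS controller interrupt when the BIF ring is disabled: clear the
 * interrupt status, query and accumulate the error counts, then trigger a
 * GPU reset since this interrupt is dedicated to NBIF RAS errors.
 */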
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_CNTLR_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		/*
		 * Clear the error status after the ras_controller_intr, as
		 * advised by the hw team, and count the UE number for query.
		 */
		nbio_v7_4_query_ras_error_count(adev, &err_data);

		/* log the error counters and print them for awareness */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;

		if (err_data.ce_count)
			DRM_INFO("%ld correctable errors detected in %s block\n",
				obj->err_data.ce_count, adev->nbio.ras_if->name);

		if (err_data.ue_count)
			DRM_INFO("%ld uncorrectable errors detected in %s block\n",
				obj->err_data.ue_count, adev->nbio.ras_if->name);

		DRM_WARN("RAS controller interrupt triggered by NBIF error\n");

		/* ras_controller_int is dedicated for nbif ras error,
		 * not the global interrupt for sync flood
		 */
		amdgpu_ras_reset_gpu(adev);
	}
}

static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}


static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in the psp bl when
	 * it tries to enable the ras feature. The driver only needs to set
	 * the correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to the BIF ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. There is no chance
	 * the process function will be invoked, so just leave it as a dummy.
	 */
	return 0;
}

static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* The ras_err_event_athub_irq enablement should be done in the psp bl
	 * when it tries to enable the ras feature. The driver only needs to
	 * set the correct interrupt vector for the bare-metal and sriov use
	 * cases respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the BIF ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. There is no chance
	 * the process function will be invoked, so just leave it as a dummy.
	 */
	return 0;
}

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};

static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_controller_irq.funcs =
		&nbio_v7_4_ras_controller_irq_funcs;
	adev->nbio.ras_controller_irq.num_types = 1;

	/* register ras controller interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
			      &adev->nbio.ras_controller_irq);

	return r;
}

static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbio_v7_4_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register ras err event athub interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030

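/*
 * Query the NBIF RAS error counts from RAS_GLOBAL_STATUS_LO, bump the
 * ce/ue counters accordingly, and clear the status and interrupt
 * registers when any parity error is reported.
 */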
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi, parity_sts;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				ParityErrNonFatal);
	parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
		/* clear error status register */
		WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (fatal)
			/* clear parity fatal error indication field */
			WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
				    parity_sts);

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
					IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}

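/* Enable or disable the BIF doorbell interrupt via BIF_DOORBELL_INT_CNTL. */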
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
		       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.hdp_flush = nbio_v7_4_hdp_flush,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
	.ras_late_init = amdgpu_nbio_ras_late_init,
};