/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"

#include "soc15_common.h"
#include "navi10_ih.h"

#define MAX_REARM_RETRY 10

#define mmIH_CHICKEN_Sienna_Cichlid                 0x018d
#define mmIH_CHICKEN_Sienna_Cichlid_BASE_IDX        0

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * navi10_ih_init_register_offset - Initialize register offsets for the ih rings
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the register offsets for the ih rings (NAVI10).
 */
static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
{
	struct amdgpu_ih_regs *ih_regs;

	if (adev->irq.ih.ring_size) {
		ih_regs = &adev->irq.ih.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
	}

	if (adev->irq.ih1.ring_size) {
		ih_regs = &adev->irq.ih1.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
	}

	if (adev->irq.ih2.ring_size) {
		ih_regs = &adev->irq.ih2.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
	}
}

/**
 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
 *
 * @adev: amdgpu_device pointer
 * @threshold: threshold to trigger the wptr reporting
 * @timeout: timeout to trigger the wptr reporting
 * @enabled: Enable/disable timeout flush mechanism
 *
 * threshold input range: 0 ~ 15, default 0,
 * real_threshold = 2^threshold
 * timeout input range: 0 ~ 20, default 8,
 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
 *
 * Force update wptr for self interrupt (>= SIENNA_CICHLID).
 */
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
			       u32 threshold, u32 timeout, bool enabled)
{
	u32 ih_cntl, ih_rb_cntl;

	if (adev->asic_type < CHIP_SIENNA_CICHLID)
		return;

	ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);

	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
				   RB_USED_INT_THRESHOLD, threshold);

	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
				   RB_USED_INT_THRESHOLD, threshold);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
	WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
}

/**
 * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (NAVI10).
 */
static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
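	/* Under SR-IOV, IH_RB_CNTL may have to be programmed through the PSP
	 * rather than by a direct register write.
	 */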
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

	adev->irq.ih.enabled = true;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 1);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		adev->irq.ih1.enabled = true;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 1);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		adev->irq.ih2.enabled = true;
	}

	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;
}

/**
 * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (NAVI10).
 */
static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 0);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		adev->irq.ih1.enabled = false;
		adev->irq.ih1.rptr = 0;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 0);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		adev->irq.ih2.enabled = false;
		adev->irq.ih2.rptr = 0;
	}

}

static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

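	/* MC_SPACE selects how the ring base address is interpreted: 1 for a
	 * system (bus) address when use_bus_addr is set, 4 for a GPU virtual
	 * address otherwise.
	 */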
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
	u32 ih_doorbell_rtpr = 0;

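	/* The doorbell, when used, is what the driver writes the updated rptr
	 * to (see navi10_ih_set_rptr); program its offset and enable bit here,
	 * otherwise keep it disabled.
	 */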
	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	return ih_doorbell_rtpr;
}

/**
 * navi10_ih_enable_ring - enable an ih ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Enable an ih ring buffer (NAVI10)
 */
static int navi10_ih_enable_ring(struct amdgpu_device *adev,
				 struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = navi10_ih_rb_cntl(ih, tmp);
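	/* Ring 0 re-arms on rptr writes only when MSI interrupts are enabled;
	 * ring 1 disables the overflow flag and enables full-drain mode
	 * instead (mirroring the ring 1 setup in navi10_ih_irq_init).
	 */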
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
	if (ih == &adev->irq.ih1) {
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
	}
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (ih == &adev->irq.ih) {
		/* set the ih ring 0 writeback address whether it's enabled or not */
		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
	}

	/* set rptr, wptr to 0 */
	WREG32(ih_regs->ih_rb_wptr, 0);
	WREG32(ih_regs->ih_rb_rptr, 0);

	WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));

	return 0;
}

static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
{
	uint32_t tmp;

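	/* IH_CLIENT_CFG_INDEX selects the client slot to update (0x12 for VMC,
	 * 0x1B for UMC); IH_CLIENT_CFG_DATA then retargets that client to
	 * ring 1.
	 */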
	/* Reroute to IH ring 1 for VMC */
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);

	/* Reroute IH ring 1 for UMC */
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
}

/**
 * navi10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (NAVI10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	u32 ih_rb_cntl, ih_chicken;
	u32 tmp;

	/* disable irqs */
	navi10_ih_disable_interrupts(adev);

	adev->nbio.funcs->ih_control(adev);

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
				   !!adev->irq.msi_enabled);
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}
	if (adev->irq.ih1.ring_size)
		navi10_ih_reroute_ih(adev);

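	/* When firmware is loaded directly and the ring lives at a bus
	 * address, IH_CHICKEN has to flag the ring address as a guest
	 * physical address; Sienna Cichlid and newer use a relocated
	 * IH_CHICKEN register offset.
	 */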
	if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
		if (ih->use_bus_addr) {
			switch (adev->asic_type) {
			case CHIP_SIENNA_CICHLID:
			case CHIP_NAVY_FLOUNDER:
			case CHIP_VANGOGH:
			case CHIP_DIMGREY_CAVEFISH:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid, ih_chicken);
				break;
			default:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
				break;
			}
		}
	}

	/* set the writeback address whether it's enabled or not */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
		     lower_32_bits(ih->wptr_addr));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
		     upper_32_bits(ih->wptr_addr) & 0xFFFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
			navi10_ih_doorbell_rptr(ih));

	adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
					    ih->doorbell_index);

	ih = &adev->irq.ih1;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   WPTR_OVERFLOW_ENABLE, 0);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   RB_FULL_DRAIN_ENABLE, 1);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return -ETIMEDOUT;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);

		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
				navi10_ih_doorbell_rptr(ih));
	}

	ih = &adev->irq.ih2;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);

		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return -ETIMEDOUT;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);

		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
			     navi10_ih_doorbell_rptr(ih));
	}

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	navi10_ih_enable_interrupts(adev);
	/* enable wptr force update for self int */
	force_update_wptr_for_self_int(adev, 0, 8, true);

	return 0;
}

/**
 * navi10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (NAVI10).
 */
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
	force_update_wptr_for_self_int(adev, 0, 8, false);
	navi10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * navi10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (NAVI10).  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	else
		BUG();

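	/* Double check that the overflow wasn't already cleared. */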
	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not-overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	else
		BUG();

	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);
out:
	return (wptr & ih->ptr_mask);
}

/**
 * navi10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode
 * @entry: IV entry to place decoded information into
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void navi10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[8];

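	/* Each IV entry is eight dwords (32 bytes): client/source/ring/vmid,
	 * a 48-bit timestamp, the pasid and four dwords of source data.
	 */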
	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	ih->rptr += 32;
}

/**
 * navi10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring to match
 *
 */
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	uint32_t reg_rptr = 0;
	uint32_t v = 0;
	uint32_t i = 0;

	if (ih == &adev->irq.ih)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
	else if (ih == &adev->irq.ih1)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
	else
		return;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(reg_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}

/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr
 *
 * Set the IH ring buffer rptr.
 */
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

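		/* The doorbell write may be dropped under SR-IOV; re-issue it
		 * if the hardware rptr did not move.
		 */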
		if (amdgpu_sriov_vf(adev))
			navi10_ih_irq_rearm(adev, ih);
	} else if (ih == &adev->irq.ih) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	} else if (ih == &adev->irq.ih1) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
	} else if (ih == &adev->irq.ih2) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
	}
}

/**
 * navi10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int navi10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default: break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
	.process = navi10_ih_self_irq,
};

static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}

static int navi10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_set_interrupt_funcs(adev);
	navi10_ih_set_self_irq_funcs(adev);
	return 0;
}

static int navi10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
				&adev->irq.self_irq);

	if (r)
		return r;

	/* use gpu virtual address for ih ring
	 * until ih_chicken is programmed to allow
	 * use of the bus address for the ih ring by the psp bl */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
		use_bus_addr = false;
	else
		use_bus_addr = true;
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	adev->irq.ih1.ring_size = 0;
	adev->irq.ih2.ring_size = 0;

	if (adev->asic_type < CHIP_NAVI10) {
		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
		if (r)
			return r;

		adev->irq.ih1.use_doorbell = true;
		adev->irq.ih1.doorbell_index =
					(adev->doorbell_index.ih + 1) << 1;

		r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
		if (r)
			return r;

		adev->irq.ih2.use_doorbell = true;
		adev->irq.ih2.doorbell_index =
					(adev->doorbell_index.ih + 2) << 1;
	}

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
	if (r)
		return r;

	r = amdgpu_irq_init(adev);

	return r;
}

static int navi10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int navi10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = navi10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int navi10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_irq_disable(adev);

	return 0;
}

static int navi10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_fini(adev);
}

static int navi10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_init(adev);
}

static bool navi10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int navi10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int navi10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t data, def, field_val;

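	/* Enabling clockgating clears the clock soft overrides so the IH
	 * clocks can be gated; disabling sets them to keep the clocks running.
	 */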
	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
		field_val = enable ? 0 : 1;
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
	}

	return;
}

static int navi10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_update_clockgating_state(adev,
				state == AMD_CG_STATE_GATE);
	return 0;
}

static int navi10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void navi10_ih_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
		*flags |= AMD_CG_SUPPORT_IH_CG;

	return;
}

static const struct amd_ip_funcs navi10_ih_ip_funcs = {
	.name = "navi10_ih",
	.early_init = navi10_ih_early_init,
	.late_init = NULL,
	.sw_init = navi10_ih_sw_init,
	.sw_fini = navi10_ih_sw_fini,
	.hw_init = navi10_ih_hw_init,
	.hw_fini = navi10_ih_hw_fini,
	.suspend = navi10_ih_suspend,
	.resume = navi10_ih_resume,
	.is_idle = navi10_ih_is_idle,
	.wait_for_idle = navi10_ih_wait_for_idle,
	.soft_reset = navi10_ih_soft_reset,
	.set_clockgating_state = navi10_ih_set_clockgating_state,
	.set_powergating_state = navi10_ih_set_powergating_state,
	.get_clockgating_state = navi10_ih_get_clockgating_state,
};

static const struct amdgpu_ih_funcs navi10_ih_funcs = {
	.get_wptr = navi10_ih_get_wptr,
	.decode_iv = navi10_ih_decode_iv,
	.set_rptr = navi10_ih_set_rptr
};

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &navi10_ih_funcs;
}

const struct amdgpu_ip_block_version navi10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &navi10_ih_ip_funcs,
};