/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"

#include "soc15_common.h"
#include "navi10_ih.h"

#define MAX_REARM_RETRY 10

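/* IH_CHICKEN lives at a different offset on Sienna Cichlid, so define the
 * offset locally instead of taking it from the osssys 5.0.0 headers.
 */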
#define mmIH_CHICKEN_Sienna_Cichlid                 0x018d
#define mmIH_CHICKEN_Sienna_Cichlid_BASE_IDX        0

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (NAVI10).
 */
static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

	adev->irq.ih.enabled = true;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 1);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		adev->irq.ih1.enabled = true;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 1);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		adev->irq.ih2.enabled = true;
	}
}

/**
 * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (NAVI10).
 */
static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 0);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		adev->irq.ih1.enabled = false;
		adev->irq.ih1.rptr = 0;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 0);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		adev->irq.ih2.enabled = false;
		adev->irq.ih2.rptr = 0;
	}
}

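/* Build an IH_RB_CNTL value for @ih: ring size (log2 of the size in dwords),
 * overflow handling, wptr write-back and MC access attributes.
 */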
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

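/* Build an IH_DOORBELL_RPTR value: program the doorbell offset and enable it
 * when the ring uses a doorbell, otherwise leave the doorbell disabled.
 */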
static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
	u32 ih_doorbell_rtpr = 0;

	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	return ih_doorbell_rtpr;
}

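/**
 * navi10_ih_reroute_ih - reroute VMC/UMC interrupts to IH ring 1
 *
 * @adev: amdgpu_device pointer
 *
 * Program IH_CLIENT_CFG so that interrupts from the VMC and UMC clients
 * are delivered to IH ring 1 instead of the main IH ring.
 */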
static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Reroute to IH ring 1 for VMC */
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);

	/* Reroute to IH ring 1 for UMC */
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
}

/**
 * navi10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (NAVI).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	u32 ih_rb_cntl, ih_chicken;
	u32 tmp;

	/* disable irqs */
	navi10_ih_disable_interrupts(adev);

	adev->nbio.funcs->ih_control(adev);

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
				   !!adev->irq.msi_enabled);
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}
	navi10_ih_reroute_ih(adev);

	if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
		if (ih->use_bus_addr) {
			switch (adev->asic_type) {
			case CHIP_SIENNA_CICHLID:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid, ih_chicken);
				break;
			default:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
				break;
			}
		}
	}

	/* set the writeback address whether it's enabled or not */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
		     lower_32_bits(ih->wptr_addr));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
		     upper_32_bits(ih->wptr_addr) & 0xFFFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
			navi10_ih_doorbell_rptr(ih));

	adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
					    ih->doorbell_index);

	ih = &adev->irq.ih1;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   WPTR_OVERFLOW_ENABLE, 0);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   RB_FULL_DRAIN_ENABLE, 1);
		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return -ETIMEDOUT;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);

		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
				navi10_ih_doorbell_rptr(ih));
	}

	ih = &adev->irq.ih2;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);

		if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
						ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return -ETIMEDOUT;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);

		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
			     navi10_ih_doorbell_rptr(ih));
	}

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	navi10_ih_enable_interrupts(adev);

	return 0;
}

/**
 * navi10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (NAVI10).
 */
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
	navi10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * navi10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
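 * @ih: IH ring buffer to fetch wptr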
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (NAVI10).  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	else
		BUG();

	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	else
		BUG();

	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);
out:
	return (wptr & ih->ptr_mask);
}

/**
 * navi10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
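 * @ih: IH ring buffer to decode
 * @entry: IV entry to place decoded information into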
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void navi10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[8];

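	/* each IV entry on the ring is 8 dwords (32 bytes) */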
	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	ih->rptr += 32;
}

/**
 * navi10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
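 * @ih: IH ring to match against the RPTR register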
 *
 */
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	uint32_t reg_rptr = 0;
	uint32_t v = 0;
	uint32_t i = 0;

	if (ih == &adev->irq.ih)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
	else if (ih == &adev->irq.ih1)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
	else
		return;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(reg_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}

/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
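 * @ih: IH ring buffer to set rptr for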
 *
 * Set the IH ring buffer rptr.
 */
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		if (amdgpu_sriov_vf(adev))
			navi10_ih_irq_rearm(adev, ih);
	} else if (ih == &adev->irq.ih) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	} else if (ih == &adev->irq.ih1) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
	} else if (ih == &adev->irq.ih2) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
	}
}

/**
 * navi10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int navi10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default: break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
	.process = navi10_ih_self_irq,
};

static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}

static int navi10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_set_interrupt_funcs(adev);
	navi10_ih_set_self_irq_funcs(adev);
	return 0;
}

static int navi10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
				&adev->irq.self_irq);

	if (r)
		return r;

	/* use a gpu virtual address for the ih ring
	 * until ih_chicken is programmed to allow
	 * use of a bus address for the ih ring by the psp bl */
	use_bus_addr = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP);
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih1.use_doorbell = true;
	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih2.use_doorbell = true;
	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;

	r = amdgpu_irq_init(adev);

	return r;
}

static int navi10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int navi10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = navi10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int navi10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_irq_disable(adev);

	return 0;
}

static int navi10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_fini(adev);
}

static int navi10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_init(adev);
}

static bool navi10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int navi10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int navi10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

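/* Clock gating is enabled by clearing the *_CLK_SOFT_OVERRIDE fields in
 * IH_CLK_CTRL and disabled by setting them.
 */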
static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t data, def, field_val;

	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
		field_val = enable ? 0 : 1;
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
	}
}

static int navi10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_update_clockgating_state(adev,
				state == AMD_CG_STATE_GATE);
	return 0;
}

static int navi10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void navi10_ih_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
		*flags |= AMD_CG_SUPPORT_IH_CG;
}

static const struct amd_ip_funcs navi10_ih_ip_funcs = {
	.name = "navi10_ih",
	.early_init = navi10_ih_early_init,
	.late_init = NULL,
	.sw_init = navi10_ih_sw_init,
	.sw_fini = navi10_ih_sw_fini,
	.hw_init = navi10_ih_hw_init,
	.hw_fini = navi10_ih_hw_fini,
	.suspend = navi10_ih_suspend,
	.resume = navi10_ih_resume,
	.is_idle = navi10_ih_is_idle,
	.wait_for_idle = navi10_ih_wait_for_idle,
	.soft_reset = navi10_ih_soft_reset,
	.set_clockgating_state = navi10_ih_set_clockgating_state,
	.set_powergating_state = navi10_ih_set_powergating_state,
	.get_clockgating_state = navi10_ih_get_clockgating_state,
};

static const struct amdgpu_ih_funcs navi10_ih_funcs = {
	.get_wptr = navi10_ih_get_wptr,
	.decode_iv = navi10_ih_decode_iv,
	.set_rptr = navi10_ih_set_rptr
};

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &navi10_ih_funcs;
}

const struct amdgpu_ip_block_version navi10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &navi10_ih_ip_funcs,
};