1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <drm/drmP.h>
24 #include "amdgpu.h"
25 #include "amdgpu_ih.h"
26 #include "soc15.h"
27 
28 #include "oss/osssys_4_0_offset.h"
29 #include "oss/osssys_4_0_sh_mask.h"
30 
31 #include "soc15_common.h"
32 #include "vega10_ih.h"
33 
34 #define MAX_REARM_RETRY 10
35 
36 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
37 
/**
 * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VEGA10).  Sets RB_ENABLE (plus
 * ENABLE_INTR on the main ring) in the ring control registers and
 * mirrors the new state in adev->irq.ih*.enabled.  Rings 1 and 2 are
 * only touched when they have been allocated (ring_size != 0).
 */
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		adev->irq.ih1.enabled = true;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		adev->irq.ih2.enabled = true;
	}
}
70 
/**
 * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VEGA10).  Clears RB_ENABLE (plus
 * ENABLE_INTR on the main ring), resets the hardware rptr/wptr to 0,
 * and mirrors the state in the software ring structures.  Rings 1 and
 * 2 are only touched when allocated (ring_size != 0).
 */
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		adev->irq.ih1.enabled = false;
		adev->irq.ih1.rptr = 0;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		adev->irq.ih2.enabled = false;
		adev->irq.ih2.rptr = 0;
	}
}
115 
/**
 * vega10_ih_rb_cntl - fill in the common IH_RB_CNTL fields for a ring
 *
 * @ih: IH ring being programmed
 * @ih_rb_cntl: current value of the ring's IH_RB_CNTL register
 *
 * Programs the ring size, MC address space, overflow handling,
 * write-back and snooping settings into @ih_rb_cntl and returns the
 * new register value.  MC_SPACE is 1 when the ring uses a bus
 * (system-memory) address and 4 otherwise — NOTE(review): encoding
 * values assumed from the ternary below; confirm against the OSSSYS
 * register spec.
 */
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	/* ring_size is in bytes; RB_SIZE wants log2 of the size in dwords */
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}
138 
139 static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
140 {
141 	u32 ih_doorbell_rtpr = 0;
142 
143 	if (ih->use_doorbell) {
144 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
145 						 IH_DOORBELL_RPTR, OFFSET,
146 						 ih->doorbell_index);
147 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
148 						 IH_DOORBELL_RPTR,
149 						 ENABLE, 1);
150 	} else {
151 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
152 						 IH_DOORBELL_RPTR,
153 						 ENABLE, 0);
154 	}
155 	return ih_doorbell_rtpr;
156 }
157 
158 /**
159  * vega10_ih_irq_init - init and enable the interrupt ring
160  *
161  * @adev: amdgpu_device pointer
162  *
163  * Allocate a ring buffer for the interrupt controller,
164  * enable the RLC, disable interrupts, enable the IH
165  * ring buffer and enable it (VI).
166  * Called at device load and reume.
167  * Returns 0 for success, errors for failure.
168  */
169 static int vega10_ih_irq_init(struct amdgpu_device *adev)
170 {
171 	struct amdgpu_ih_ring *ih;
172 	u32 ih_rb_cntl;
173 	int ret = 0;
174 	u32 tmp;
175 
176 	/* disable irqs */
177 	vega10_ih_disable_interrupts(adev);
178 
179 	adev->nbio_funcs->ih_control(adev);
180 
181 	ih = &adev->irq.ih;
182 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
183 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
184 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
185 
186 	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
187 	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
188 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
189 				   !!adev->irq.msi_enabled);
190 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
191 
192 	/* set the writeback address whether it's enabled or not */
193 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
194 		     lower_32_bits(ih->wptr_addr));
195 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
196 		     upper_32_bits(ih->wptr_addr) & 0xFFFF);
197 
198 	/* set rptr, wptr to 0 */
199 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
200 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
201 
202 	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
203 		     vega10_ih_doorbell_rptr(ih));
204 
205 	ih = &adev->irq.ih1;
206 	if (ih->ring_size) {
207 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
208 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
209 			     (ih->gpu_addr >> 40) & 0xff);
210 
211 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
212 		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
213 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
214 					   WPTR_OVERFLOW_ENABLE, 0);
215 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
216 					   RB_FULL_DRAIN_ENABLE, 1);
217 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
218 
219 		/* set rptr, wptr to 0 */
220 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
221 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
222 
223 		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
224 			     vega10_ih_doorbell_rptr(ih));
225 	}
226 
227 	ih = &adev->irq.ih2;
228 	if (ih->ring_size) {
229 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
230 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
231 			     (ih->gpu_addr >> 40) & 0xff);
232 
233 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
234 		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
235 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
236 
237 		/* set rptr, wptr to 0 */
238 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
239 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
240 
241 		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
242 			     vega10_ih_doorbell_rptr(ih));
243 	}
244 
245 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
246 	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
247 			    CLIENT18_IS_STORM_CLIENT, 1);
248 	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
249 
250 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
251 	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
252 	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
253 
254 	pci_set_master(adev->pdev);
255 
256 	/* enable interrupts */
257 	vega10_ih_enable_interrupts(adev);
258 
259 	return ret;
260 }
261 
/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10), then wait briefly so any
 * in-flight interrupt can settle before the caller proceeds.
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}
276 
/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr for
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10).  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

	/* writeback copy first; it is cheap and usually has no overflow */
	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	else
		BUG();	/* not one of the three IH rings */

	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happen start parsing interrupt
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catchup.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	/* acknowledge the overflow in the matching IH_RB_CNTL register */
	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	else
		BUG();

	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);

out:
	return (wptr & ih->ptr_mask);
}
340 
341 /**
342  * vega10_ih_decode_iv - decode an interrupt vector
343  *
344  * @adev: amdgpu_device pointer
345  *
346  * Decodes the interrupt vector at the current rptr
347  * position and also advance the position.
348  */
349 static void vega10_ih_decode_iv(struct amdgpu_device *adev,
350 				struct amdgpu_ih_ring *ih,
351 				struct amdgpu_iv_entry *entry)
352 {
353 	/* wptr/rptr are in bytes! */
354 	u32 ring_index = ih->rptr >> 2;
355 	uint32_t dw[8];
356 
357 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
358 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
359 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
360 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
361 	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
362 	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
363 	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
364 	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
365 
366 	entry->client_id = dw[0] & 0xff;
367 	entry->src_id = (dw[0] >> 8) & 0xff;
368 	entry->ring_id = (dw[0] >> 16) & 0xff;
369 	entry->vmid = (dw[0] >> 24) & 0xf;
370 	entry->vmid_src = (dw[0] >> 31);
371 	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
372 	entry->timestamp_src = dw[2] >> 31;
373 	entry->pasid = dw[3] & 0xffff;
374 	entry->pasid_src = dw[3] >> 31;
375 	entry->src_data[0] = dw[4];
376 	entry->src_data[1] = dw[5];
377 	entry->src_data[2] = dw[6];
378 	entry->src_data[3] = dw[7];
379 
380 	/* wptr/rptr are in bytes! */
381 	ih->rptr += 32;
382 }
383 
/**
 * vega10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring whose doorbell should be re-written
 *
 * Re-write the ring's doorbell until the hardware rptr register
 * catches up with the software rptr, in case the original doorbell
 * write was lost.  Gives up after MAX_REARM_RETRY attempts.
 */
static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	uint32_t reg_rptr = 0;
	uint32_t v = 0;
	uint32_t i = 0;

	/* pick the rptr register that matches this ring */
	if (ih == &adev->irq.ih)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
	else if (ih == &adev->irq.ih1)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
	else
		return;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(reg_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}
415 
/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer whose rptr should be committed to the hardware
 *
 * Set the IH ring buffer rptr, either via the doorbell (writing the
 * value back to memory first) or directly through the per-ring rptr
 * register.
 */
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		/* doorbell writes may be lost under SR-IOV; retry if so */
		if (amdgpu_sriov_vf(adev))
			vega10_ih_irq_rearm(adev, ih);
	} else if (ih == &adev->irq.ih) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	} else if (ih == &adev->irq.ih1) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
	} else if (ih == &adev->irq.ih2) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
	}
}
441 
/**
 * vega10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int vega10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	/* wptr_cpu stores a little-endian value (get_wptr reads it with
	 * le32_to_cpu), hence the cpu_to_le32 conversion here */
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default: break;	/* other ring ids: nothing to do */
	}
	return 0;
}
470 
/* Handler table for IVs delivered by the IH client itself (ring 1/2) */
static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
	.process = vega10_ih_self_irq,
};
474 
/* Install the self-interrupt source; it has no sub-types (num_types 0). */
static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}
480 
/* Hook up the IH ring callbacks and the self-interrupt source early. */
static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_set_interrupt_funcs(adev);
	vega10_ih_set_self_irq_funcs(adev);
	return 0;
}
489 
/* Allocate the three IH rings, register the self-irq and init the irq core. */
static int vega10_ih_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* self-interrupt source used for ring 1/2 wptr updates */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);
	if (r)
		return r;

	/* main IH ring: 256KB, bus-address (system memory) backed */
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	/* << 1 presumably converts the doorbell slot to a 32-bit doorbell
	 * index — NOTE(review): confirm against the amdgpu doorbell layout */
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih1.use_doorbell = true;
	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih2.use_doorbell = true;
	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;

	r = amdgpu_irq_init(adev);

	return r;
}
525 
/* Tear down the irq core and free the IH rings (reverse order of init). */
static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}
537 
/* hw_init: program and enable the IH ring(s); forwards irq_init's result. */
static int vega10_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_irq_init(adev);
}
549 
/* hw_fini: disable interrupt delivery; ring memory is kept (freed in sw_fini). */
static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}
558 
/* Suspend is simply a hardware teardown of the IH block. */
static int vega10_ih_suspend(void *handle)
{
	return vega10_ih_hw_fini(handle);
}
565 
/* Resume re-runs the full hardware init of the IH block. */
static int vega10_ih_resume(void *handle)
{
	return vega10_ih_hw_init(handle);
}
572 
/* No IH idle check is implemented; always report idle. */
static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}
578 
/* No idle wait is implemented; callers always see -ETIMEDOUT. */
static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}
584 
/* IH soft reset is not implemented; report success without touching hw. */
static int vega10_ih_soft_reset(void *handle)
{
	/* todo */

	return 0;
}
591 
/* Clockgating control for the IH block is currently a no-op. */
static int vega10_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
597 
/* Powergating control for the IH block is currently a no-op. */
static int vega10_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
603 
/* IP-level lifecycle callbacks wiring the IH block into the amdgpu core */
const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};
620 
/* Per-ring IH callbacks used by the amdgpu interrupt dispatch code */
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};
626 
/* Install the vega10 IH ring callback table on the device. */
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &vega10_ih_funcs;
}
631 
/* IP block description: IH version 4.0.0 */
const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};
640