/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"

#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"

#include "soc15_common.h"
#include "vega10_ih.h"

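/*
 * The Vega10 interrupt handler (IH) uses one main ring buffer (ring 0),
 * which is processed directly from interrupt context, plus two optional
 * auxiliary ring buffers (IH1 and IH2).  Write pointer updates for the
 * auxiliary rings arrive as IVs on ring 0 from the IH client itself;
 * vega10_ih_self_irq() picks those up and schedules work to drain the
 * corresponding ring from process context.
 */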

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;

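	/*
	 * Only the ring buffer enable bit is toggled for rings 1 and 2;
	 * interrupt delivery as a whole is gated by the ENABLE_INTR bit
	 * in IH_RB_CNTL programmed above.
	 */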
	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		adev->irq.ih1.enabled = true;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		adev->irq.ih2.enabled = true;
	}
}

/**
 * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		adev->irq.ih1.enabled = false;
		adev->irq.ih1.rptr = 0;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		adev->irq.ih2.enabled = false;
		adev->irq.ih2.rptr = 0;
	}
}

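/**
 * vega10_ih_rb_cntl - compute the common IH_RB_CNTL value for a ring
 *
 * @ih: IH ring buffer being programmed
 * @ih_rb_cntl: current value of the ring's control register
 *
 * Program the ring size, address space, snooping and write pointer
 * write-back fields that are common to the IH, IH1 and IH2 ring
 * control registers and return the updated register value.
 */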
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

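	/*
	 * MC_SPACE selects how the ring base address is interpreted: the
	 * bus/physical address of the ring when use_bus_addr is set, the
	 * GPU (GART) address otherwise.
	 */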
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

/**
 * vega10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Program the base address, size, write-back and doorbell settings of
 * the IH ring buffers and enable them (VEGA10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih;
	int ret = 0;
	u32 ih_rb_cntl, ih_doorbell_rtpr;
	u32 tmp;

	/* disable irqs */
	vega10_ih_disable_interrupts(adev);

	adev->nbio_funcs->ih_control(adev);

	ih = &adev->irq.ih;
	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
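	/*
	 * RPTR_REARM is tied to MSI usage: presumably it lets the IH
	 * re-trigger the (edge triggered) interrupt when the read pointer
	 * is updated while further entries are still pending.
	 */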
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
				   !!adev->irq.msi_enabled);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
		     lower_32_bits(ih->wptr_addr));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
		     upper_32_bits(ih->wptr_addr) & 0xFFFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
	if (adev->irq.ih.use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 adev->irq.ih.doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);

	ih = &adev->irq.ih1;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
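		/*
		 * Ring 1 must not overflow: overflow reporting is disabled
		 * and RB_FULL_DRAIN_ENABLE is set so that, presumably, new
		 * entries are dropped instead of overwriting unread ones
		 * once the ring is full.
		 */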
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   WPTR_OVERFLOW_ENABLE, 0);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   RB_FULL_DRAIN_ENABLE, 1);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
	}

	ih = &adev->irq.ih2;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
	}

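	/*
	 * Mark IH client 18 as a storm client.  Client 0x12 on SOC15 parts
	 * is the VMC (VM fault) client (see soc15_ih_clientid.h), a likely
	 * source of interrupt storms from retried page faults.
	 */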
	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

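	/* make sure bus mastering is enabled so ring write-back and MSIs work */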
	pci_set_master(adev->pdev);

	/* enable interrupts */
	vega10_ih_enable_interrupts(adev);

	return ret;
}

/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10).
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr from
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10).  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

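	/*
	 * The IH writes IH_RB_WPTR back to memory (WPTR_WRITEBACK_ENABLE),
	 * so read the cached copy first and only touch the register when
	 * an overflow needs to be double checked below.
	 */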
	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	else
		BUG();

	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	else
		BUG();

	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);

out:
	return (wptr & ih->ptr_mask);
}

/**
 * vega10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode from
 * @entry: IV entry to fill with the decoded information
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void vega10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[8];

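	/* each IV entry is 8 dwords (32 bytes), see the rptr advance below */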
	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	ih->rptr += 32;
}

/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr for
 *
 * Set the IH ring buffer rptr.
 */
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);
	} else if (ih == &adev->irq.ih) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	} else if (ih == &adev->irq.ih1) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
	} else if (ih == &adev->irq.ih2) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
	}
}

/**
 * vega10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int vega10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

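	/*
	 * The IH reports write pointer updates for rings 1 and 2 as IVs on
	 * ring 0 from its own client; ring_id tells us which ring to drain
	 * and src_data[0] carries the new hardware write pointer.
	 */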
	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default: break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
	.process = vega10_ih_self_irq,
};

static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}

static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_set_interrupt_funcs(adev);
	vega10_ih_set_self_irq_funcs(adev);
	return 0;
}

static int vega10_ih_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);
	if (r)
		return r;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
	if (r)
		return r;

	if (adev->asic_type == CHIP_VEGA10) {
		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
		if (r)
			return r;

		r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
		if (r)
			return r;
	}

	/* TODO add doorbell for IH1 & IH2 as well */
	adev->irq.ih.use_doorbell = true;
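	/*
	 * The SOC15 doorbell indices appear to be assigned in 64 bit units,
	 * while the IH doorbell offset is programmed in dwords, hence the
	 * shift by one.
	 */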
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_irq_init(adev);

	return r;
}

static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int vega10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vega10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}

static int vega10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_fini(adev);
}

static int vega10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_init(adev);
}

static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int vega10_ih_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static int vega10_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int vega10_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &vega10_ih_funcs;
}

const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};