1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_ih.h"
28 #include "soc15.h"
29 
30 #include "oss/osssys_4_0_offset.h"
31 #include "oss/osssys_4_0_sh_mask.h"
32 
33 #include "soc15_common.h"
34 #include "vega10_ih.h"
35 
36 #define MAX_REARM_RETRY 10
37 
38 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
39 
40 /**
41  * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
42  *
43  * @adev: amdgpu_device pointer
44  *
45  * Enable the interrupt ring buffer (VEGA10).
46  */
47 static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
48 {
49 	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
50 
51 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
52 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
53 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
54 	adev->irq.ih.enabled = true;
55 
56 	if (adev->irq.ih1.ring_size) {
57 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
58 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
59 					   RB_ENABLE, 1);
60 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
61 		adev->irq.ih1.enabled = true;
62 	}
63 
64 	if (adev->irq.ih2.ring_size) {
65 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
66 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
67 					   RB_ENABLE, 1);
68 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
69 		adev->irq.ih2.enabled = true;
70 	}
71 }
72 
73 /**
74  * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
75  *
76  * @adev: amdgpu_device pointer
77  *
78  * Disable the interrupt ring buffer (VEGA10).
79  */
80 static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
81 {
82 	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
83 
84 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
85 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
86 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
87 	/* set rptr, wptr to 0 */
88 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
89 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
90 	adev->irq.ih.enabled = false;
91 	adev->irq.ih.rptr = 0;
92 
93 	if (adev->irq.ih1.ring_size) {
94 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
95 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
96 					   RB_ENABLE, 0);
97 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
98 		/* set rptr, wptr to 0 */
99 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
100 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
101 		adev->irq.ih1.enabled = false;
102 		adev->irq.ih1.rptr = 0;
103 	}
104 
105 	if (adev->irq.ih2.ring_size) {
106 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
107 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
108 					   RB_ENABLE, 0);
109 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
110 		/* set rptr, wptr to 0 */
111 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
112 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
113 		adev->irq.ih2.enabled = false;
114 		adev->irq.ih2.rptr = 0;
115 	}
116 }
117 
/*
 * vega10_ih_rb_cntl - build the common IH_RB_CNTL value for a ring
 *
 * @ih: IH ring to program
 * @ih_rb_cntl: current register value to modify
 *
 * Returns @ih_rb_cntl with the fields shared by all three IH rings
 * filled in: address space, overflow handling, ring size, write-pointer
 * writeback and MC access attributes.  The RING1/RING2 registers share
 * the IH_RB_CNTL field layout, so callers reuse this for all rings.
 */
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	/* ring_size is in bytes, the RB_SIZE field is log2 of dwords */
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	/* MC_SPACE: 1 when the ring is addressed via a bus (system memory)
	 * address, 4 otherwise — NOTE(review): confirm the encoding against
	 * the osssys 4.0 register spec
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}
140 
141 static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
142 {
143 	u32 ih_doorbell_rtpr = 0;
144 
145 	if (ih->use_doorbell) {
146 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
147 						 IH_DOORBELL_RPTR, OFFSET,
148 						 ih->doorbell_index);
149 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
150 						 IH_DOORBELL_RPTR,
151 						 ENABLE, 1);
152 	} else {
153 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
154 						 IH_DOORBELL_RPTR,
155 						 ENABLE, 0);
156 	}
157 	return ih_doorbell_rtpr;
158 }
159 
160 /**
161  * vega10_ih_irq_init - init and enable the interrupt ring
162  *
163  * @adev: amdgpu_device pointer
164  *
165  * Allocate a ring buffer for the interrupt controller,
166  * enable the RLC, disable interrupts, enable the IH
167  * ring buffer and enable it (VI).
168  * Called at device load and reume.
169  * Returns 0 for success, errors for failure.
170  */
171 static int vega10_ih_irq_init(struct amdgpu_device *adev)
172 {
173 	struct amdgpu_ih_ring *ih;
174 	u32 ih_rb_cntl;
175 	int ret = 0;
176 	u32 tmp;
177 
178 	/* disable irqs */
179 	vega10_ih_disable_interrupts(adev);
180 
181 	adev->nbio_funcs->ih_control(adev);
182 
183 	ih = &adev->irq.ih;
184 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
185 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
186 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
187 
188 	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
189 	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
190 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
191 				   !!adev->irq.msi_enabled);
192 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
193 
194 	/* set the writeback address whether it's enabled or not */
195 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
196 		     lower_32_bits(ih->wptr_addr));
197 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
198 		     upper_32_bits(ih->wptr_addr) & 0xFFFF);
199 
200 	/* set rptr, wptr to 0 */
201 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
202 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
203 
204 	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
205 		     vega10_ih_doorbell_rptr(ih));
206 
207 	ih = &adev->irq.ih1;
208 	if (ih->ring_size) {
209 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
210 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
211 			     (ih->gpu_addr >> 40) & 0xff);
212 
213 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
214 		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
215 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
216 					   WPTR_OVERFLOW_ENABLE, 0);
217 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
218 					   RB_FULL_DRAIN_ENABLE, 1);
219 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
220 
221 		/* set rptr, wptr to 0 */
222 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
223 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
224 
225 		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
226 			     vega10_ih_doorbell_rptr(ih));
227 	}
228 
229 	ih = &adev->irq.ih2;
230 	if (ih->ring_size) {
231 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
232 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
233 			     (ih->gpu_addr >> 40) & 0xff);
234 
235 		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
236 		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
237 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
238 
239 		/* set rptr, wptr to 0 */
240 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
241 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
242 
243 		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
244 			     vega10_ih_doorbell_rptr(ih));
245 	}
246 
247 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
248 	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
249 			    CLIENT18_IS_STORM_CLIENT, 1);
250 	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
251 
252 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
253 	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
254 	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
255 
256 	pci_set_master(adev->pdev);
257 
258 	/* enable interrupts */
259 	vega10_ih_enable_interrupts(adev);
260 
261 	return ret;
262 }
263 
264 /**
265  * vega10_ih_irq_disable - disable interrupts
266  *
267  * @adev: amdgpu_device pointer
268  *
269  * Disable interrupts on the hw (VEGA10).
270  */
271 static void vega10_ih_irq_disable(struct amdgpu_device *adev)
272 {
273 	vega10_ih_disable_interrupts(adev);
274 
275 	/* Wait and acknowledge irq */
276 	mdelay(1);
277 }
278 
279 /**
280  * vega10_ih_get_wptr - get the IH ring buffer wptr
281  *
282  * @adev: amdgpu_device pointer
283  *
284  * Get the IH ring buffer wptr from either the register
285  * or the writeback memory buffer (VEGA10).  Also check for
286  * ring buffer overflow and deal with it.
287  * Returns the value of the wptr.
288  */
289 static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
290 			      struct amdgpu_ih_ring *ih)
291 {
292 	u32 wptr, reg, tmp;
293 
294 	wptr = le32_to_cpu(*ih->wptr_cpu);
295 
296 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
297 		goto out;
298 
299 	/* Double check that the overflow wasn't already cleared. */
300 
301 	if (ih == &adev->irq.ih)
302 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
303 	else if (ih == &adev->irq.ih1)
304 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
305 	else if (ih == &adev->irq.ih2)
306 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
307 	else
308 		BUG();
309 
310 	wptr = RREG32_NO_KIQ(reg);
311 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
312 		goto out;
313 
314 	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
315 
316 	/* When a ring buffer overflow happen start parsing interrupt
317 	 * from the last not overwritten vector (wptr + 32). Hopefully
318 	 * this should allow us to catchup.
319 	 */
320 	tmp = (wptr + 32) & ih->ptr_mask;
321 	dev_warn(adev->dev, "IH ring buffer overflow "
322 		 "(0x%08X, 0x%08X, 0x%08X)\n",
323 		 wptr, ih->rptr, tmp);
324 	ih->rptr = tmp;
325 
326 	if (ih == &adev->irq.ih)
327 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
328 	else if (ih == &adev->irq.ih1)
329 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
330 	else if (ih == &adev->irq.ih2)
331 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
332 	else
333 		BUG();
334 
335 	tmp = RREG32_NO_KIQ(reg);
336 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
337 	WREG32_NO_KIQ(reg, tmp);
338 
339 out:
340 	return (wptr & ih->ptr_mask);
341 }
342 
343 /**
344  * vega10_ih_decode_iv - decode an interrupt vector
345  *
346  * @adev: amdgpu_device pointer
347  *
348  * Decodes the interrupt vector at the current rptr
349  * position and also advance the position.
350  */
351 static void vega10_ih_decode_iv(struct amdgpu_device *adev,
352 				struct amdgpu_ih_ring *ih,
353 				struct amdgpu_iv_entry *entry)
354 {
355 	/* wptr/rptr are in bytes! */
356 	u32 ring_index = ih->rptr >> 2;
357 	uint32_t dw[8];
358 
359 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
360 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
361 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
362 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
363 	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
364 	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
365 	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
366 	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
367 
368 	entry->client_id = dw[0] & 0xff;
369 	entry->src_id = (dw[0] >> 8) & 0xff;
370 	entry->ring_id = (dw[0] >> 16) & 0xff;
371 	entry->vmid = (dw[0] >> 24) & 0xf;
372 	entry->vmid_src = (dw[0] >> 31);
373 	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
374 	entry->timestamp_src = dw[2] >> 31;
375 	entry->pasid = dw[3] & 0xffff;
376 	entry->pasid_src = dw[3] >> 31;
377 	entry->src_data[0] = dw[4];
378 	entry->src_data[1] = dw[5];
379 	entry->src_data[2] = dw[6];
380 	entry->src_data[3] = dw[7];
381 
382 	/* wptr/rptr are in bytes! */
383 	ih->rptr += 32;
384 }
385 
386 /**
387  * vega10_ih_irq_rearm - rearm IRQ if lost
388  *
389  * @adev: amdgpu_device pointer
390  *
391  */
392 static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
393 			       struct amdgpu_ih_ring *ih)
394 {
395 	uint32_t reg_rptr = 0;
396 	uint32_t v = 0;
397 	uint32_t i = 0;
398 
399 	if (ih == &adev->irq.ih)
400 		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
401 	else if (ih == &adev->irq.ih1)
402 		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
403 	else if (ih == &adev->irq.ih2)
404 		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
405 	else
406 		return;
407 
408 	/* Rearm IRQ / re-wwrite doorbell if doorbell write is lost */
409 	for (i = 0; i < MAX_REARM_RETRY; i++) {
410 		v = RREG32_NO_KIQ(reg_rptr);
411 		if ((v < ih->ring_size) && (v != ih->rptr))
412 			WDOORBELL32(ih->doorbell_index, ih->rptr);
413 		else
414 			break;
415 	}
416 }
417 
418 /**
419  * vega10_ih_set_rptr - set the IH ring buffer rptr
420  *
421  * @adev: amdgpu_device pointer
422  *
423  * Set the IH ring buffer rptr.
424  */
425 static void vega10_ih_set_rptr(struct amdgpu_device *adev,
426 			       struct amdgpu_ih_ring *ih)
427 {
428 	if (ih->use_doorbell) {
429 		/* XXX check if swapping is necessary on BE */
430 		*ih->rptr_cpu = ih->rptr;
431 		WDOORBELL32(ih->doorbell_index, ih->rptr);
432 
433 		if (amdgpu_sriov_vf(adev))
434 			vega10_ih_irq_rearm(adev, ih);
435 	} else if (ih == &adev->irq.ih) {
436 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
437 	} else if (ih == &adev->irq.ih1) {
438 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
439 	} else if (ih == &adev->irq.ih2) {
440 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
441 	}
442 }
443 
444 /**
445  * vega10_ih_self_irq - dispatch work for ring 1 and 2
446  *
447  * @adev: amdgpu_device pointer
448  * @source: irq source
449  * @entry: IV with WPTR update
450  *
451  * Update the WPTR from the IV and schedule work to handle the entries.
452  */
453 static int vega10_ih_self_irq(struct amdgpu_device *adev,
454 			      struct amdgpu_irq_src *source,
455 			      struct amdgpu_iv_entry *entry)
456 {
457 	uint32_t wptr = cpu_to_le32(entry->src_data[0]);
458 
459 	switch (entry->ring_id) {
460 	case 1:
461 		*adev->irq.ih1.wptr_cpu = wptr;
462 		schedule_work(&adev->irq.ih1_work);
463 		break;
464 	case 2:
465 		*adev->irq.ih2.wptr_cpu = wptr;
466 		schedule_work(&adev->irq.ih2_work);
467 		break;
468 	default: break;
469 	}
470 	return 0;
471 }
472 
/* handler for the IH's own "WPTR update" interrupts (rings 1/2) */
static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
	.process = vega10_ih_self_irq,
};
476 
/* register the self-irq source; it has no per-type state (num_types = 0) */
static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}
482 
/* early_init: install the IH and self-irq callback tables */
static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* the two setters are independent; order does not matter */
	vega10_ih_set_self_irq_funcs(adev);
	vega10_ih_set_interrupt_funcs(adev);

	return 0;
}
491 
/*
 * sw_init: register the self-irq source, allocate the three IH rings
 * (256 KiB main ring, one page each for rings 1/2) and initialize the
 * core irq infrastructure.  Returns 0 on success or a negative errno.
 */
static int vega10_ih_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);
	if (r)
		return r;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
	if (r)
		return r;

	/* << 1: doorbell_index is a qword index, the hw wants a dword
	 * index — NOTE(review): confirm against amdgpu doorbell layout
	 */
	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih1.use_doorbell = true;
	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih2.use_doorbell = true;
	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;

	r = amdgpu_irq_init(adev);

	return r;
}
527 
/* sw_fini: tear down in reverse order of sw_init (irq core, then rings) */
static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}
539 
/* hw_init: program and enable the IH rings; 0 on success, errno otherwise */
static int vega10_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_irq_init(adev);
}
551 
/* hw_fini: disable IH interrupt delivery; never fails */
static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}
560 
/* suspend is just a full IH hardware teardown */
static int vega10_ih_suspend(void *handle)
{
	return vega10_ih_hw_fini(handle);
}
567 
/* resume is just a full IH hardware re-init */
static int vega10_ih_resume(void *handle)
{
	return vega10_ih_hw_init(handle);
}
574 
/* is_idle: not implemented for this IP block; always reports idle */
static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}
580 
/* wait_for_idle: not implemented; deliberately reports timeout */
static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}
586 
/* soft_reset: not implemented for this IP block; no-op */
static int vega10_ih_soft_reset(void *handle)
{
	/* todo */

	return 0;
}
593 
/* clockgating is not managed by the vega10 IH block; no-op */
static int vega10_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
599 
/* powergating is not managed by the vega10 IH block; no-op */
static int vega10_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
605 
/* amdgpu IP-block lifecycle callbacks for the vega10 IH */
const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};
622 
/* ring-level IH operations used by the generic amdgpu irq code */
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};
628 
/* install the vega10 IH ring callbacks on the device */
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &vega10_ih_funcs;
}
633 
/* IP block descriptor: osssys 4.0 IH as used on vega10-class parts */
const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};
642