xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/cz_ih.c (revision c595db6d7c8bcf87ef42204391fa890e5950e566)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  *
4aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
5aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the "Software"),
6aaa36a97SAlex Deucher  * to deal in the Software without restriction, including without limitation
7aaa36a97SAlex Deucher  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8aaa36a97SAlex Deucher  * and/or sell copies of the Software, and to permit persons to whom the
9aaa36a97SAlex Deucher  * Software is furnished to do so, subject to the following conditions:
10aaa36a97SAlex Deucher  *
11aaa36a97SAlex Deucher  * The above copyright notice and this permission notice shall be included in
12aaa36a97SAlex Deucher  * all copies or substantial portions of the Software.
13aaa36a97SAlex Deucher  *
14aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18aaa36a97SAlex Deucher  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19aaa36a97SAlex Deucher  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20aaa36a97SAlex Deucher  * OTHER DEALINGS IN THE SOFTWARE.
21aaa36a97SAlex Deucher  *
22aaa36a97SAlex Deucher  */
2347b757fbSSam Ravnborg 
2447b757fbSSam Ravnborg #include <linux/pci.h>
2547b757fbSSam Ravnborg 
26aaa36a97SAlex Deucher #include "amdgpu.h"
27aaa36a97SAlex Deucher #include "amdgpu_ih.h"
28aaa36a97SAlex Deucher #include "vid.h"
29aaa36a97SAlex Deucher 
30aaa36a97SAlex Deucher #include "oss/oss_3_0_1_d.h"
31aaa36a97SAlex Deucher #include "oss/oss_3_0_1_sh_mask.h"
32aaa36a97SAlex Deucher 
33aaa36a97SAlex Deucher #include "bif/bif_5_1_d.h"
34aaa36a97SAlex Deucher #include "bif/bif_5_1_sh_mask.h"
35aaa36a97SAlex Deucher 
36aaa36a97SAlex Deucher /*
37aaa36a97SAlex Deucher  * Interrupts
38aaa36a97SAlex Deucher  * Starting with r6xx, interrupts are handled via a ring buffer.
39aaa36a97SAlex Deucher  * Ring buffers are areas of GPU accessible memory that the GPU
40aaa36a97SAlex Deucher  * writes interrupt vectors into and the host reads vectors out of.
41aaa36a97SAlex Deucher  * There is a rptr (read pointer) that determines where the
42aaa36a97SAlex Deucher  * host is currently reading, and a wptr (write pointer)
43aaa36a97SAlex Deucher  * which determines where the GPU has written.  When the
44aaa36a97SAlex Deucher  * pointers are equal, the ring is idle.  When the GPU
45aaa36a97SAlex Deucher  * writes vectors to the ring buffer, it increments the
46aaa36a97SAlex Deucher  * wptr.  When there is an interrupt, the host then starts
47aaa36a97SAlex Deucher  * fetching commands and processing them until the pointers are
48aaa36a97SAlex Deucher  * equal again at which point it updates the rptr.
49aaa36a97SAlex Deucher  */
50aaa36a97SAlex Deucher 
51aaa36a97SAlex Deucher static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev);
52aaa36a97SAlex Deucher 
53aaa36a97SAlex Deucher /**
54aaa36a97SAlex Deucher  * cz_ih_enable_interrupts - Enable the interrupt ring buffer
55aaa36a97SAlex Deucher  *
56aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
57aaa36a97SAlex Deucher  *
58aaa36a97SAlex Deucher  * Enable the interrupt ring buffer (VI).
59aaa36a97SAlex Deucher  */
cz_ih_enable_interrupts(struct amdgpu_device * adev)60aaa36a97SAlex Deucher static void cz_ih_enable_interrupts(struct amdgpu_device *adev)
61aaa36a97SAlex Deucher {
62aaa36a97SAlex Deucher 	u32 ih_cntl = RREG32(mmIH_CNTL);
63aaa36a97SAlex Deucher 	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
64aaa36a97SAlex Deucher 
65aaa36a97SAlex Deucher 	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1);
66aaa36a97SAlex Deucher 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
67aaa36a97SAlex Deucher 	WREG32(mmIH_CNTL, ih_cntl);
68aaa36a97SAlex Deucher 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
69aaa36a97SAlex Deucher 	adev->irq.ih.enabled = true;
70aaa36a97SAlex Deucher }
71aaa36a97SAlex Deucher 
72aaa36a97SAlex Deucher /**
73aaa36a97SAlex Deucher  * cz_ih_disable_interrupts - Disable the interrupt ring buffer
74aaa36a97SAlex Deucher  *
75aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
76aaa36a97SAlex Deucher  *
77aaa36a97SAlex Deucher  * Disable the interrupt ring buffer (VI).
78aaa36a97SAlex Deucher  */
cz_ih_disable_interrupts(struct amdgpu_device * adev)79aaa36a97SAlex Deucher static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
80aaa36a97SAlex Deucher {
81aaa36a97SAlex Deucher 	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
82aaa36a97SAlex Deucher 	u32 ih_cntl = RREG32(mmIH_CNTL);
83aaa36a97SAlex Deucher 
84aaa36a97SAlex Deucher 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
85aaa36a97SAlex Deucher 	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0);
86aaa36a97SAlex Deucher 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
87aaa36a97SAlex Deucher 	WREG32(mmIH_CNTL, ih_cntl);
88aaa36a97SAlex Deucher 	/* set rptr, wptr to 0 */
89aaa36a97SAlex Deucher 	WREG32(mmIH_RB_RPTR, 0);
90aaa36a97SAlex Deucher 	WREG32(mmIH_RB_WPTR, 0);
91aaa36a97SAlex Deucher 	adev->irq.ih.enabled = false;
92aaa36a97SAlex Deucher 	adev->irq.ih.rptr = 0;
93aaa36a97SAlex Deucher }
94aaa36a97SAlex Deucher 
95aaa36a97SAlex Deucher /**
96aaa36a97SAlex Deucher  * cz_ih_irq_init - init and enable the interrupt ring
97aaa36a97SAlex Deucher  *
98aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
99aaa36a97SAlex Deucher  *
100aaa36a97SAlex Deucher  * Allocate a ring buffer for the interrupt controller,
101aaa36a97SAlex Deucher  * enable the RLC, disable interrupts, enable the IH
102aaa36a97SAlex Deucher  * ring buffer and enable it (VI).
103aaa36a97SAlex Deucher  * Called at device load and reume.
104aaa36a97SAlex Deucher  * Returns 0 for success, errors for failure.
105aaa36a97SAlex Deucher  */
cz_ih_irq_init(struct amdgpu_device * adev)106aaa36a97SAlex Deucher static int cz_ih_irq_init(struct amdgpu_device *adev)
107aaa36a97SAlex Deucher {
108d81f78b4SChristian König 	struct amdgpu_ih_ring *ih = &adev->irq.ih;
109aaa36a97SAlex Deucher 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
110d81f78b4SChristian König 	int rb_bufsz;
111aaa36a97SAlex Deucher 
112aaa36a97SAlex Deucher 	/* disable irqs */
113aaa36a97SAlex Deucher 	cz_ih_disable_interrupts(adev);
114aaa36a97SAlex Deucher 
115aaa36a97SAlex Deucher 	/* setup interrupt control */
11692e71b06SChristian König 	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
117aaa36a97SAlex Deucher 	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
118aaa36a97SAlex Deucher 	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
119aaa36a97SAlex Deucher 	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
120aaa36a97SAlex Deucher 	 */
121aaa36a97SAlex Deucher 	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
122aaa36a97SAlex Deucher 	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
123aaa36a97SAlex Deucher 	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
124aaa36a97SAlex Deucher 	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
125aaa36a97SAlex Deucher 
126aaa36a97SAlex Deucher 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
127aaa36a97SAlex Deucher 	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
128aaa36a97SAlex Deucher 
129aaa36a97SAlex Deucher 	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
130aaa36a97SAlex Deucher 	ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
131aaa36a97SAlex Deucher 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
132aaa36a97SAlex Deucher 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
133aaa36a97SAlex Deucher 
134aaa36a97SAlex Deucher 	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
135aaa36a97SAlex Deucher 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
136aaa36a97SAlex Deucher 
137aaa36a97SAlex Deucher 	/* set the writeback address whether it's enabled or not */
138d81f78b4SChristian König 	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
139d81f78b4SChristian König 	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
140aaa36a97SAlex Deucher 
141aaa36a97SAlex Deucher 	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
142aaa36a97SAlex Deucher 
143aaa36a97SAlex Deucher 	/* set rptr, wptr to 0 */
144aaa36a97SAlex Deucher 	WREG32(mmIH_RB_RPTR, 0);
145aaa36a97SAlex Deucher 	WREG32(mmIH_RB_WPTR, 0);
146aaa36a97SAlex Deucher 
147aaa36a97SAlex Deucher 	/* Default settings for IH_CNTL (disabled at first) */
148aaa36a97SAlex Deucher 	ih_cntl = RREG32(mmIH_CNTL);
149aaa36a97SAlex Deucher 	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0);
150aaa36a97SAlex Deucher 
151aaa36a97SAlex Deucher 	if (adev->irq.msi_enabled)
152aaa36a97SAlex Deucher 		ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1);
153aaa36a97SAlex Deucher 	WREG32(mmIH_CNTL, ih_cntl);
154aaa36a97SAlex Deucher 
155aaa36a97SAlex Deucher 	pci_set_master(adev->pdev);
156aaa36a97SAlex Deucher 
157aaa36a97SAlex Deucher 	/* enable interrupts */
158aaa36a97SAlex Deucher 	cz_ih_enable_interrupts(adev);
159aaa36a97SAlex Deucher 
1600e2b854eSMuhammad Falak R Wani 	return 0;
161aaa36a97SAlex Deucher }
162aaa36a97SAlex Deucher 
/**
 * cz_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VI).
 */
static void cz_ih_irq_disable(struct amdgpu_device *adev)
{
	cz_ih_disable_interrupts(adev);

	/* give in-flight interrupts a chance to drain */
	mdelay(1);
}
177aaa36a97SAlex Deucher 
178aaa36a97SAlex Deucher /**
179aaa36a97SAlex Deucher  * cz_ih_get_wptr - get the IH ring buffer wptr
180aaa36a97SAlex Deucher  *
181aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
182a549a9daSLee Jones  * @ih: IH ring buffer to fetch wptr
183aaa36a97SAlex Deucher  *
184aaa36a97SAlex Deucher  * Get the IH ring buffer wptr from either the register
185aaa36a97SAlex Deucher  * or the writeback memory buffer (VI).  Also check for
186aaa36a97SAlex Deucher  * ring buffer overflow and deal with it.
187aaa36a97SAlex Deucher  * Used by cz_irq_process(VI).
188aaa36a97SAlex Deucher  * Returns the value of the wptr.
189aaa36a97SAlex Deucher  */
cz_ih_get_wptr(struct amdgpu_device * adev,struct amdgpu_ih_ring * ih)1908bb9eb48SChristian König static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
1918bb9eb48SChristian König 			  struct amdgpu_ih_ring *ih)
192aaa36a97SAlex Deucher {
193aaa36a97SAlex Deucher 	u32 wptr, tmp;
194aaa36a97SAlex Deucher 
195d81f78b4SChristian König 	wptr = le32_to_cpu(*ih->wptr_cpu);
196aaa36a97SAlex Deucher 
197e4180c42SDefang Bo 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
198e4180c42SDefang Bo 		goto out;
199e4180c42SDefang Bo 
200e4180c42SDefang Bo 	/* Double check that the overflow wasn't already cleared. */
201e4180c42SDefang Bo 	wptr = RREG32(mmIH_RB_WPTR);
202e4180c42SDefang Bo 
203e4180c42SDefang Bo 	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
204e4180c42SDefang Bo 		goto out;
205e4180c42SDefang Bo 
206aaa36a97SAlex Deucher 	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
207e4180c42SDefang Bo 
208aaa36a97SAlex Deucher 	/* When a ring buffer overflow happen start parsing interrupt
209aaa36a97SAlex Deucher 	 * from the last not overwritten vector (wptr + 16). Hopefully
210aaa36a97SAlex Deucher 	 * this should allow us to catchup.
211aaa36a97SAlex Deucher 	 */
212aaa36a97SAlex Deucher 	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
2138bb9eb48SChristian König 		wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
2148bb9eb48SChristian König 	ih->rptr = (wptr + 16) & ih->ptr_mask;
215aaa36a97SAlex Deucher 	tmp = RREG32(mmIH_RB_CNTL);
216aaa36a97SAlex Deucher 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
217aaa36a97SAlex Deucher 	WREG32(mmIH_RB_CNTL, tmp);
218e4180c42SDefang Bo 
219*89833979SFriedrich Vock 	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
220*89833979SFriedrich Vock 	 * can be detected.
221*89833979SFriedrich Vock 	 */
222*89833979SFriedrich Vock 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
223*89833979SFriedrich Vock 	WREG32(mmIH_RB_CNTL, tmp);
224e4180c42SDefang Bo 
225e4180c42SDefang Bo out:
2268bb9eb48SChristian König 	return (wptr & ih->ptr_mask);
227aaa36a97SAlex Deucher }
228aaa36a97SAlex Deucher 
229aaa36a97SAlex Deucher /**
230aaa36a97SAlex Deucher  * cz_ih_decode_iv - decode an interrupt vector
231aaa36a97SAlex Deucher  *
232aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
233a549a9daSLee Jones  * @ih: IH ring buffer to decode
234a549a9daSLee Jones  * @entry: IV entry to place decoded information into
235aaa36a97SAlex Deucher  *
236aaa36a97SAlex Deucher  * Decodes the interrupt vector at the current rptr
237aaa36a97SAlex Deucher  * position and also advance the position.
238aaa36a97SAlex Deucher  */
cz_ih_decode_iv(struct amdgpu_device * adev,struct amdgpu_ih_ring * ih,struct amdgpu_iv_entry * entry)239aaa36a97SAlex Deucher static void cz_ih_decode_iv(struct amdgpu_device *adev,
2408bb9eb48SChristian König 			    struct amdgpu_ih_ring *ih,
241aaa36a97SAlex Deucher 			    struct amdgpu_iv_entry *entry)
242aaa36a97SAlex Deucher {
243aaa36a97SAlex Deucher 	/* wptr/rptr are in bytes! */
2448bb9eb48SChristian König 	u32 ring_index = ih->rptr >> 2;
245aaa36a97SAlex Deucher 	uint32_t dw[4];
246aaa36a97SAlex Deucher 
2478bb9eb48SChristian König 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
2488bb9eb48SChristian König 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
2498bb9eb48SChristian König 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
2508bb9eb48SChristian König 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
251aaa36a97SAlex Deucher 
2521ffdeca6SChristian König 	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
253aaa36a97SAlex Deucher 	entry->src_id = dw[0] & 0xff;
2547ccf5aa8SAlex Deucher 	entry->src_data[0] = dw[1] & 0xfffffff;
255aaa36a97SAlex Deucher 	entry->ring_id = dw[2] & 0xff;
256c4f46f22SChristian König 	entry->vmid = (dw[2] >> 8) & 0xff;
2573816e42fSChristian König 	entry->pasid = (dw[2] >> 16) & 0xffff;
258aaa36a97SAlex Deucher 
259aaa36a97SAlex Deucher 	/* wptr/rptr are in bytes! */
2608bb9eb48SChristian König 	ih->rptr += 16;
261aaa36a97SAlex Deucher }
262aaa36a97SAlex Deucher 
263aaa36a97SAlex Deucher /**
264aaa36a97SAlex Deucher  * cz_ih_set_rptr - set the IH ring buffer rptr
265aaa36a97SAlex Deucher  *
266aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
267a549a9daSLee Jones  * @ih: IH ring buffer to set rptr
268aaa36a97SAlex Deucher  *
269aaa36a97SAlex Deucher  * Set the IH ring buffer rptr.
270aaa36a97SAlex Deucher  */
cz_ih_set_rptr(struct amdgpu_device * adev,struct amdgpu_ih_ring * ih)2718bb9eb48SChristian König static void cz_ih_set_rptr(struct amdgpu_device *adev,
2728bb9eb48SChristian König 			   struct amdgpu_ih_ring *ih)
273aaa36a97SAlex Deucher {
2748bb9eb48SChristian König 	WREG32(mmIH_RB_RPTR, ih->rptr);
275aaa36a97SAlex Deucher }
276aaa36a97SAlex Deucher 
/* Register the IRQ domain and hook up the IH callbacks early. */
static int cz_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_domain(adev);
	if (r)
		return r;

	cz_ih_set_interrupt_funcs(adev);

	return 0;
}
290aaa36a97SAlex Deucher 
cz_ih_sw_init(void * handle)2915fc3aeebSyanyang1 static int cz_ih_sw_init(void *handle)
292aaa36a97SAlex Deucher {
293aaa36a97SAlex Deucher 	int r;
2945fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
295aaa36a97SAlex Deucher 
296425c3143SChristian König 	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
297aaa36a97SAlex Deucher 	if (r)
298aaa36a97SAlex Deucher 		return r;
299aaa36a97SAlex Deucher 
300aaa36a97SAlex Deucher 	r = amdgpu_irq_init(adev);
301aaa36a97SAlex Deucher 
302aaa36a97SAlex Deucher 	return r;
303aaa36a97SAlex Deucher }
304aaa36a97SAlex Deucher 
/* Tear down the IRQ software state and remove the IRQ domain. */
static int cz_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);
	amdgpu_irq_remove_domain(adev);

	return 0;
}
314aaa36a97SAlex Deucher 
/* Program and enable the interrupt ring on the hardware. */
static int cz_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cz_ih_irq_init(adev);
}
326aaa36a97SAlex Deucher 
/* Quiesce the interrupt controller hardware. */
static int cz_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cz_ih_irq_disable(adev);

	return 0;
}
335aaa36a97SAlex Deucher 
/* Suspend is just a hardware teardown. */
static int cz_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cz_ih_hw_fini(adev);
}
342aaa36a97SAlex Deucher 
/* Resume is just a hardware re-init. */
static int cz_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cz_ih_hw_init(adev);
}
349aaa36a97SAlex Deucher 
cz_ih_is_idle(void * handle)3505fc3aeebSyanyang1 static bool cz_ih_is_idle(void *handle)
351aaa36a97SAlex Deucher {
3525fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
353aaa36a97SAlex Deucher 	u32 tmp = RREG32(mmSRBM_STATUS);
354aaa36a97SAlex Deucher 
355aaa36a97SAlex Deucher 	if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
356aaa36a97SAlex Deucher 		return false;
357aaa36a97SAlex Deucher 
358aaa36a97SAlex Deucher 	return true;
359aaa36a97SAlex Deucher }
360aaa36a97SAlex Deucher 
cz_ih_wait_for_idle(void * handle)3615fc3aeebSyanyang1 static int cz_ih_wait_for_idle(void *handle)
362aaa36a97SAlex Deucher {
363aaa36a97SAlex Deucher 	unsigned i;
364aaa36a97SAlex Deucher 	u32 tmp;
3655fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
366aaa36a97SAlex Deucher 
367aaa36a97SAlex Deucher 	for (i = 0; i < adev->usec_timeout; i++) {
368aaa36a97SAlex Deucher 		/* read MC_STATUS */
369aaa36a97SAlex Deucher 		tmp = RREG32(mmSRBM_STATUS);
370aaa36a97SAlex Deucher 		if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
371aaa36a97SAlex Deucher 			return 0;
372aaa36a97SAlex Deucher 		udelay(1);
373aaa36a97SAlex Deucher 	}
374aaa36a97SAlex Deucher 	return -ETIMEDOUT;
375aaa36a97SAlex Deucher }
376aaa36a97SAlex Deucher 
cz_ih_soft_reset(void * handle)3775fc3aeebSyanyang1 static int cz_ih_soft_reset(void *handle)
378aaa36a97SAlex Deucher {
379aaa36a97SAlex Deucher 	u32 srbm_soft_reset = 0;
3805fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
381aaa36a97SAlex Deucher 	u32 tmp = RREG32(mmSRBM_STATUS);
382aaa36a97SAlex Deucher 
383aaa36a97SAlex Deucher 	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
384aaa36a97SAlex Deucher 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
385aaa36a97SAlex Deucher 						SOFT_RESET_IH, 1);
386aaa36a97SAlex Deucher 
387aaa36a97SAlex Deucher 	if (srbm_soft_reset) {
388aaa36a97SAlex Deucher 		tmp = RREG32(mmSRBM_SOFT_RESET);
389aaa36a97SAlex Deucher 		tmp |= srbm_soft_reset;
390aaa36a97SAlex Deucher 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
391aaa36a97SAlex Deucher 		WREG32(mmSRBM_SOFT_RESET, tmp);
392aaa36a97SAlex Deucher 		tmp = RREG32(mmSRBM_SOFT_RESET);
393aaa36a97SAlex Deucher 
394aaa36a97SAlex Deucher 		udelay(50);
395aaa36a97SAlex Deucher 
396aaa36a97SAlex Deucher 		tmp &= ~srbm_soft_reset;
397aaa36a97SAlex Deucher 		WREG32(mmSRBM_SOFT_RESET, tmp);
398aaa36a97SAlex Deucher 		tmp = RREG32(mmSRBM_SOFT_RESET);
399aaa36a97SAlex Deucher 
400aaa36a97SAlex Deucher 		/* Wait a little for things to settle down */
401aaa36a97SAlex Deucher 		udelay(50);
402aaa36a97SAlex Deucher 	}
403aaa36a97SAlex Deucher 
404aaa36a97SAlex Deucher 	return 0;
405aaa36a97SAlex Deucher }
406aaa36a97SAlex Deucher 
/* Clockgating is not implemented for this IH block yet. */
static int cz_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
413aaa36a97SAlex Deucher 
/* Powergating is not implemented for this IH block yet. */
static int cz_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
420aaa36a97SAlex Deucher 
421a1255107SAlex Deucher static const struct amd_ip_funcs cz_ih_ip_funcs = {
42288a907d6STom St Denis 	.name = "cz_ih",
423aaa36a97SAlex Deucher 	.early_init = cz_ih_early_init,
424aaa36a97SAlex Deucher 	.late_init = NULL,
425aaa36a97SAlex Deucher 	.sw_init = cz_ih_sw_init,
426aaa36a97SAlex Deucher 	.sw_fini = cz_ih_sw_fini,
427aaa36a97SAlex Deucher 	.hw_init = cz_ih_hw_init,
428aaa36a97SAlex Deucher 	.hw_fini = cz_ih_hw_fini,
429aaa36a97SAlex Deucher 	.suspend = cz_ih_suspend,
430aaa36a97SAlex Deucher 	.resume = cz_ih_resume,
431aaa36a97SAlex Deucher 	.is_idle = cz_ih_is_idle,
432aaa36a97SAlex Deucher 	.wait_for_idle = cz_ih_wait_for_idle,
433aaa36a97SAlex Deucher 	.soft_reset = cz_ih_soft_reset,
434aaa36a97SAlex Deucher 	.set_clockgating_state = cz_ih_set_clockgating_state,
435aaa36a97SAlex Deucher 	.set_powergating_state = cz_ih_set_powergating_state,
436aaa36a97SAlex Deucher };
437aaa36a97SAlex Deucher 
438aaa36a97SAlex Deucher static const struct amdgpu_ih_funcs cz_ih_funcs = {
439aaa36a97SAlex Deucher 	.get_wptr = cz_ih_get_wptr,
440aaa36a97SAlex Deucher 	.decode_iv = cz_ih_decode_iv,
441aaa36a97SAlex Deucher 	.set_rptr = cz_ih_set_rptr
442aaa36a97SAlex Deucher };
443aaa36a97SAlex Deucher 
cz_ih_set_interrupt_funcs(struct amdgpu_device * adev)444aaa36a97SAlex Deucher static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
445aaa36a97SAlex Deucher {
446aaa36a97SAlex Deucher 	adev->irq.ih_funcs = &cz_ih_funcs;
447aaa36a97SAlex Deucher }
448aaa36a97SAlex Deucher 
449a1255107SAlex Deucher const struct amdgpu_ip_block_version cz_ih_ip_block =
450a1255107SAlex Deucher {
451a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_IH,
452a1255107SAlex Deucher 	.major = 3,
453a1255107SAlex Deucher 	.minor = 0,
454a1255107SAlex Deucher 	.rev = 0,
455a1255107SAlex Deucher 	.funcs = &cz_ih_ip_funcs,
456a1255107SAlex Deucher };
457