/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				3
#define ACP_SRC_ID				162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

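/* sw_init: create the CGS device handle used for all ACP register access */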
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

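/* sw_fini: release the CGS device handle created in acp_sw_init() */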
static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile %d to suspend\n", tile);
		return -EINVAL;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val |= (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
								+ tile);
			val &= ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile %d to resume\n", tile);
		return -EINVAL;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val &= ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val &= ACP_TILE_P1_MASK;
		else if (tile == ACP_TILE_P2)
			val &= ACP_TILE_P2_MASK;

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

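/* generic PM domain used to manage power to the ACP tiles */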
struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

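/* genpd power_off callback: power down all five ACP tiles, DSP tiles first */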
static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

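/*
 * genpd power_on callback: power up the P1/P2 tiles and keep the unused
 * DSP tiles powered off.
 */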
static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

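/* look up the platform device the MFD core auto-created for the named cell */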
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
			0x5289, 0, &acp_base);
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
			return -ENOMEM;

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	}

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);

	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

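	/*
	 * DesignWare I2S platform data: index 0 describes the playback
	 * controller, index 1 the capture controller. Stoney additionally
	 * needs the 16-bit sample-index override quirk.
	 */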
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

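	/* MMIO ranges for the DMA and I2S blocks plus the ACP DMA interrupt */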
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dma_irq";
	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;

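	/* MFD cells: the ACP DMA engine plus the two DesignWare I2S controllers */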
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 4;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
			if (r) {
				dev_err(dev, "Failed to add dev to genpd\n");
				return r;
			}
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val |= ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable clock to ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell)
		return 0;

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable clock to ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
			/* If removal fails, don't give up and try the rest */
			if (ret)
				dev_err(dev, "remove dev from genpd failed\n");
		}
		kfree(adev->acp.acp_genpd);
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

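/* IP block callbacks registered with the amdgpu IP block framework */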
static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};