/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

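/* sw_init callback: create the CGS device handle used for ACP register access */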
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

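/* sw_fini callback: tear down the CGS device handle created in acp_sw_init */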
static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile: %d to suspend\n", tile);
		return -EINVAL;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
								+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile: %d to resume\n", tile);
		return -EINVAL;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

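/* Generic PM domain wrapper that ties the ACP power domain to its CGS handle */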
struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

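/* genpd power_off callback: suspend all five ACP tiles, DSP tiles first */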
static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log the failure and continue powering off the
		 * remaining tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

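/* genpd power_on callback: resume power tiles P1/P2 and keep the unused DSP tiles off */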
static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

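/* Look up the platform device that the MFD core auto-registered for a given cell */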
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	if (dev)
		dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: handle to the amdgpu_device structure
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
			return -ENOMEM;

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	}

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
			if (r) {
				dev_err(dev, "Failed to add dev to genpd\n");
				return r;
			}
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: handle to the amdgpu_device structure
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(dev);
			/* If removal fails, don't give up; try to remove the rest */
			if (ret)
				dev_err(dev, "remove dev from genpd failed\n");
		}
		kfree(adev->acp.acp_genpd);
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);

	return 0;
}

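/* suspend callback: power ACP back up via the SMU when the block is otherwise unused */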
static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

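/* resume callback: re-enable SMU powergating of the unused ACP block */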
static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

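/* The remaining amd_ip_funcs callbacks need no work for ACP; provide stubs */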
static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

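/* ACP v2.2 IP block description registered with the amdgpu IP framework */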
const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};