/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

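/* Software init: create the CGS device used for ACP register access */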
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

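/* Software teardown: release the CGS device created in acp_sw_init() */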
static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

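/*
 * Generic PM domain wrapper tying the ACP child devices to the owning
 * amdgpu device, so the genpd callbacks can reach the SMU.
 */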
struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to POWER GATE ACP block
		 * smu will
		 * 1. turn off the acp clock
		 * 2. power off the acp tiles
		 * 3. check and enter ulv state
		 */
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to UNGATE ACP block
		 * smu will
		 * 1. exit ulv
		 * 2. turn on acp clock
		 * 3. power on acp tiles
		 */
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

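/*
 * The MFD cells are registered with auto device IDs, so the resulting
 * platform devices are named "<cell-name>.<n>.auto"; rebuild that name and
 * look the device up on the platform bus.
 */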
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test the ACP block
 *
 * @handle: amdgpu_device pointer, passed as the IP-block handle
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
				     GFP_KERNEL);
	if (adev->acp.acp_cell == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	/* DesignWare I2S platform data: [0] playback, [1] capture, [2] BT I2S */
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

	/* MMIO and IRQ resources handed out to the MFD cells below */
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

	/* One ACP DMA cell plus three DesignWare I2S controller cells */
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
		goto failure;

	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			goto failure;
		}
	}

	/* Assert soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}

	/* Enable the ACP clock and wait until it is reported as enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}

	/* Deassert the soft reset flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the ACP hardware block
 *
 * @handle: amdgpu_device pointer, passed as the IP-block handle
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

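/*
 * The following IP-block callbacks are effectively no-ops: the ACP block
 * needs no early init, is always reported as idle, and exposes no soft
 * reset or clockgating control of its own.
 */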
static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

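/* Power gating is requested from the SMU, as in the genpd callbacks above */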
static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

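/* IP-block callbacks for the ACP (Audio Co-Processor) block */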
static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};
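
/*
 * Registration sketch, for illustration only: the actual hookup is assumed
 * to live in the ASIC setup code (e.g. vi.c for Carrizo/Stoney), which adds
 * this IP block version while building the device's IP block list:
 *
 *	amdgpu_device_ip_block_add(adev, &acp_ip_block);
 */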