/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

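/*
 * The register ranges below are byte offsets from the register MMIO base
 * (adev->rmmio_base); acp_hw_init() uses them to build the IORESOURCE_MEM
 * entries handed to the MFD sub-devices.
 */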
#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

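/*
 * ACP PGFSM, memory shutdown, control, status and soft-reset register
 * indices; the control, status and soft-reset registers are accessed
 * through the CGS read/write helpers below.
 */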
#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

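/* ACP tile identifiers: the P1/P2 power tiles and the three DSP tiles */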
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

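/*
 * acp_sw_init() creates the CGS device used below for ACP register access;
 * acp_sw_fini() destroys it again.
 */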
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to POWER GATE ACP block
		 * smu will
		 * 1. turn off the acp clock
		 * 2. power off the acp tiles
		 * 3. check and enter ulv state
		 */
		if (adev->powerplay.pp_funcs &&
			adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to UNGATE ACP block
		 * smu will
		 * 1. exit ulv
		 * 2. turn on acp clock
		 * 3. power on acp tiles
		 */
		if (adev->powerplay.pp_funcs &&
			adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

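/*
 * Look up the platform device that the MFD core auto-registered for the
 * cell named @device_name at index @r ("<name>.<id>.auto"), so that it can
 * be attached to or detached from the ACP power domain.
 */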
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test the ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

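	/*
	 * Three sets of DesignWare I2S platform data: [0] playback, [1]
	 * capture and [2] the BT instance (playback and capture).  Stoney
	 * additionally needs the 16-bit resolution override quirk on each.
	 */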
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

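	/*
	 * Five resources: the ACP DMA register block, the I2S playback,
	 * capture and BT register windows, and the ACP DMA interrupt.
	 */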
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

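	/*
	 * Four MFD cells: the ACP DMA engine plus three DesignWare I2S
	 * controllers (playback, capture and BT).  The DMA cell takes all
	 * five resources; each I2S cell only gets its own register window.
	 */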
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

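	/*
	 * Attach each newly created child device to the ACP power domain so
	 * that acp_poweroff()/acp_poweron() above gate and ungate the block
	 * through the SMU as the children suspend and resume.
	 */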
	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			return r;
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

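	/* Detach the child devices from the power domain, then remove them */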
	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->powerplay.pp_funcs &&
		adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};