/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

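	/*
	 * The CGS handle created here is what acp_hw_init()/acp_hw_fini()
	 * later use for MMIO access to the ACP registers via
	 * cgs_read_register()/cgs_write_register().
	 */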
	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
	/* call smu to POWER GATE ACP block
	 * smu will
	 * 1. turn off the acp clock
	 * 2. power off the acp tiles
	 * 3. check and enter ulv state
	 */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
	/* call smu to UNGATE ACP block
	 * smu will
	 * 1. exit ulv
	 * 2. turn on acp clock
	 * 3. power on acp tiles
	 */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

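/*
 * mfd_add_hotplug_devices() registers the cells with PLATFORM_DEVID_AUTO,
 * so the resulting platform devices are named "<cell name>.<id>.auto".
 * Rebuild that name here to look the device up on the platform bus.
 */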
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

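	/*
	 * Make sure the mapped MMIO region is large enough to cover the ACP
	 * registers accessed below.
	 */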
	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;


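	/*
	 * Register a generic PM domain for the audio devices created below;
	 * acp_poweron()/acp_poweroff() gate the ACP block through the SMU
	 * whenever the domain is powered up or down.
	 */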
	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;


	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

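	/*
	 * Three DesignWare I2S instances hang off the ACP: i2s_pdata[0] is
	 * the playback controller, [1] the capture controller and [2] the
	 * BT I2S controller (playback and capture).  Stoney additionally
	 * gets the 16-bit index override quirk.
	 */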
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

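	/*
	 * Five resources are handed to the MFD cells: the ACP DMA MMIO range,
	 * one MMIO range per DesignWare I2S instance (playback, capture, BT)
	 * and the ACP DMA interrupt.
	 */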
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

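	/*
	 * Cell 0 is the ACP DMA/PCM platform driver and receives the ASIC
	 * type as platform data; cells 1-3 are the three designware-i2s
	 * instances, each with its own MMIO resource and platform data.
	 */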
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		goto failure;

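	/*
	 * Attach every platform device created by the MFD core to the ACP
	 * power domain, so powering the domain up/down drives the SMU
	 * power-gating hooks above.
	 */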
	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			goto failure;
		}
	}


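	/*
	 * Bring the block up: assert the audio soft reset, wait for the
	 * done bit, enable the ACP clock and wait for it to come up, then
	 * release the soft reset.
	 */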
	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

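	/*
	 * Tear down in roughly the reverse order of acp_hw_init(): put the
	 * block back into soft reset, gate its clock, detach the child
	 * devices from the PM domain and remove the MFD devices.
	 */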
	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up (ungate) ACP on suspend when it has no child devices */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power ACP back down (gate) again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

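	/* AMD_PG_STATE_GATE requests power gating; any other state ungates the block */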
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};