/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

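/*
 * ACP register apertures handed to the MFD cells below, as byte offsets from
 * the start of the GPU register MMIO region, plus the DesignWare I2S
 * component register offsets passed via i2s_platform_data.
 */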
#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

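/* ACP power tile indices */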
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device = amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call the SMU to POWER GATE the ACP block;
		 * the SMU will
		 * 1. turn off the ACP clock
		 * 2. power off the ACP tiles
		 * 3. check for and enter the ULV state
		 */
		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call the SMU to UNGATE the ACP block;
		 * the SMU will
		 * 1. exit the ULV state
		 * 2. turn on the ACP clock
		 * 3. power on the ACP tiles
		 */
		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

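/*
 * Look up the platform device that the MFD core auto-created for the cell
 * named device_name with platform id r ("%s.%d.auto").
 */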
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}


/**
 * acp_hw_init - start and test the ACP block
 *
 * @handle: amdgpu_device pointer
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

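	/*
	 * Create a PM domain for the audio devices; its power_on/power_off
	 * callbacks gate the ACP block through the SMU.
	 */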
	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
				     GFP_KERNEL);
	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

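	/*
	 * Platform data for the three DesignWare I2S controllers:
	 * [0] playback, [1] capture, [2] BT I2S (playback and capture).
	 */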
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

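	/* MMIO apertures and the DMA IRQ handed to the MFD cells */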
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

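	/* MFD cells: the ACP DMA engine plus three DesignWare I2S controllers */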
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
		return r;

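	/* Attach each newly created MFD device to the ACP PM domain */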
	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			return r;
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val |= ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

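	/* Detach the MFD devices from the PM domain before removing them */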
	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}
static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

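/*
 * Registered by the ASIC setup code (e.g. vi.c) via
 * amdgpu_device_ip_block_add() on ASICs that carry an ACP block.
 */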
const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};