1 /*
 *  skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
3  *  configurations
4  *
5  *  Copyright (C) 2015 Intel Corp
 *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
7  *	   Jeeja KP <jeeja.kp@intel.com>
8  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  */
19 
20 #include <linux/slab.h>
21 #include <linux/pci.h>
22 #include <sound/core.h>
23 #include <sound/pcm.h>
24 #include <uapi/sound/skl-tplg-interface.h>
25 #include "skl-sst-dsp.h"
26 #include "cnl-sst-dsp.h"
27 #include "skl-sst-ipc.h"
28 #include "skl.h"
29 #include "../common/sst-dsp.h"
30 #include "../common/sst-dsp-priv.h"
31 #include "skl-topology.h"
32 
33 static int skl_alloc_dma_buf(struct device *dev,
34 		struct snd_dma_buffer *dmab, size_t size)
35 {
36 	struct hdac_bus *bus = dev_get_drvdata(dev);
37 
38 	if (!bus)
39 		return -ENODEV;
40 
	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
					    size, dmab);
42 }
43 
44 static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
45 {
46 	struct hdac_bus *bus = dev_get_drvdata(dev);
47 
48 	if (!bus)
49 		return -ENODEV;
50 
51 	bus->io_ops->dma_free_pages(bus, dmab);
52 
53 	return 0;
54 }
55 
56 #define SKL_ASTATE_PARAM_ID	4
57 
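/*
 * Send the A-state table (the entry count followed by 'cnt' entries of
 * struct skl_astate_param) to the firmware as a large config set IPC.
 */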
58 void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
59 {
60 	struct skl_ipc_large_config_msg	msg = {0};
61 
62 	msg.large_param_id = SKL_ASTATE_PARAM_ID;
63 	msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
64 				sizeof(cnt));
65 
66 	skl_ipc_set_large_config(&ctx->ipc, &msg, data);
67 }
68 
69 #define NOTIFICATION_PARAM_ID 3
70 #define NOTIFICATION_MASK 0xf
71 
/* enable/disable notification of underruns/overruns from the firmware module */
73 void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
74 {
75 	struct notification_mask mask;
76 	struct skl_ipc_large_config_msg	msg = {0};
77 
78 	mask.notify = NOTIFICATION_MASK;
79 	mask.enable = enable;
80 
81 	msg.large_param_id = NOTIFICATION_PARAM_ID;
82 	msg.param_data_size = sizeof(mask);
83 
84 	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
85 }
86 
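/*
 * Enable/disable SPIB (Software Position In Buffer) for the playback stream
 * with the given stream tag and program its position value.
 */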
87 static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
88 				int stream_tag, int enable)
89 {
90 	struct hdac_bus *bus = dev_get_drvdata(dev);
91 	struct hdac_stream *stream = snd_hdac_get_stream(bus,
92 			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
93 	struct hdac_ext_stream *estream;
94 
95 	if (!stream)
96 		return -EINVAL;
97 
98 	estream = stream_to_hdac_ext_stream(stream);
99 	/* enable/disable SPIB for this hdac stream */
100 	snd_hdac_ext_stream_spbcap_enable(bus, enable, stream->index);
101 
102 	/* set the spib value */
103 	snd_hdac_ext_stream_set_spib(bus, estream, size);
104 
105 	return 0;
106 }
107 
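/*
 * Assign a host DMA stream for DSP code/library loading, prepare its buffer
 * and enable SPIB; returns the assigned stream tag on success.
 */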
108 static int skl_dsp_prepare(struct device *dev, unsigned int format,
109 			unsigned int size, struct snd_dma_buffer *dmab)
110 {
111 	struct hdac_bus *bus = dev_get_drvdata(dev);
112 	struct hdac_ext_stream *estream;
113 	struct hdac_stream *stream;
114 	struct snd_pcm_substream substream;
115 	int ret;
116 
117 	if (!bus)
118 		return -ENODEV;
119 
120 	memset(&substream, 0, sizeof(substream));
121 	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
122 
123 	estream = snd_hdac_ext_stream_assign(bus, &substream,
124 					HDAC_EXT_STREAM_TYPE_HOST);
125 	if (!estream)
126 		return -ENODEV;
127 
128 	stream = hdac_stream(estream);
129 
	/* prepare the decoupled host DMA channel for DSP loading */
131 	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
132 	if (ret < 0)
133 		return ret;
134 
135 	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);
136 
137 	return stream->stream_tag;
138 }
139 
140 static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
141 {
142 	struct hdac_bus *bus = dev_get_drvdata(dev);
143 	struct hdac_stream *stream;
144 
145 	if (!bus)
146 		return -ENODEV;
147 
148 	stream = snd_hdac_get_stream(bus,
149 		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
150 	if (!stream)
151 		return -EINVAL;
152 
153 	snd_hdac_dsp_trigger(stream, start);
154 
155 	return 0;
156 }
157 
158 static int skl_dsp_cleanup(struct device *dev,
159 		struct snd_dma_buffer *dmab, int stream_tag)
160 {
161 	struct hdac_bus *bus = dev_get_drvdata(dev);
162 	struct hdac_stream *stream;
163 	struct hdac_ext_stream *estream;
164 
165 	if (!bus)
166 		return -ENODEV;
167 
168 	stream = snd_hdac_get_stream(bus,
169 		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
170 	if (!stream)
171 		return -EINVAL;
172 
173 	estream = stream_to_hdac_ext_stream(stream);
174 	skl_dsp_setup_spib(dev, 0, stream_tag, false);
175 	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
176 
177 	snd_hdac_dsp_cleanup(stream, dmab);
178 
179 	return 0;
180 }
181 
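/*
 * Skylake/Kabylake load DSP firmware through the code loader DMA, so only
 * the DMA buffer alloc/free helpers are needed here.
 */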
182 static struct skl_dsp_loader_ops skl_get_loader_ops(void)
183 {
184 	struct skl_dsp_loader_ops loader_ops;
185 
	memset(&loader_ops, 0, sizeof(loader_ops));
187 
188 	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
189 	loader_ops.free_dma_buf = skl_free_dma_buf;
190 
191 	return loader_ops;
192 };
193 
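/*
 * Broxton and later platforms load DSP firmware over a host DMA stream, so
 * the prepare/trigger/cleanup callbacks are needed in addition to the
 * buffer alloc/free helpers.
 */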
194 static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
195 {
196 	struct skl_dsp_loader_ops loader_ops;
197 
198 	memset(&loader_ops, 0, sizeof(loader_ops));
199 
200 	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
201 	loader_ops.free_dma_buf = skl_free_dma_buf;
202 	loader_ops.prepare = skl_dsp_prepare;
203 	loader_ops.trigger = skl_dsp_trigger;
204 	loader_ops.cleanup = skl_dsp_cleanup;
205 
206 	return loader_ops;
207 };
208 
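/* per-platform DSP ops, selected by the audio controller PCI device ID */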
209 static const struct skl_dsp_ops dsp_ops[] = {
210 	{
211 		.id = 0x9d70,
212 		.num_cores = 2,
213 		.loader_ops = skl_get_loader_ops,
214 		.init = skl_sst_dsp_init,
215 		.init_fw = skl_sst_init_fw,
216 		.cleanup = skl_sst_dsp_cleanup
217 	},
218 	{
219 		.id = 0x9d71,
220 		.num_cores = 2,
221 		.loader_ops = skl_get_loader_ops,
222 		.init = skl_sst_dsp_init,
223 		.init_fw = skl_sst_init_fw,
224 		.cleanup = skl_sst_dsp_cleanup
225 	},
226 	{
227 		.id = 0x5a98,
228 		.num_cores = 2,
229 		.loader_ops = bxt_get_loader_ops,
230 		.init = bxt_sst_dsp_init,
231 		.init_fw = bxt_sst_init_fw,
232 		.cleanup = bxt_sst_dsp_cleanup
233 	},
234 	{
235 		.id = 0x3198,
236 		.num_cores = 2,
237 		.loader_ops = bxt_get_loader_ops,
238 		.init = bxt_sst_dsp_init,
239 		.init_fw = bxt_sst_init_fw,
240 		.cleanup = bxt_sst_dsp_cleanup
241 	},
242 	{
243 		.id = 0x9dc8,
244 		.num_cores = 4,
245 		.loader_ops = bxt_get_loader_ops,
246 		.init = cnl_sst_dsp_init,
247 		.init_fw = cnl_sst_init_fw,
248 		.cleanup = cnl_sst_dsp_cleanup
249 	},
250 	{
251 		.id = 0xa348,
252 		.num_cores = 4,
253 		.loader_ops = bxt_get_loader_ops,
254 		.init = cnl_sst_dsp_init,
255 		.init_fw = cnl_sst_init_fw,
256 		.cleanup = cnl_sst_dsp_cleanup
257 	},
258 };
259 
260 const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
261 {
262 	int i;
263 
264 	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
265 		if (dsp_ops[i].id == pci_id)
266 			return &dsp_ops[i];
267 	}
268 
269 	return NULL;
270 }
271 
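/*
 * Enable the processing pipe capability, map the ADSP MMIO BAR, select the
 * platform dsp_ops and initialize the DSP context and per-core bookkeeping.
 */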
272 int skl_init_dsp(struct skl *skl)
273 {
274 	void __iomem *mmio_base;
275 	struct hdac_bus *bus = skl_to_bus(skl);
276 	struct skl_dsp_loader_ops loader_ops;
277 	int irq = bus->irq;
278 	const struct skl_dsp_ops *ops;
279 	struct skl_dsp_cores *cores;
280 	int ret;
281 
282 	/* enable ppcap interrupt */
283 	snd_hdac_ext_bus_ppcap_enable(bus, true);
284 	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
285 
286 	/* read the BAR of the ADSP MMIO */
287 	mmio_base = pci_ioremap_bar(skl->pci, 4);
	if (!mmio_base) {
289 		dev_err(bus->dev, "ioremap error\n");
290 		return -ENXIO;
291 	}
292 
293 	ops = skl_get_dsp_ops(skl->pci->device);
294 	if (!ops) {
295 		ret = -EIO;
296 		goto unmap_mmio;
297 	}
298 
299 	loader_ops = ops->loader_ops();
300 	ret = ops->init(bus->dev, mmio_base, irq,
301 				skl->fw_name, loader_ops,
302 				&skl->skl_sst);
303 
304 	if (ret < 0)
305 		goto unmap_mmio;
306 
307 	skl->skl_sst->dsp_ops = ops;
308 	cores = &skl->skl_sst->cores;
309 	cores->count = ops->num_cores;
310 
311 	cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);
312 	if (!cores->state) {
313 		ret = -ENOMEM;
314 		goto unmap_mmio;
315 	}
316 
317 	cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),
318 				     GFP_KERNEL);
319 	if (!cores->usage_count) {
320 		ret = -ENOMEM;
321 		goto free_core_state;
322 	}
323 
324 	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
325 
326 	return 0;
327 
328 free_core_state:
329 	kfree(cores->state);
330 
331 unmap_mmio:
332 	iounmap(mmio_base);
333 
334 	return ret;
335 }
336 
337 int skl_free_dsp(struct skl *skl)
338 {
339 	struct hdac_bus *bus = skl_to_bus(skl);
340 	struct skl_sst *ctx = skl->skl_sst;
341 
	/* disable ppcap interrupt */
343 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
344 
345 	ctx->dsp_ops->cleanup(bus->dev, ctx);
346 
347 	kfree(ctx->cores.state);
348 	kfree(ctx->cores.usage_count);
349 
350 	if (ctx->dsp->addr.lpe)
351 		iounmap(ctx->dsp->addr.lpe);
352 
353 	return 0;
354 }
355 
356 /*
 * In the case of "suspend_active", i.e. the audio IP being active during
 * system suspend, immediately execute any pending D0i3 work before
 * suspending. This is needed for the IP to work in low power mode during
 * system suspend. In the case of normal suspend, cancel any pending
 * D0i3 work.
362  */
363 int skl_suspend_late_dsp(struct skl *skl)
364 {
365 	struct skl_sst *ctx = skl->skl_sst;
366 	struct delayed_work *dwork;
367 
368 	if (!ctx)
369 		return 0;
370 
371 	dwork = &ctx->d0i3.work;
372 
373 	if (dwork->work.func) {
374 		if (skl->supend_active)
375 			flush_delayed_work(dwork);
376 		else
377 			cancel_delayed_work_sync(dwork);
378 	}
379 
380 	return 0;
381 }
382 
383 int skl_suspend_dsp(struct skl *skl)
384 {
385 	struct skl_sst *ctx = skl->skl_sst;
386 	struct hdac_bus *bus = skl_to_bus(skl);
387 	int ret;
388 
389 	/* if ppcap is not supported return 0 */
390 	if (!bus->ppcap)
391 		return 0;
392 
393 	ret = skl_dsp_sleep(ctx->dsp);
394 	if (ret < 0)
395 		return ret;
396 
397 	/* disable ppcap interrupt */
398 	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
399 	snd_hdac_ext_bus_ppcap_enable(bus, false);
400 
401 	return 0;
402 }
403 
404 int skl_resume_dsp(struct skl *skl)
405 {
406 	struct skl_sst *ctx = skl->skl_sst;
407 	struct hdac_bus *bus = skl_to_bus(skl);
408 	int ret;
409 
410 	/* if ppcap is not supported return 0 */
411 	if (!bus->ppcap)
412 		return 0;
413 
414 	/* enable ppcap interrupt */
415 	snd_hdac_ext_bus_ppcap_enable(bus, true);
416 	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
417 
	/* nothing to resume if the DSP's first boot has not happened yet */
419 	if (skl->skl_sst->is_first_boot)
420 		return 0;
421 
422 	/*
423 	 * Disable dynamic clock and power gating during firmware
424 	 * and library download
425 	 */
426 	ctx->enable_miscbdcge(ctx->dev, false);
427 	ctx->clock_power_gating(ctx->dev, false);
428 
429 	ret = skl_dsp_wake(ctx->dsp);
430 	ctx->enable_miscbdcge(ctx->dev, true);
431 	ctx->clock_power_gating(ctx->dev, true);
432 	if (ret < 0)
433 		return ret;
434 
435 	skl_dsp_enable_notification(skl->skl_sst, false);
436 
437 	if (skl->cfg.astate_cfg != NULL) {
438 		skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
439 					skl->cfg.astate_cfg);
440 	}
441 	return ret;
442 }
443 
444 enum skl_bitdepth skl_get_bit_depth(int params)
445 {
446 	switch (params) {
447 	case 8:
448 		return SKL_DEPTH_8BIT;
449 
450 	case 16:
451 		return SKL_DEPTH_16BIT;
452 
453 	case 24:
454 		return SKL_DEPTH_24BIT;
455 
456 	case 32:
457 		return SKL_DEPTH_32BIT;
458 
459 	default:
460 		return SKL_DEPTH_INVALID;
461 
462 	}
463 }
464 
465 /*
 * Each module in the DSP expects a base module configuration, which consists
 * of PCM format information calculated in the driver and resource values
 * read from the widget information passed through the topology binary.
 * This is sent when we create a module with the INIT_INSTANCE IPC message.
470  */
471 static void skl_set_base_module_format(struct skl_sst *ctx,
472 			struct skl_module_cfg *mconfig,
473 			struct skl_base_cfg *base_cfg)
474 {
475 	struct skl_module *module = mconfig->module;
476 	struct skl_module_res *res = &module->resources[mconfig->res_idx];
477 	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
478 	struct skl_module_fmt *format = &fmt->inputs[0].fmt;
479 
480 	base_cfg->audio_fmt.number_of_channels = format->channels;
481 
482 	base_cfg->audio_fmt.s_freq = format->s_freq;
483 	base_cfg->audio_fmt.bit_depth = format->bit_depth;
484 	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
485 	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
486 	base_cfg->audio_fmt.sample_type = format->sample_type;
487 
488 	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
489 			format->bit_depth, format->valid_bit_depth,
490 			format->ch_cfg);
491 
492 	base_cfg->audio_fmt.channel_map = format->ch_map;
493 
494 	base_cfg->audio_fmt.interleaving = format->interleaving_style;
495 
496 	base_cfg->cps = res->cps;
497 	base_cfg->ibs = res->ibs;
498 	base_cfg->obs = res->obs;
499 	base_cfg->is_pages = res->is_pages;
500 }
501 
502 /*
503  * Copies copier capabilities into copier module and updates copier module
504  * config size.
505  */
506 static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
507 				struct skl_cpr_cfg *cpr_mconfig)
508 {
509 	if (mconfig->formats_config.caps_size == 0)
510 		return;
511 
512 	memcpy(cpr_mconfig->gtw_cfg.config_data,
513 			mconfig->formats_config.caps,
514 			mconfig->formats_config.caps_size);
515 
516 	cpr_mconfig->gtw_cfg.config_length =
517 			(mconfig->formats_config.caps_size) / 4;
518 }
519 
520 #define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
521 /*
 * Calculate the gateway settings required for the copier module: the type
 * of gateway and the index of the gateway to use
524  */
525 static u32 skl_get_node_id(struct skl_sst *ctx,
526 			struct skl_module_cfg *mconfig)
527 {
528 	union skl_connector_node_id node_id = {0};
529 	union skl_ssp_dma_node ssp_node  = {0};
530 	struct skl_pipe_params *params = mconfig->pipe->p_params;
531 
532 	switch (mconfig->dev_type) {
533 	case SKL_DEVICE_BT:
534 		node_id.node.dma_type =
535 			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
536 			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
537 			SKL_DMA_I2S_LINK_INPUT_CLASS;
538 		node_id.node.vindex = params->host_dma_id +
539 					(mconfig->vbus_id << 3);
540 		break;
541 
542 	case SKL_DEVICE_I2S:
543 		node_id.node.dma_type =
544 			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
545 			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
546 			SKL_DMA_I2S_LINK_INPUT_CLASS;
547 		ssp_node.dma_node.time_slot_index = mconfig->time_slot;
548 		ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
549 		node_id.node.vindex = ssp_node.val;
550 		break;
551 
552 	case SKL_DEVICE_DMIC:
553 		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
554 		node_id.node.vindex = mconfig->vbus_id +
555 					 (mconfig->time_slot);
556 		break;
557 
558 	case SKL_DEVICE_HDALINK:
559 		node_id.node.dma_type =
560 			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
561 			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
562 			SKL_DMA_HDA_LINK_INPUT_CLASS;
563 		node_id.node.vindex = params->link_dma_id;
564 		break;
565 
566 	case SKL_DEVICE_HDAHOST:
567 		node_id.node.dma_type =
568 			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
569 			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
570 			SKL_DMA_HDA_HOST_INPUT_CLASS;
571 		node_id.node.vindex = params->host_dma_id;
572 		break;
573 
574 	default:
		node_id.val = SKL_NON_GATEWAY_CPR_NODE_ID;
576 		break;
577 	}
578 
579 	return node_id.val;
580 }
581 
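/*
 * Fill the copier gateway configuration: resolve the gateway node id, size
 * the DMA buffer from the module IBS/OBS values (falling back to 2ms worth
 * of data) and append any capability blob from the topology.
 */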
582 static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
583 			struct skl_module_cfg *mconfig,
584 			struct skl_cpr_cfg *cpr_mconfig)
585 {
586 	u32 dma_io_buf;
587 	struct skl_module_res *res;
588 	int res_idx = mconfig->res_idx;
589 	struct skl *skl = get_skl_ctx(ctx->dev);
590 
591 	cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);
592 
593 	if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
594 		cpr_mconfig->cpr_feature_mask = 0;
595 		return;
596 	}
597 
598 	if (skl->nr_modules) {
599 		res = &mconfig->module->resources[mconfig->res_idx];
600 		cpr_mconfig->gtw_cfg.dma_buffer_size = res->dma_buffer_size;
601 		goto skip_buf_size_calc;
602 	} else {
603 		res = &mconfig->module->resources[res_idx];
604 	}
605 
606 	switch (mconfig->hw_conn_type) {
607 	case SKL_CONN_SOURCE:
608 		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
609 			dma_io_buf =  res->ibs;
610 		else
611 			dma_io_buf =  res->obs;
612 		break;
613 
614 	case SKL_CONN_SINK:
615 		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
616 			dma_io_buf =  res->obs;
617 		else
618 			dma_io_buf =  res->ibs;
619 		break;
620 
621 	default:
622 		dev_warn(ctx->dev, "wrong connection type: %d\n",
623 				mconfig->hw_conn_type);
624 		return;
625 	}
626 
627 	cpr_mconfig->gtw_cfg.dma_buffer_size =
628 				mconfig->dma_buffer_size * dma_io_buf;
629 
630 	/* fallback to 2ms default value */
631 	if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
632 		if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
633 			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->obs;
634 		else
635 			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->ibs;
636 	}
637 
638 skip_buf_size_calc:
639 	cpr_mconfig->cpr_feature_mask = 0;
640 	cpr_mconfig->gtw_cfg.config_length  = 0;
641 
642 	skl_copy_copier_caps(mconfig, cpr_mconfig);
643 }
644 
645 #define DMA_CONTROL_ID 5
646 #define DMA_I2S_BLOB_SIZE 21
647 
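/*
 * Send an I2S DMA control blob (e.g. from the NHLT table) for the given
 * gateway node to the firmware via a large config set IPC.
 */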
648 int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
649 				u32 caps_size, u32 node_id)
650 {
651 	struct skl_dma_control *dma_ctrl;
652 	struct skl_ipc_large_config_msg msg = {0};
	int err = 0;

	/* nothing to send if the blob size is zero */
659 	if (caps_size == 0)
660 		return 0;
661 
662 	msg.large_param_id = DMA_CONTROL_ID;
663 	msg.param_data_size = sizeof(struct skl_dma_control) + caps_size;
664 
665 	dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
	if (!dma_ctrl)
667 		return -ENOMEM;
668 
669 	dma_ctrl->node_id = node_id;
670 
671 	/*
672 	 * NHLT blob may contain additional configs along with i2s blob.
673 	 * firmware expects only the i2s blob size as the config_length.
674 	 * So fix to i2s blob size.
675 	 * size in dwords.
676 	 */
677 	dma_ctrl->config_length = DMA_I2S_BLOB_SIZE;
678 
679 	memcpy(dma_ctrl->config_data, caps, caps_size);
680 
681 	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
682 
683 	kfree(dma_ctrl);
684 	return err;
685 }
686 EXPORT_SYMBOL_GPL(skl_dsp_set_dma_control);
687 
688 static void skl_setup_out_format(struct skl_sst *ctx,
689 			struct skl_module_cfg *mconfig,
690 			struct skl_audio_data_format *out_fmt)
691 {
692 	struct skl_module *module = mconfig->module;
693 	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
694 	struct skl_module_fmt *format = &fmt->outputs[0].fmt;
695 
696 	out_fmt->number_of_channels = (u8)format->channels;
697 	out_fmt->s_freq = format->s_freq;
698 	out_fmt->bit_depth = format->bit_depth;
699 	out_fmt->valid_bit_depth = format->valid_bit_depth;
700 	out_fmt->ch_cfg = format->ch_cfg;
701 
702 	out_fmt->channel_map = format->ch_map;
703 	out_fmt->interleaving = format->interleaving_style;
704 	out_fmt->sample_type = format->sample_type;
705 
706 	dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
707 		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
708 }
709 
710 /*
 * The DSP needs an SRC module for frequency conversion. SRC takes the base
 * module configuration and the target frequency as an extra parameter,
 * passed as the src config.
714  */
715 static void skl_set_src_format(struct skl_sst *ctx,
716 			struct skl_module_cfg *mconfig,
717 			struct skl_src_module_cfg *src_mconfig)
718 {
719 	struct skl_module *module = mconfig->module;
720 	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
721 	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;
722 
723 	skl_set_base_module_format(ctx, mconfig,
724 		(struct skl_base_cfg *)src_mconfig);
725 
726 	src_mconfig->src_cfg = fmt->s_freq;
727 }
728 
729 /*
 * The DSP needs the updown mixer module for channel conversion. The updown
 * module takes the base module configuration and the channel configuration.
 * It also takes coefficients; for now the defaults are applied here.
733  */
734 static void skl_set_updown_mixer_format(struct skl_sst *ctx,
735 			struct skl_module_cfg *mconfig,
736 			struct skl_up_down_mixer_cfg *mixer_mconfig)
737 {
738 	struct skl_module *module = mconfig->module;
739 	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
740 	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;
741 
742 	skl_set_base_module_format(ctx,	mconfig,
743 		(struct skl_base_cfg *)mixer_mconfig);
744 	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
745 	mixer_mconfig->ch_map = fmt->ch_map;
746 }
747 
748 /*
 * 'copier' is a DSP internal module which copies data from host DMA (HDA
 * host DMA) or link DMA (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, like the PCM format, the
 * output format and the gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC message.
754  */
755 static void skl_set_copier_format(struct skl_sst *ctx,
756 			struct skl_module_cfg *mconfig,
757 			struct skl_cpr_cfg *cpr_mconfig)
758 {
759 	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
760 	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
761 
762 	skl_set_base_module_format(ctx, mconfig, base_cfg);
763 
764 	skl_setup_out_format(ctx, mconfig, out_fmt);
765 	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
766 }
767 
768 /*
 * Algo modules are DSP pre-processing modules. An algo module takes the
 * base module configuration and its params.
771  */
772 
773 static void skl_set_algo_format(struct skl_sst *ctx,
774 			struct skl_module_cfg *mconfig,
775 			struct skl_algo_cfg *algo_mcfg)
776 {
777 	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;
778 
779 	skl_set_base_module_format(ctx, mconfig, base_cfg);
780 
781 	if (mconfig->formats_config.caps_size == 0)
782 		return;
783 
784 	memcpy(algo_mcfg->params,
785 			mconfig->formats_config.caps,
786 			mconfig->formats_config.caps_size);
787 
788 }
789 
790 /*
791  * Mic select module allows selecting one or many input channels, thus
792  * acting as a demux.
793  *
 * The mic select module takes the base module configuration and the
 * output format configuration.
796  */
797 static void skl_set_base_outfmt_format(struct skl_sst *ctx,
798 			struct skl_module_cfg *mconfig,
799 			struct skl_base_outfmt_cfg *base_outfmt_mcfg)
800 {
801 	struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
802 	struct skl_base_cfg *base_cfg =
803 				(struct skl_base_cfg *)base_outfmt_mcfg;
804 
805 	skl_set_base_module_format(ctx, mconfig, base_cfg);
806 	skl_setup_out_format(ctx, mconfig, out_fmt);
807 }
808 
809 static u16 skl_get_module_param_size(struct skl_sst *ctx,
810 			struct skl_module_cfg *mconfig)
811 {
812 	u16 param_size;
813 
814 	switch (mconfig->m_type) {
815 	case SKL_MODULE_TYPE_COPIER:
816 		param_size = sizeof(struct skl_cpr_cfg);
817 		param_size += mconfig->formats_config.caps_size;
818 		return param_size;
819 
820 	case SKL_MODULE_TYPE_SRCINT:
821 		return sizeof(struct skl_src_module_cfg);
822 
823 	case SKL_MODULE_TYPE_UPDWMIX:
824 		return sizeof(struct skl_up_down_mixer_cfg);
825 
826 	case SKL_MODULE_TYPE_ALGO:
827 		param_size = sizeof(struct skl_base_cfg);
828 		param_size += mconfig->formats_config.caps_size;
829 		return param_size;
830 
831 	case SKL_MODULE_TYPE_BASE_OUTFMT:
832 	case SKL_MODULE_TYPE_MIC_SELECT:
833 	case SKL_MODULE_TYPE_KPB:
834 		return sizeof(struct skl_base_outfmt_cfg);
835 
836 	default:
837 		/*
838 		 * return only base cfg when no specific module type is
839 		 * specified
840 		 */
841 		return sizeof(struct skl_base_cfg);
842 	}
843 
844 	return 0;
845 }
846 
847 /*
848  * DSP firmware supports various modules like copier, SRC, updown etc.
 * These modules require various parameters to be calculated and sent for
 * module initialization to the DSP. By default a generic module needs only
 * the base module format configuration
852  */
853 
854 static int skl_set_module_format(struct skl_sst *ctx,
855 			struct skl_module_cfg *module_config,
856 			u16 *module_config_size,
857 			void **param_data)
858 {
859 	u16 param_size;
860 
861 	param_size  = skl_get_module_param_size(ctx, module_config);
862 
863 	*param_data = kzalloc(param_size, GFP_KERNEL);
	if (!*param_data)
865 		return -ENOMEM;
866 
867 	*module_config_size = param_size;
868 
869 	switch (module_config->m_type) {
870 	case SKL_MODULE_TYPE_COPIER:
871 		skl_set_copier_format(ctx, module_config, *param_data);
872 		break;
873 
874 	case SKL_MODULE_TYPE_SRCINT:
875 		skl_set_src_format(ctx, module_config, *param_data);
876 		break;
877 
878 	case SKL_MODULE_TYPE_UPDWMIX:
879 		skl_set_updown_mixer_format(ctx, module_config, *param_data);
880 		break;
881 
882 	case SKL_MODULE_TYPE_ALGO:
883 		skl_set_algo_format(ctx, module_config, *param_data);
884 		break;
885 
886 	case SKL_MODULE_TYPE_BASE_OUTFMT:
887 	case SKL_MODULE_TYPE_MIC_SELECT:
888 	case SKL_MODULE_TYPE_KPB:
889 		skl_set_base_outfmt_format(ctx, module_config, *param_data);
890 		break;
891 
892 	default:
893 		skl_set_base_module_format(ctx, module_config, *param_data);
894 		break;
895 
896 	}
897 
	dev_dbg(ctx->dev, "Module id=%d config size: %d bytes\n",
			module_config->id.module_id, param_size);
900 	print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
901 			*param_data, param_size, false);
902 	return 0;
903 }
904 
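/* find the pin index already associated with the given module instance id */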
905 static int skl_get_queue_index(struct skl_module_pin *mpin,
906 				struct skl_module_inst_id id, int max)
907 {
908 	int i;
909 
910 	for (i = 0; i < max; i++)  {
911 		if (mpin[i].id.module_id == id.module_id &&
912 			mpin[i].id.instance_id == id.instance_id)
913 			return i;
914 	}
915 
916 	return -EINVAL;
917 }
918 
919 /*
 * Allocate a queue (pin) for a module.
 * If dynamic, the pin_index is allocated from 0 to max_pin.
 * If static, the pin_index is fixed based on the module_id and instance id
923  */
924 static int skl_alloc_queue(struct skl_module_pin *mpin,
925 			struct skl_module_cfg *tgt_cfg, int max)
926 {
927 	int i;
928 	struct skl_module_inst_id id = tgt_cfg->id;
929 	/*
	 * if the pin is dynamic, find the first free pin; otherwise find the
	 * pin matching the module and instance id, as the topology will
	 * ensure a unique pin is assigned to it, so there is no need to
	 * allocate/free
934 	 */
935 	for (i = 0; i < max; i++)  {
936 		if (mpin[i].is_dynamic) {
937 			if (!mpin[i].in_use &&
938 				mpin[i].pin_state == SKL_PIN_UNBIND) {
939 
940 				mpin[i].in_use = true;
941 				mpin[i].id.module_id = id.module_id;
942 				mpin[i].id.instance_id = id.instance_id;
943 				mpin[i].id.pvt_id = id.pvt_id;
944 				mpin[i].tgt_mcfg = tgt_cfg;
945 				return i;
946 			}
947 		} else {
948 			if (mpin[i].id.module_id == id.module_id &&
949 				mpin[i].id.instance_id == id.instance_id &&
950 				mpin[i].pin_state == SKL_PIN_UNBIND) {
951 
952 				mpin[i].tgt_mcfg = tgt_cfg;
953 				return i;
954 			}
955 		}
956 	}
957 
958 	return -EINVAL;
959 }
960 
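/*
 * Release a pin: dynamic pins are returned to the free pool, static pins
 * are only marked as unbound.
 */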
961 static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
962 {
963 	if (mpin[q_index].is_dynamic) {
964 		mpin[q_index].in_use = false;
965 		mpin[q_index].id.module_id = 0;
966 		mpin[q_index].id.instance_id = 0;
967 		mpin[q_index].id.pvt_id = 0;
968 	}
969 	mpin[q_index].pin_state = SKL_PIN_UNBIND;
970 	mpin[q_index].tgt_mcfg = NULL;
971 }
972 
/* Module state is reset to INIT_DONE when all out pin states are UNBIND */
974 
975 static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
976 						struct skl_module_cfg *mcfg)
977 {
978 	int i;
979 	bool found = false;
980 
981 	for (i = 0; i < max; i++)  {
982 		if (mpin[i].pin_state == SKL_PIN_UNBIND)
983 			continue;
984 		found = true;
985 		break;
986 	}
987 
988 	if (!found)
989 		mcfg->m_state = SKL_MODULE_INIT_DONE;
991 }
992 
993 /*
 * A module needs to be instantiated in the DSP. A module is part of a
 * collection of modules referred to as a PIPE.
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending the INIT_INSTANCE IPC using the IPC helper
998  */
999 int skl_init_module(struct skl_sst *ctx,
1000 			struct skl_module_cfg *mconfig)
1001 {
1002 	u16 module_config_size = 0;
1003 	void *param_data = NULL;
1004 	int ret;
1005 	struct skl_ipc_init_instance_msg msg;
1006 
1007 	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
1008 		 mconfig->id.module_id, mconfig->id.pvt_id);
1009 
1010 	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
1011 		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
1012 				 mconfig->pipe->state, mconfig->pipe->ppl_id);
1013 		return -EIO;
1014 	}
1015 
1016 	ret = skl_set_module_format(ctx, mconfig,
1017 			&module_config_size, &param_data);
1018 	if (ret < 0) {
1019 		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
1020 		return ret;
1021 	}
1022 
1023 	msg.module_id = mconfig->id.module_id;
1024 	msg.instance_id = mconfig->id.pvt_id;
1025 	msg.ppl_instance_id = mconfig->pipe->ppl_id;
1026 	msg.param_data_size = module_config_size;
1027 	msg.core_id = mconfig->core_id;
1028 	msg.domain = mconfig->domain;
1029 
1030 	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
1031 	if (ret < 0) {
1032 		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
1033 		kfree(param_data);
1034 		return ret;
1035 	}
1036 	mconfig->m_state = SKL_MODULE_INIT_DONE;
1037 	kfree(param_data);
1038 	return ret;
1039 }
1040 
static void skl_dump_bind_info(struct skl_sst *ctx,
	struct skl_module_cfg *src_module, struct skl_module_cfg *dst_module)
1043 {
1044 	dev_dbg(ctx->dev, "%s: src module_id = %d  src_instance=%d\n",
1045 		__func__, src_module->id.module_id, src_module->id.pvt_id);
1046 	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
1047 		 dst_module->id.module_id, dst_module->id.pvt_id);
1048 
1049 	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
1050 		src_module->m_state, dst_module->m_state);
1051 }
1052 
1053 /*
 * On module freeup, we need to unbind the module from the modules
 * it is already bound to.
 * Find the allocated pins and unbind them using the bind_unbind IPC
1057  */
1058 int skl_unbind_modules(struct skl_sst *ctx,
1059 			struct skl_module_cfg *src_mcfg,
1060 			struct skl_module_cfg *dst_mcfg)
1061 {
1062 	int ret;
1063 	struct skl_ipc_bind_unbind_msg msg;
1064 	struct skl_module_inst_id src_id = src_mcfg->id;
1065 	struct skl_module_inst_id dst_id = dst_mcfg->id;
1066 	int in_max = dst_mcfg->module->max_input_pins;
1067 	int out_max = src_mcfg->module->max_output_pins;
1068 	int src_index, dst_index, src_pin_state, dst_pin_state;
1069 
1070 	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
1071 
1072 	/* get src queue index */
1073 	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
1074 	if (src_index < 0)
1075 		return 0;
1076 
1077 	msg.src_queue = src_index;
1078 
1079 	/* get dst queue index */
1080 	dst_index  = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
1081 	if (dst_index < 0)
1082 		return 0;
1083 
1084 	msg.dst_queue = dst_index;
1085 
1086 	src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
1087 	dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;
1088 
1089 	if (src_pin_state != SKL_PIN_BIND_DONE ||
1090 		dst_pin_state != SKL_PIN_BIND_DONE)
1091 		return 0;
1092 
1093 	msg.module_id = src_mcfg->id.module_id;
1094 	msg.instance_id = src_mcfg->id.pvt_id;
1095 	msg.dst_module_id = dst_mcfg->id.module_id;
1096 	msg.dst_instance_id = dst_mcfg->id.pvt_id;
1097 	msg.bind = false;
1098 
1099 	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
1100 	if (!ret) {
1101 		/* free queue only if unbind is success */
1102 		skl_free_queue(src_mcfg->m_out_pin, src_index);
1103 		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1104 
1105 		/*
		 * only the src module's bind state needs checking, since
		 * bind is always from src -> sink
1108 		 */
1109 		skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
1110 	}
1111 
1112 	return ret;
1113 }
1114 
1115 static void fill_pin_params(struct skl_audio_data_format *pin_fmt,
1116 				struct skl_module_fmt *format)
1117 {
1118 	pin_fmt->number_of_channels = format->channels;
1119 	pin_fmt->s_freq = format->s_freq;
1120 	pin_fmt->bit_depth = format->bit_depth;
1121 	pin_fmt->valid_bit_depth = format->valid_bit_depth;
1122 	pin_fmt->ch_cfg = format->ch_cfg;
1123 	pin_fmt->sample_type = format->sample_type;
1124 	pin_fmt->channel_map = format->ch_map;
1125 	pin_fmt->interleaving = format->interleaving_style;
1126 }
1127 
1128 #define CPR_SINK_FMT_PARAM_ID 2
1129 
1130 /*
 * Once a module is instantiated it needs to be 'bound' to other modules in
 * the pipeline. For binding we need to find the module pins which are bound
 * together.
 * This function finds the pins and then sends the bind_unbind IPC message to
 * the DSP using the IPC helper
1136  */
1137 int skl_bind_modules(struct skl_sst *ctx,
1138 			struct skl_module_cfg *src_mcfg,
1139 			struct skl_module_cfg *dst_mcfg)
1140 {
1141 	int ret = 0;
1142 	struct skl_ipc_bind_unbind_msg msg;
1143 	int in_max = dst_mcfg->module->max_input_pins;
1144 	int out_max = src_mcfg->module->max_output_pins;
1145 	int src_index, dst_index;
1146 	struct skl_module_fmt *format;
1147 	struct skl_cpr_pin_fmt pin_fmt;
1148 	struct skl_module *module;
1149 	struct skl_module_iface *fmt;
1150 
1151 	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
1152 
1153 	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
1154 		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
1155 		return 0;
1156 
1157 	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
1158 	if (src_index < 0)
1159 		return -EINVAL;
1160 
1161 	msg.src_queue = src_index;
1162 	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
1163 	if (dst_index < 0) {
1164 		skl_free_queue(src_mcfg->m_out_pin, src_index);
1165 		return -EINVAL;
1166 	}
1167 
1168 	/*
	 * The copier module requires a separate large_config_set IPC to
	 * configure pins other than 0
1171 	 */
1172 	if (src_mcfg->m_type == SKL_MODULE_TYPE_COPIER && src_index > 0) {
1173 		pin_fmt.sink_id = src_index;
1174 		module = src_mcfg->module;
1175 		fmt = &module->formats[src_mcfg->fmt_idx];
1176 
1177 		/* Input fmt is same as that of src module input cfg */
1178 		format = &fmt->inputs[0].fmt;
1179 		fill_pin_params(&(pin_fmt.src_fmt), format);
1180 
1181 		format = &fmt->outputs[src_index].fmt;
1182 		fill_pin_params(&(pin_fmt.dst_fmt), format);
1183 		ret = skl_set_module_params(ctx, (void *)&pin_fmt,
1184 					sizeof(struct skl_cpr_pin_fmt),
1185 					CPR_SINK_FMT_PARAM_ID, src_mcfg);
1186 
1187 		if (ret < 0)
1188 			goto out;
1189 	}
1190 
1191 	msg.dst_queue = dst_index;
1192 
1193 	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
1194 			 msg.src_queue, msg.dst_queue);
1195 
1196 	msg.module_id = src_mcfg->id.module_id;
1197 	msg.instance_id = src_mcfg->id.pvt_id;
1198 	msg.dst_module_id = dst_mcfg->id.module_id;
1199 	msg.dst_instance_id = dst_mcfg->id.pvt_id;
1200 	msg.bind = true;
1201 
1202 	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
1203 
1204 	if (!ret) {
1205 		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
1206 		src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
1207 		dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
1208 		return ret;
1209 	}
1210 out:
	/* error case: if IPC fails, clear the queue index */
1212 	skl_free_queue(src_mcfg->m_out_pin, src_index);
1213 	skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1214 
1215 	return ret;
1216 }
1217 
1218 static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
1219 	enum skl_ipc_pipeline_state state)
1220 {
1221 	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);
1222 
1223 	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
1224 }
1225 
1226 /*
 * A pipeline is a collection of modules. Before a module is instantiated, a
 * pipeline needs to be created for it.
 * This function creates a pipeline by sending the create pipeline IPC
 * message to the FW
1231  */
1232 int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
1233 {
1234 	int ret;
1235 
1236 	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
1237 
1238 	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
1239 				pipe->pipe_priority, pipe->ppl_id,
1240 				pipe->lp_mode);
1241 	if (ret < 0) {
1242 		dev_err(ctx->dev, "Failed to create pipeline\n");
1243 		return ret;
1244 	}
1245 
1246 	pipe->state = SKL_PIPE_CREATED;
1247 
1248 	return 0;
1249 }
1250 
1251 /*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
 * pause it first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
 * stop the DMA engines and release the resources
1256  */
1257 int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1258 {
1259 	int ret;
1260 
1261 	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1262 
	/* If the pipe is started, pause it in FW before deleting */
1264 	if (pipe->state >= SKL_PIPE_STARTED) {
1265 		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1266 		if (ret < 0) {
1267 			dev_err(ctx->dev, "Failed to stop pipeline\n");
1268 			return ret;
1269 		}
1270 
1271 		pipe->state = SKL_PIPE_PAUSED;
1272 	}
1273 
1274 	/* If pipe was not created in FW, do not try to delete it */
1275 	if (pipe->state < SKL_PIPE_CREATED)
1276 		return 0;
1277 
1278 	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
1279 	if (ret < 0) {
1280 		dev_err(ctx->dev, "Failed to delete pipeline\n");
1281 		return ret;
1282 	}
1283 
1284 	pipe->state = SKL_PIPE_INVALID;
1285 
1286 	return ret;
1287 }
1288 
1289 /*
 * A pipeline is also a scheduling entity in the DSP which can be run or
 * stopped.
 * To process data the pipe needs to be run by sending the set pipe state IPC
 * to the DSP
1293  */
1294 int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1295 {
1296 	int ret;
1297 
1298 	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1299 
	/* If pipe was not created in FW, do not try to start it */
1301 	if (pipe->state < SKL_PIPE_CREATED)
1302 		return 0;
1303 
1304 	/* Pipe has to be paused before it is started */
1305 	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1306 	if (ret < 0) {
1307 		dev_err(ctx->dev, "Failed to pause pipe\n");
1308 		return ret;
1309 	}
1310 
1311 	pipe->state = SKL_PIPE_PAUSED;
1312 
1313 	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
1314 	if (ret < 0) {
1315 		dev_err(ctx->dev, "Failed to start pipe\n");
1316 		return ret;
1317 	}
1318 
1319 	pipe->state = SKL_PIPE_STARTED;
1320 
1321 	return 0;
1322 }
1323 
1324 /*
 * Stop the pipeline by sending the set pipe state IPC.
 * The DSP doesn't implement stop, so we always send the pause message
1327  */
1328 int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1329 {
1330 	int ret;
1331 
1332 	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
1333 
1334 	/* If pipe was not created in FW, do not try to pause or delete */
1335 	if (pipe->state < SKL_PIPE_PAUSED)
1336 		return 0;
1337 
1338 	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1339 	if (ret < 0) {
1340 		dev_dbg(ctx->dev, "Failed to stop pipe\n");
1341 		return ret;
1342 	}
1343 
1344 	pipe->state = SKL_PIPE_PAUSED;
1345 
1346 	return 0;
1347 }
1348 
1349 /*
 * Reset the pipeline by sending the set pipe state IPC; this will reset the
 * DMA from the DSP side
1352  */
1353 int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1354 {
1355 	int ret;
1356 
	/* If pipe was not created in FW, do not try to reset it */
1358 	if (pipe->state < SKL_PIPE_PAUSED)
1359 		return 0;
1360 
1361 	ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
1362 	if (ret < 0) {
1363 		dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
1364 		return ret;
1365 	}
1366 
1367 	pipe->state = SKL_PIPE_RESET;
1368 
1369 	return 0;
1370 }
1371 
1372 /* Algo parameter set helper function */
1373 int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
1374 				u32 param_id, struct skl_module_cfg *mcfg)
1375 {
1376 	struct skl_ipc_large_config_msg msg;
1377 
1378 	msg.module_id = mcfg->id.module_id;
1379 	msg.instance_id = mcfg->id.pvt_id;
1380 	msg.param_data_size = size;
1381 	msg.large_param_id = param_id;
1382 
1383 	return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
1384 }
1385 
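/* Algo parameter get helper function */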
1386 int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
1387 			  u32 param_id, struct skl_module_cfg *mcfg)
1388 {
1389 	struct skl_ipc_large_config_msg msg;
1390 
1391 	msg.module_id = mcfg->id.module_id;
1392 	msg.instance_id = mcfg->id.pvt_id;
1393 	msg.param_data_size = size;
1394 	msg.large_param_id = param_id;
1395 
1396 	return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
1397 }
1398