1 /*
2  *  skl-topology.c - Implements Platform component ALSA controls/widget
3  *  handlers.
4  *
5  *  Copyright (C) 2014-2015 Intel Corp
6  *  Author: Jeeja KP <jeeja.kp@intel.com>
7  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/firmware.h>
22 #include <sound/soc.h>
23 #include <sound/soc-topology.h>
24 #include <uapi/sound/snd_sst_tokens.h>
25 #include "skl-sst-dsp.h"
26 #include "skl-sst-ipc.h"
27 #include "skl-topology.h"
28 #include "skl.h"
29 #include "skl-tplg-interface.h"
30 #include "../common/sst-dsp.h"
31 #include "../common/sst-dsp-priv.h"
32 
33 #define SKL_CH_FIXUP_MASK		(1 << 0)
34 #define SKL_RATE_FIXUP_MASK		(1 << 1)
35 #define SKL_FMT_FIXUP_MASK		(1 << 2)
36 #define SKL_IN_DIR_BIT_MASK		BIT(0)
37 #define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
38 
39 static const int mic_mono_list[] = {
40 	0, 1, 2, 3,
41 };
42 static const int mic_stereo_list[][SKL_CH_STEREO] = {
43 	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
44 };
45 static const int mic_trio_list[][SKL_CH_TRIO] = {
46 	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
47 };
48 static const int mic_quatro_list[][SKL_CH_QUATRO] = {
49 	{0, 1, 2, 3},
50 };
51 
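/* true when the runtime channel/rate/bit-width triple matches a pipe config */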
52 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
53 	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))
54 
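/*
 * Track how many active streams fall into each D0i3 capability class; these
 * reference counts are consulted when deciding whether, and in which mode,
 * the DSP may enter the D0i3 low-power state.
 */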
55 void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
56 {
57 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
58 
59 	switch (caps) {
60 	case SKL_D0I3_NONE:
61 		d0i3->non_d0i3++;
62 		break;
63 
64 	case SKL_D0I3_STREAMING:
65 		d0i3->streaming++;
66 		break;
67 
68 	case SKL_D0I3_NON_STREAMING:
69 		d0i3->non_streaming++;
70 		break;
71 	}
72 }
73 
74 void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
75 {
76 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
77 
78 	switch (caps) {
79 	case SKL_D0I3_NONE:
80 		d0i3->non_d0i3--;
81 		break;
82 
83 	case SKL_D0I3_STREAMING:
84 		d0i3->streaming--;
85 		break;
86 
87 	case SKL_D0I3_NON_STREAMING:
88 		d0i3->non_streaming--;
89 		break;
90 	}
91 }
92 
93 /*
94  * The SKL DSP driver models only a few DAPM widget types; the rest are
95  * ignored. This helper checks whether the SKL driver handles a given widget.
96  */
97 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
98 				  struct device *dev)
99 {
100 	if (w->dapm->dev != dev)
101 		return false;
102 
103 	switch (w->id) {
104 	case snd_soc_dapm_dai_link:
105 	case snd_soc_dapm_dai_in:
106 	case snd_soc_dapm_aif_in:
107 	case snd_soc_dapm_aif_out:
108 	case snd_soc_dapm_dai_out:
109 	case snd_soc_dapm_switch:
110 		return false;
111 	default:
112 		return true;
113 	}
114 }
115 
116 /*
117  * Each pipeline needs memory to be allocated. Check if we have free memory
118  * available in the pool.
119  */
120 static bool skl_is_pipe_mem_avail(struct skl *skl,
121 				struct skl_module_cfg *mconfig)
122 {
123 	struct skl_sst *ctx = skl->skl_sst;
124 
125 	if (skl->resource.mem + mconfig->pipe->memory_pages >
126 				skl->resource.max_mem) {
127 		dev_err(ctx->dev,
128 				"%s: module_id %d instance %d\n", __func__,
129 				mconfig->id.module_id,
130 				mconfig->id.instance_id);
131 		dev_err(ctx->dev,
132 				"exceeds ppl memory available %d mem %d\n",
133 				skl->resource.max_mem, skl->resource.mem);
134 		return false;
135 	} else {
136 		return true;
137 	}
138 }
139 
140 /*
141  * Add the memory to the memory pool. This is freed when the pipe is deleted.
142  * Note: the DSP does the actual memory management; we only keep track of
143  * the overall pool usage.
144  */
145 static void skl_tplg_alloc_pipe_mem(struct skl *skl,
146 				struct skl_module_cfg *mconfig)
147 {
148 	skl->resource.mem += mconfig->pipe->memory_pages;
149 }
150 
151 /*
152  * A pipeline needs DSP CPU resources for computation; this is quantified
153  * in MCPS (Million Clocks Per Second) required for a module/pipe.
154  *
155  * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
156  * for this pipe.
157  */
158 
159 static bool skl_is_pipe_mcps_avail(struct skl *skl,
160 				struct skl_module_cfg *mconfig)
161 {
162 	struct skl_sst *ctx = skl->skl_sst;
163 	u8 res_idx = mconfig->res_idx;
164 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
165 
166 	if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
167 		dev_err(ctx->dev,
168 			"%s: module_id %d instance %d\n", __func__,
169 			mconfig->id.module_id, mconfig->id.instance_id);
170 		dev_err(ctx->dev,
171 			"exceeds ppl mcps available %d mcps %d\n",
172 			skl->resource.max_mcps, skl->resource.mcps);
173 		return false;
174 	} else {
175 		return true;
176 	}
177 }
178 
179 static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
180 				struct skl_module_cfg *mconfig)
181 {
182 	u8 res_idx = mconfig->res_idx;
183 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
184 
185 	skl->resource.mcps += res->cps;
186 }
187 
188 /*
189  * Free the mcps when tearing down
190  */
191 static void
192 skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
193 {
194 	u8 res_idx = mconfig->res_idx;
195 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
196 
197 	skl->resource.mcps -= res->cps;
198 }
199 
200 /*
201  * Free the memory when tearing down
202  */
203 static void
204 skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
205 {
206 	skl->resource.mem -= mconfig->pipe->memory_pages;
207 }
208 
209 
210 static void skl_dump_mconfig(struct skl_sst *ctx,
211 					struct skl_module_cfg *mcfg)
212 {
213 	struct skl_module_iface *iface = &mcfg->module->formats[0];
214 
215 	dev_dbg(ctx->dev, "Dumping config\n");
216 	dev_dbg(ctx->dev, "Input Format:\n");
217 	dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
218 	dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
219 	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
220 	dev_dbg(ctx->dev, "valid bit depth = %d\n",
221 				iface->inputs[0].fmt.valid_bit_depth);
222 	dev_dbg(ctx->dev, "Output Format:\n");
223 	dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
224 	dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
225 	dev_dbg(ctx->dev, "valid bit depth = %d\n",
226 				iface->outputs[0].fmt.valid_bit_depth);
227 	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
228 }
229 
230 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
231 {
232 	int slot_map = 0xFFFFFFFF;
233 	int start_slot = 0;
234 	int i;
235 
236 	for (i = 0; i < chs; i++) {
237 		/*
238 		 * For 2 channels with starting slot as 0, slot map will
239 		 * look like 0xFFFFFF10.
240 		 */
241 		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
242 		start_slot++;
243 	}
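	/* e.g. 4 channels starting at slot 0 yield a slot map of 0xFFFF3210 */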
244 	fmt->ch_map = slot_map;
245 }
246 
247 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
248 			struct skl_pipe_params *params, int fixup)
249 {
250 	if (fixup & SKL_RATE_FIXUP_MASK)
251 		fmt->s_freq = params->s_freq;
252 	if (fixup & SKL_CH_FIXUP_MASK) {
253 		fmt->channels = params->ch;
254 		skl_tplg_update_chmap(fmt, fmt->channels);
255 	}
256 	if (fixup & SKL_FMT_FIXUP_MASK) {
257 		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
258 
259 		/*
260 		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
261 		 * container so update bit depth accordingly
262 		 */
263 		switch (fmt->valid_bit_depth) {
264 		case SKL_DEPTH_16BIT:
265 			fmt->bit_depth = fmt->valid_bit_depth;
266 			break;
267 
268 		default:
269 			fmt->bit_depth = SKL_DEPTH_32BIT;
270 			break;
271 		}
272 	}
273 
274 }
275 
276 /*
277  * A pipeline may have modules which impact the pcm parameters, like SRC,
278  * channel converter, format converter.
279  * We need to calculate the output params by applying the 'fixup'.
280  * The topology tells the driver which type of fixup is to be applied by
281  * supplying the fixup mask, so based on that we calculate the output.
282  *
283  * For an FE, the pcm hw_params is the source/target format; the same
284  * applies for a BE when its hw_params is invoked.
285  * Here, based on the FE/BE pipeline and direction, we calculate the input
286  * and output fixups and then apply them to the module formats.
287  */
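/*
 * For example (illustrative values): an FE playback copier with
 * params_fixup = (SKL_CH_FIXUP_MASK | SKL_RATE_FIXUP_MASK) and
 * converter = SKL_RATE_FIXUP_MASK gets in_fixup = CH | RATE but
 * out_fixup = CH only, since the module itself converts the rate.
 */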
288 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
289 		struct skl_pipe_params *params, bool is_fe)
290 {
291 	int in_fixup, out_fixup;
292 	struct skl_module_fmt *in_fmt, *out_fmt;
293 
294 	/* Fixups will be applied to pin 0 only */
295 	in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
296 	out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
297 
298 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
299 		if (is_fe) {
300 			in_fixup = m_cfg->params_fixup;
301 			out_fixup = (~m_cfg->converter) &
302 					m_cfg->params_fixup;
303 		} else {
304 			out_fixup = m_cfg->params_fixup;
305 			in_fixup = (~m_cfg->converter) &
306 					m_cfg->params_fixup;
307 		}
308 	} else {
309 		if (is_fe) {
310 			out_fixup = m_cfg->params_fixup;
311 			in_fixup = (~m_cfg->converter) &
312 					m_cfg->params_fixup;
313 		} else {
314 			in_fixup = m_cfg->params_fixup;
315 			out_fixup = (~m_cfg->converter) &
316 					m_cfg->params_fixup;
317 		}
318 	}
319 
320 	skl_tplg_update_params(in_fmt, params, in_fixup);
321 	skl_tplg_update_params(out_fmt, params, out_fixup);
322 }
323 
324 /*
325  * A module needs input and output buffers, which are dependent upon pcm
326  * params, so once we have calculated the params, we need to calculate the
327  * buffer sizes as well.
328  */
329 static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
330 				struct skl_module_cfg *mcfg)
331 {
332 	int multiplier = 1;
333 	struct skl_module_fmt *in_fmt, *out_fmt;
334 	struct skl_module_res *res;
335 
336 	/* Since fixups are applied to pin 0 only, ibs and obs need to
337 	 * change for pin 0 only
338 	 */
339 	res = &mcfg->module->resources[0];
340 	in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
341 	out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
342 
343 	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
344 		multiplier = 5;
345 
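	/*
	 * ibs/obs are bytes of audio per 1 ms. For example, a 48 kHz stereo
	 * stream in a 32-bit container gives 48 * 2 * 4 = 384 bytes
	 * (multiplied by 5 for an SRC module).
	 */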
346 	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
347 			in_fmt->channels * (in_fmt->bit_depth >> 3) *
348 			multiplier;
349 
350 	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
351 			out_fmt->channels * (out_fmt->bit_depth >> 3) *
352 			multiplier;
353 }
354 
355 static u8 skl_tplg_be_dev_type(int dev_type)
356 {
357 	int ret;
358 
359 	switch (dev_type) {
360 	case SKL_DEVICE_BT:
361 		ret = NHLT_DEVICE_BT;
362 		break;
363 
364 	case SKL_DEVICE_DMIC:
365 		ret = NHLT_DEVICE_DMIC;
366 		break;
367 
368 	case SKL_DEVICE_I2S:
369 		ret = NHLT_DEVICE_I2S;
370 		break;
371 
372 	default:
373 		ret = NHLT_DEVICE_INVALID;
374 		break;
375 	}
376 
377 	return ret;
378 }
379 
380 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
381 						struct skl_sst *ctx)
382 {
383 	struct skl_module_cfg *m_cfg = w->priv;
384 	int link_type, dir;
385 	u32 ch, s_freq, s_fmt;
386 	struct nhlt_specific_cfg *cfg;
387 	struct skl *skl = get_skl_ctx(ctx->dev);
388 	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
389 	int fmt_idx = m_cfg->fmt_idx;
390 	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];
391 
392 	/* check if we already have blob */
393 	if (m_cfg->formats_config.caps_size > 0)
394 		return 0;
395 
396 	dev_dbg(ctx->dev, "Applying default cfg blob\n");
397 	switch (m_cfg->dev_type) {
398 	case SKL_DEVICE_DMIC:
399 		link_type = NHLT_LINK_DMIC;
400 		dir = SNDRV_PCM_STREAM_CAPTURE;
401 		s_freq = m_iface->inputs[0].fmt.s_freq;
402 		s_fmt = m_iface->inputs[0].fmt.bit_depth;
403 		ch = m_iface->inputs[0].fmt.channels;
404 		break;
405 
406 	case SKL_DEVICE_I2S:
407 		link_type = NHLT_LINK_SSP;
408 		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
409 			dir = SNDRV_PCM_STREAM_PLAYBACK;
410 			s_freq = m_iface->outputs[0].fmt.s_freq;
411 			s_fmt = m_iface->outputs[0].fmt.bit_depth;
412 			ch = m_iface->outputs[0].fmt.channels;
413 		} else {
414 			dir = SNDRV_PCM_STREAM_CAPTURE;
415 			s_freq = m_iface->inputs[0].fmt.s_freq;
416 			s_fmt = m_iface->inputs[0].fmt.bit_depth;
417 			ch = m_iface->inputs[0].fmt.channels;
418 		}
419 		break;
420 
421 	default:
422 		return -EINVAL;
423 	}
424 
425 	/* update the blob based on virtual bus_id and default params */
426 	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
427 					s_fmt, ch, s_freq, dir, dev_type);
428 	if (cfg) {
429 		m_cfg->formats_config.caps_size = cfg->size;
430 		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
431 	} else {
432 		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
433 					m_cfg->vbus_id, link_type, dir);
434 		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
435 					ch, s_freq, s_fmt);
436 		return -EIO;
437 	}
438 
439 	return 0;
440 }
441 
442 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
443 							struct skl_sst *ctx)
444 {
445 	struct skl_module_cfg *m_cfg = w->priv;
446 	struct skl_pipe_params *params = m_cfg->pipe->p_params;
447 	int p_conn_type = m_cfg->pipe->conn_type;
448 	bool is_fe;
449 
450 	if (!m_cfg->params_fixup)
451 		return;
452 
453 	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE update\n",
454 				w->name);
455 
456 	skl_dump_mconfig(ctx, m_cfg);
457 
458 	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
459 		is_fe = true;
460 	else
461 		is_fe = false;
462 
463 	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
464 	skl_tplg_update_buffer_size(ctx, m_cfg);
465 
466 	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER update\n",
467 				w->name);
468 
469 	skl_dump_mconfig(ctx, m_cfg);
470 }
471 
472 /*
473  * Some modules can have multiple params set from user controls, which
474  * need to be sent after the module is initialized. If the set_param flag
475  * is set (SKL_PARAM_SET), the module params are sent after module init.
476  */
477 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
478 						struct skl_sst *ctx)
479 {
480 	int i, ret;
481 	struct skl_module_cfg *mconfig = w->priv;
482 	const struct snd_kcontrol_new *k;
483 	struct soc_bytes_ext *sb;
484 	struct skl_algo_data *bc;
485 	struct skl_specific_cfg *sp_cfg;
486 
487 	if (mconfig->formats_config.caps_size > 0 &&
488 		mconfig->formats_config.set_params == SKL_PARAM_SET) {
489 		sp_cfg = &mconfig->formats_config;
490 		ret = skl_set_module_params(ctx, sp_cfg->caps,
491 					sp_cfg->caps_size,
492 					sp_cfg->param_id, mconfig);
493 		if (ret < 0)
494 			return ret;
495 	}
496 
497 	for (i = 0; i < w->num_kcontrols; i++) {
498 		k = &w->kcontrol_news[i];
499 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
500 			sb = (void *) k->private_value;
501 			bc = (struct skl_algo_data *)sb->dobj.private;
502 
503 			if (bc->set_params == SKL_PARAM_SET) {
504 				ret = skl_set_module_params(ctx,
505 						(u32 *)bc->params, bc->size,
506 						bc->param_id, mconfig);
507 				if (ret < 0)
508 					return ret;
509 			}
510 		}
511 	}
512 
513 	return 0;
514 }
515 
516 /*
517  * Some module params can be set from user controls and are required at
518  * module initialization time. Params needed at init are identified by the
519  * set_param flag being SKL_PARAM_INIT; such a parameter is sent as part
520  * of the module init data.
521  */
522 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
523 {
524 	const struct snd_kcontrol_new *k;
525 	struct soc_bytes_ext *sb;
526 	struct skl_algo_data *bc;
527 	struct skl_module_cfg *mconfig = w->priv;
528 	int i;
529 
530 	for (i = 0; i < w->num_kcontrols; i++) {
531 		k = &w->kcontrol_news[i];
532 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
533 			sb = (struct soc_bytes_ext *)k->private_value;
534 			bc = (struct skl_algo_data *)sb->dobj.private;
535 
536 			if (bc->set_params != SKL_PARAM_INIT)
537 				continue;
538 
539 			mconfig->formats_config.caps = (u32 *)bc->params;
540 			mconfig->formats_config.caps_size = bc->size;
541 
542 			break;
543 		}
544 	}
545 
546 	return 0;
547 }
548 
549 static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
550 		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
551 {
552 	switch (mcfg->dev_type) {
553 	case SKL_DEVICE_HDAHOST:
554 		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
555 
556 	case SKL_DEVICE_HDALINK:
557 		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
558 	}
559 
560 	return 0;
561 }
562 
563 /*
564  * Inside a pipe instance, we can have various modules. These modules need
565  * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
566  * done by the skl_init_module() routine, so invoke that for all modules in a pipeline.
567  */
568 static int
569 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
570 {
571 	struct skl_pipe_module *w_module;
572 	struct snd_soc_dapm_widget *w;
573 	struct skl_module_cfg *mconfig;
574 	struct skl_sst *ctx = skl->skl_sst;
575 	u8 cfg_idx;
576 	int ret = 0;
577 
578 	list_for_each_entry(w_module, &pipe->w_list, node) {
579 		uuid_le *uuid_mod;
580 		w = w_module->w;
581 		mconfig = w->priv;
582 
583 		/* check if module ids are populated */
584 		if (mconfig->id.module_id < 0) {
585 			dev_err(skl->skl_sst->dev,
586 					"module %pUL id not populated\n",
587 					(uuid_le *)mconfig->guid);
588 			return -EIO;
589 		}
590 
591 		cfg_idx = mconfig->pipe->cur_config_idx;
592 		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
593 		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
594 
595 		/* check resource available */
596 		if (!skl_is_pipe_mcps_avail(skl, mconfig))
597 			return -ENOMEM;
598 
599 		if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) {
600 			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
601 				mconfig->id.module_id, mconfig->guid);
602 			if (ret < 0)
603 				return ret;
604 
605 			mconfig->m_state = SKL_MODULE_LOADED;
606 		}
607 
608 		/* prepare the DMA if the module is gateway cpr */
609 		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
610 		if (ret < 0)
611 			return ret;
612 
613 		/* for a BE, fill the blob with default values if it is NULL */
614 		skl_tplg_update_be_blob(w, ctx);
615 
616 		/*
617 		 * apply fix/conversion to module params based on
618 		 * FE/BE params
619 		 */
620 		skl_tplg_update_module_params(w, ctx);
621 		uuid_mod = (uuid_le *)mconfig->guid;
622 		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
623 						mconfig->id.instance_id);
624 		if (mconfig->id.pvt_id < 0)
625 			return mconfig->id.pvt_id;
626 		skl_tplg_set_module_init_data(w);
627 
628 		ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id);
629 		if (ret < 0) {
630 			dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n",
631 						mconfig->core_id, ret);
632 			return ret;
633 		}
634 
635 		ret = skl_init_module(ctx, mconfig);
636 		if (ret < 0) {
637 			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
638 			goto err;
639 		}
640 		skl_tplg_alloc_pipe_mcps(skl, mconfig);
641 		ret = skl_tplg_set_module_params(w, ctx);
642 		if (ret < 0)
643 			goto err;
644 	}
645 
646 	return 0;
647 err:
648 	skl_dsp_put_core(ctx->dsp, mconfig->core_id);
649 	return ret;
650 }
651 
652 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
653 	 struct skl_pipe *pipe)
654 {
655 	int ret = 0;
656 	struct skl_pipe_module *w_module = NULL;
657 	struct skl_module_cfg *mconfig = NULL;
658 
659 	list_for_each_entry(w_module, &pipe->w_list, node) {
660 		uuid_le *uuid_mod;
661 		mconfig  = w_module->w->priv;
662 		uuid_mod = (uuid_le *)mconfig->guid;
663 
664 		if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod &&
665 			mconfig->m_state > SKL_MODULE_UNINIT) {
666 			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
667 						mconfig->id.module_id);
668 			if (ret < 0)
669 				return -EIO;
670 		}
671 		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
672 
673 		ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id);
674 		if (ret < 0) {
675 			/* don't return; continue with other modules */
676 			dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n",
677 				mconfig->core_id, ret);
678 		}
679 	}
680 
681 	/* return the last status; 0 if there was nothing to unload */
682 	return ret;
683 }
684 
685 /*
686  * Here, we select pipe format based on the pipe type and pipe
687  * direction to determine the current config index for the pipeline.
688  * The config index is then used to select proper module resources.
689  * Intermediate pipes currently have a fixed format hence we select the
690  * 0th configuration by default for such pipes.
691  */
692 static int
693 skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
694 {
695 	struct skl_sst *ctx = skl->skl_sst;
696 	struct skl_pipe *pipe = mconfig->pipe;
697 	struct skl_pipe_params *params = pipe->p_params;
698 	struct skl_path_config *pconfig = &pipe->configs[0];
699 	struct skl_pipe_fmt *fmt = NULL;
700 	bool in_fmt = false;
701 	int i;
702 
703 	if (pipe->nr_cfgs == 0) {
704 		pipe->cur_config_idx = 0;
705 		return 0;
706 	}
707 
708 	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
709 		dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
710 		pipe->cur_config_idx = 0;
711 		pipe->memory_pages = pconfig->mem_pages;
712 
713 		return 0;
714 	}
715 
716 	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
717 	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
718 	     (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
719 	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
720 		in_fmt = true;
721 
722 	for (i = 0; i < pipe->nr_cfgs; i++) {
723 		pconfig = &pipe->configs[i];
724 		if (in_fmt)
725 			fmt = &pconfig->in_fmt;
726 		else
727 			fmt = &pconfig->out_fmt;
728 
729 		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
730 				    fmt->channels, fmt->freq, fmt->bps)) {
731 			pipe->cur_config_idx = i;
732 			pipe->memory_pages = pconfig->mem_pages;
733 			dev_dbg(ctx->dev, "Using pipe config: %d\n", i);
734 
735 			return 0;
736 		}
737 	}
738 
739 	dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
740 		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
741 	return -EINVAL;
742 }
743 
744 /*
745  * A mixer module represents a pipeline. So in the Pre-PMU event of the
746  * mixer we need to create the pipeline. We do the following:
747  *   - check the resources
748  *   - Create the pipeline
749  *   - Initialize the modules in pipeline
750  *   - finally bind all modules together
751  */
752 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
753 							struct skl *skl)
754 {
755 	int ret;
756 	struct skl_module_cfg *mconfig = w->priv;
757 	struct skl_pipe_module *w_module;
758 	struct skl_pipe *s_pipe = mconfig->pipe;
759 	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
760 	struct skl_sst *ctx = skl->skl_sst;
761 	struct skl_module_deferred_bind *modules;
762 
763 	ret = skl_tplg_get_pipe_config(skl, mconfig);
764 	if (ret < 0)
765 		return ret;
766 
767 	/* check resource available */
768 	if (!skl_is_pipe_mcps_avail(skl, mconfig))
769 		return -EBUSY;
770 
771 	if (!skl_is_pipe_mem_avail(skl, mconfig))
772 		return -ENOMEM;
773 
774 	/*
775 	 * Create a list of modules for pipe.
776 	 * This list contains modules from source to sink
777 	 */
778 	ret = skl_create_pipeline(ctx, mconfig->pipe);
779 	if (ret < 0)
780 		return ret;
781 
782 	skl_tplg_alloc_pipe_mem(skl, mconfig);
783 	skl_tplg_alloc_pipe_mcps(skl, mconfig);
784 
785 	/* Init all pipe modules from source to sink */
786 	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
787 	if (ret < 0)
788 		return ret;
789 
790 	/* Bind modules from source to sink */
791 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
792 		dst_module = w_module->w->priv;
793 
794 		if (src_module == NULL) {
795 			src_module = dst_module;
796 			continue;
797 		}
798 
799 		ret = skl_bind_modules(ctx, src_module, dst_module);
800 		if (ret < 0)
801 			return ret;
802 
803 		src_module = dst_module;
804 	}
805 
806 	/*
807 	 * When the destination module is initialized, check for these modules
808 	 * in deferred bind list. If found, bind them.
809 	 */
810 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
811 		if (list_empty(&skl->bind_list))
812 			break;
813 
814 		list_for_each_entry(modules, &skl->bind_list, node) {
815 			module = w_module->w->priv;
816 			if (modules->dst == module)
817 				skl_bind_modules(ctx, modules->src,
818 							modules->dst);
819 		}
820 	}
821 
822 	return 0;
823 }
824 
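/*
 * KPB bind params carry (module id, instance id) pairs taken from the
 * topology; replace each topology instance id with the driver-assigned
 * pvt_id before the params are sent to the firmware.
 */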
825 static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
826 				int size, struct skl_module_cfg *mcfg)
827 {
828 	int i, pvt_id;
829 
830 	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
831 		struct skl_kpb_params *kpb_params =
832 				(struct skl_kpb_params *)params;
833 		struct skl_mod_inst_map *inst = kpb_params->u.map;
834 
835 		for (i = 0; i < kpb_params->num_modules; i++) {
836 			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
837 								inst->inst_id);
838 			if (pvt_id < 0)
839 				return -EINVAL;
840 
841 			inst->inst_id = pvt_id;
842 			inst++;
843 		}
844 	}
845 
846 	return 0;
847 }
848 /*
849  * Some modules require params to be set after the module is bound on
850  * all of its connected pins.
851  *
852  * The module provider sets the set_param flag for such modules and we
853  * send the params after binding.
854  */
855 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
856 			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
857 {
858 	int i, ret;
859 	struct skl_module_cfg *mconfig = w->priv;
860 	const struct snd_kcontrol_new *k;
861 	struct soc_bytes_ext *sb;
862 	struct skl_algo_data *bc;
863 	struct skl_specific_cfg *sp_cfg;
864 	u32 *params;
865 
866 	/*
867 	 * check all out/in pins are in bind state.
868 	 * if so set the module param
869 	 */
870 	for (i = 0; i < mcfg->module->max_output_pins; i++) {
871 		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
872 			return 0;
873 	}
874 
875 	for (i = 0; i < mcfg->module->max_input_pins; i++) {
876 		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
877 			return 0;
878 	}
879 
880 	if (mconfig->formats_config.caps_size > 0 &&
881 		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
882 		sp_cfg = &mconfig->formats_config;
883 		ret = skl_set_module_params(ctx, sp_cfg->caps,
884 					sp_cfg->caps_size,
885 					sp_cfg->param_id, mconfig);
886 		if (ret < 0)
887 			return ret;
888 	}
889 
890 	for (i = 0; i < w->num_kcontrols; i++) {
891 		k = &w->kcontrol_news[i];
892 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
893 			sb = (void *) k->private_value;
894 			bc = (struct skl_algo_data *)sb->dobj.private;
895 
896 			if (bc->set_params == SKL_PARAM_BIND) {
897 				params = kzalloc(bc->max, GFP_KERNEL);
898 				if (!params)
899 					return -ENOMEM;
900 
901 				memcpy(params, bc->params, bc->max);
902 				skl_fill_sink_instance_id(ctx, params, bc->max,
903 								mconfig);
904 
905 				ret = skl_set_module_params(ctx, params,
906 						bc->max, bc->param_id, mconfig);
907 				kfree(params);
908 
909 				if (ret < 0)
910 					return ret;
911 			}
912 		}
913 	}
914 
915 	return 0;
916 }
917 
918 static int skl_get_module_id(struct skl_sst *ctx, uuid_le *uuid)
919 {
920 	struct uuid_module *module;
921 
922 	list_for_each_entry(module, &ctx->uuid_list, list) {
923 		if (uuid_le_cmp(*uuid, module->uuid) == 0)
924 			return module->id;
925 	}
926 
927 	return -EINVAL;
928 }
929 
930 static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
931 					const struct snd_kcontrol_new *k)
932 {
933 	struct soc_bytes_ext *sb = (void *) k->private_value;
934 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
935 	struct skl_kpb_params *uuid_params, *params;
936 	struct hdac_bus *bus = ebus_to_hbus(skl_to_ebus(skl));
937 	int i, size, module_id;
938 
939 	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
940 		uuid_params = (struct skl_kpb_params *)bc->params;
941 		size = uuid_params->num_modules *
942 			sizeof(struct skl_mod_inst_map) +
943 			sizeof(uuid_params->num_modules);
944 
945 		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
946 		if (!params)
947 			return -ENOMEM;
948 
949 		params->num_modules = uuid_params->num_modules;
950 
951 		for (i = 0; i < uuid_params->num_modules; i++) {
952 			module_id = skl_get_module_id(skl->skl_sst,
953 				&uuid_params->u.map_uuid[i].mod_uuid);
954 			if (module_id < 0) {
955 				devm_kfree(bus->dev, params);
956 				return -EINVAL;
957 			}
958 
959 			params->u.map[i].mod_id = module_id;
960 			params->u.map[i].inst_id =
961 				uuid_params->u.map_uuid[i].inst_id;
962 		}
963 
964 		devm_kfree(bus->dev, bc->params);
965 		bc->params = (char *)params;
966 		bc->max = size;
967 	}
968 
969 	return 0;
970 }
971 
972 /*
973  * Retrieve the module id from UUID mentioned in the
974  * post bind params
975  */
976 void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
977 				struct snd_soc_dapm_widget *w)
978 {
979 	struct skl_module_cfg *mconfig = w->priv;
980 	int i;
981 
982 	/*
983 	 * Post bind params are used only for KPB
984 	 * to set copier instances to drain the data
985 	 * in fast mode
986 	 */
987 	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
988 		return;
989 
990 	for (i = 0; i < w->num_kcontrols; i++)
991 		if ((w->kcontrol_news[i].access &
992 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
993 			(skl_tplg_find_moduleid_from_uuid(skl,
994 			&w->kcontrol_news[i]) < 0))
995 			dev_err(skl->skl_sst->dev,
996 				"%s: invalid kpb post bind params\n",
997 				__func__);
998 }
999 
1000 static int skl_tplg_module_add_deferred_bind(struct skl *skl,
1001 	struct skl_module_cfg *src, struct skl_module_cfg *dst)
1002 {
1003 	struct skl_module_deferred_bind *m_list, *modules;
1004 	int i;
1005 
1006 	/* only supported for modules with static pin connections */
1007 	for (i = 0; i < dst->module->max_input_pins; i++) {
1008 		struct skl_module_pin *pin = &dst->m_in_pin[i];
1009 
1010 		if (pin->is_dynamic)
1011 			continue;
1012 
1013 		if ((pin->id.module_id  == src->id.module_id) &&
1014 			(pin->id.instance_id  == src->id.instance_id)) {
1015 
1016 			if (!list_empty(&skl->bind_list)) {
1017 				list_for_each_entry(modules, &skl->bind_list, node) {
1018 					if (modules->src == src && modules->dst == dst)
1019 						return 0;
1020 				}
1021 			}
1022 
1023 			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
1024 			if (!m_list)
1025 				return -ENOMEM;
1026 
1027 			m_list->src = src;
1028 			m_list->dst = dst;
1029 
1030 			list_add(&m_list->node, &skl->bind_list);
1031 		}
1032 	}
1033 
1034 	return 0;
1035 }
1036 
1037 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
1038 				struct skl *skl,
1039 				struct snd_soc_dapm_widget *src_w,
1040 				struct skl_module_cfg *src_mconfig)
1041 {
1042 	struct snd_soc_dapm_path *p;
1043 	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
1044 	struct skl_module_cfg *sink_mconfig;
1045 	struct skl_sst *ctx = skl->skl_sst;
1046 	int ret;
1047 
1048 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1049 		if (!p->connect)
1050 			continue;
1051 
1052 		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
1053 		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
1054 
1055 		next_sink = p->sink;
1056 
1057 		if (!is_skl_dsp_widget_type(p->sink, ctx->dev))
1058 			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
1059 
1060 		/*
1061 		 * Here we check widgets in the sink pipelines. They can be
1062 		 * of any widget type; we are only interested in the ones
1063 		 * handled by the SKL driver, so check that first.
1064 		 */
1065 		if ((p->sink->priv != NULL) &&
1066 				is_skl_dsp_widget_type(p->sink, ctx->dev)) {
1067 
1068 			sink = p->sink;
1069 			sink_mconfig = sink->priv;
1070 
1071 			/*
1072 			 * Modules other than PGA leaf can be connected
1073 			 * directly or via switch to a module in another
1074 			 * pipeline, e.g. a reference path.
1075 			 * When the path is enabled, the dst module that needs
1076 			 * to be bound may not be initialized yet. If the module
1077 			 * is not initialized, add these modules to the deferred
1078 			 * bind list and, when the dst module is initialized,
1079 			 * bind this module to the dst module from the deferred list.
1080 			 */
1081 			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
1082 				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
1083 
1084 				ret = skl_tplg_module_add_deferred_bind(skl,
1085 						src_mconfig, sink_mconfig);
1086 
1087 				if (ret < 0)
1088 					return ret;
1089 
1090 			}
1091 
1092 
1093 			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
1094 				sink_mconfig->m_state == SKL_MODULE_UNINIT)
1095 				continue;
1096 
1097 			/* Bind source to sink, mixin is always source */
1098 			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1099 			if (ret)
1100 				return ret;
1101 
1102 			/* set module params after bind */
1103 			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
1104 			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1105 
1106 			/* Start sinks pipe first */
1107 			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
1108 				if (sink_mconfig->pipe->conn_type !=
1109 							SKL_PIPE_CONN_TYPE_FE)
1110 					ret = skl_run_pipe(ctx,
1111 							sink_mconfig->pipe);
1112 				if (ret)
1113 					return ret;
1114 			}
1115 		}
1116 	}
1117 
1118 	if (!sink && next_sink)
1119 		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
1120 
1121 	return 0;
1122 }
1123 
1124 /*
1125  * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
1126  * we need to do following:
1127  *   - Bind to sink pipeline
1128  *      Since the sink pipes can be running and we don't get mixer event on
1129  *      connect for already running mixer, we need to find the sink pipes
1130  *      here and bind to them. This way dynamic connect works.
1131  *   - Start sink pipeline, if not running
1132  *   - Then run current pipe
1133  */
1134 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
1135 								struct skl *skl)
1136 {
1137 	struct skl_module_cfg *src_mconfig;
1138 	struct skl_sst *ctx = skl->skl_sst;
1139 	int ret = 0;
1140 
1141 	src_mconfig = w->priv;
1142 
1143 	/*
1144 	 * find which sink it is connected to, bind with the sink,
1145 	 * if sink is not started, start sink pipe first, then start
1146 	 * this pipe
1147 	 */
1148 	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
1149 	if (ret)
1150 		return ret;
1151 
1152 	/* Start source pipe last after starting all sinks */
1153 	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1154 		return skl_run_pipe(ctx, src_mconfig->pipe);
1155 
1156 	return 0;
1157 }
1158 
1159 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
1160 		struct snd_soc_dapm_widget *w, struct skl *skl)
1161 {
1162 	struct snd_soc_dapm_path *p;
1163 	struct snd_soc_dapm_widget *src_w = NULL;
1164 	struct skl_sst *ctx = skl->skl_sst;
1165 
1166 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1167 		src_w = p->source;
1168 		if (!p->connect)
1169 			continue;
1170 
1171 		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
1172 		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
1173 
1174 		/*
1175 		 * Here we check widgets in the source pipelines. They can be
1176 		 * of any widget type; we are only interested in the ones
1177 		 * handled by the SKL driver, so check that first.
1178 		 */
1179 		if ((p->source->priv != NULL) &&
1180 				is_skl_dsp_widget_type(p->source, ctx->dev)) {
1181 			return p->source;
1182 		}
1183 	}
1184 
1185 	if (src_w != NULL)
1186 		return skl_get_src_dsp_widget(src_w, skl);
1187 
1188 	return NULL;
1189 }
1190 
1191 /*
1192  * in the Post-PMU event of the mixer we need to do the following:
1193  *   - Check if this pipe is running
1194  *   - if not, then
1195  *	- bind this pipeline to its source pipeline
1196  *	  if source pipe is already running, this means it is a dynamic
1197  *	  connection and we need to bind only to that pipe
1198  *	- start this pipeline
1199  */
1200 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1201 							struct skl *skl)
1202 {
1203 	int ret = 0;
1204 	struct snd_soc_dapm_widget *source, *sink;
1205 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1206 	struct skl_sst *ctx = skl->skl_sst;
1207 	int src_pipe_started = 0;
1208 
1209 	sink = w;
1210 	sink_mconfig = sink->priv;
1211 
1212 	/*
1213 	 * If the source pipe is already started, the source was already
1214 	 * driving another sink before this sink got connected. Since the
1215 	 * source is started, bind this sink to the source and start this pipe.
1216 	 */
1217 	source = skl_get_src_dsp_widget(w, skl);
1218 	if (source != NULL) {
1219 		src_mconfig = source->priv;
1220 		sink_mconfig = sink->priv;
1221 		src_pipe_started = 1;
1222 
1223 		/*
1224 		 * check the pipe state; if it is not started, there is no
1225 		 * need to bind or start the pipe here
1226 		 */
1227 		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1228 			src_pipe_started = 0;
1229 	}
1230 
1231 	if (src_pipe_started) {
1232 		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1233 		if (ret)
1234 			return ret;
1235 
1236 		/* set module params after bind */
1237 		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
1238 		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1239 
1240 		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1241 			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
1242 	}
1243 
1244 	return ret;
1245 }
1246 
1247 /*
1248  * in the Pre-PMD event of the mixer we need to do the following:
1249  *   - Stop the pipe
1250  *   - find the source connections and remove them from the dapm_path_list
1251  *   - unbind from the source pipelines if still connected
1252  */
1253 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1254 							struct skl *skl)
1255 {
1256 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1257 	int ret = 0, i;
1258 	struct skl_sst *ctx = skl->skl_sst;
1259 
1260 	sink_mconfig = w->priv;
1261 
1262 	/* Stop the pipe */
1263 	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
1264 	if (ret)
1265 		return ret;
1266 
1267 	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
1268 		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1269 			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1270 			if (!src_mconfig)
1271 				continue;
1272 
1273 			ret = skl_unbind_modules(ctx,
1274 						src_mconfig, sink_mconfig);
1275 		}
1276 	}
1277 
1278 	return ret;
1279 }
1280 
1281 /*
1282  * in the Post-PMD event of the mixer we need to do the following:
1283  *   - Free the mcps used
1284  *   - Free the mem used
1285  *   - Unbind the modules within the pipeline
1286  *   - Delete the pipeline (modules are not required to be explicitly
1287  *     deleted; deleting the pipeline is enough here)
1288  */
1289 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1290 							struct skl *skl)
1291 {
1292 	struct skl_module_cfg *mconfig = w->priv;
1293 	struct skl_pipe_module *w_module;
1294 	struct skl_module_cfg *src_module = NULL, *dst_module;
1295 	struct skl_sst *ctx = skl->skl_sst;
1296 	struct skl_pipe *s_pipe = mconfig->pipe;
1297 	struct skl_module_deferred_bind *modules, *tmp;
1298 
1299 	if (s_pipe->state == SKL_PIPE_INVALID)
1300 		return -EINVAL;
1301 
1302 	skl_tplg_free_pipe_mcps(skl, mconfig);
1303 	skl_tplg_free_pipe_mem(skl, mconfig);
1304 
1305 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1306 		if (list_empty(&skl->bind_list))
1307 			break;
1308 
1309 		src_module = w_module->w->priv;
1310 
1311 		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1312 			/*
1313 			 * When the destination module is deleted, Unbind the
1314 			 * modules from deferred bind list.
1315 			 */
1316 			if (modules->dst == src_module) {
1317 				skl_unbind_modules(ctx, modules->src,
1318 						modules->dst);
1319 			}
1320 
1321 			/*
1322 			 * When the source module is deleted, remove this entry
1323 			 * from the deferred bind list.
1324 			 */
1325 			if (modules->src == src_module) {
1326 				list_del(&modules->node);
1327 				modules->src = NULL;
1328 				modules->dst = NULL;
1329 				kfree(modules);
1330 			}
1331 		}
1332 	}
1333 
1334 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1335 		dst_module = w_module->w->priv;
1336 
1337 		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
1338 			skl_tplg_free_pipe_mcps(skl, dst_module);
1339 		if (src_module == NULL) {
1340 			src_module = dst_module;
1341 			continue;
1342 		}
1343 
1344 		skl_unbind_modules(ctx, src_module, dst_module);
1345 		src_module = dst_module;
1346 	}
1347 
1348 	skl_delete_pipe(ctx, mconfig->pipe);
1349 
1350 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1351 		src_module = w_module->w->priv;
1352 		src_module->m_state = SKL_MODULE_UNINIT;
1353 	}
1354 
1355 	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1356 }
1357 
1358 /*
1359  * in the Post-PMD event of the PGA we need to do the following:
1360  *   - Free the mcps used
1361  *   - Stop the pipeline
1362  *   - If a source pipe is connected, unbind from the source pipelines
1363  */
1364 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1365 								struct skl *skl)
1366 {
1367 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1368 	int ret = 0, i;
1369 	struct skl_sst *ctx = skl->skl_sst;
1370 
1371 	src_mconfig = w->priv;
1372 
1373 	/* Stop the pipe since this is a mixin module */
1374 	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1375 	if (ret)
1376 		return ret;
1377 
1378 	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
1379 		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1380 			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1381 			if (!sink_mconfig)
1382 				continue;
1383 			/*
1384 			 * This is a connector; if a path is found, it means the
1385 			 * unbind between source and sink has not happened yet
1386 			 */
1387 			ret = skl_unbind_modules(ctx, src_mconfig,
1388 							sink_mconfig);
1389 		}
1390 	}
1391 
1392 	return ret;
1393 }
1394 
1395 /*
1396  * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1397  * second one is required, it is created as another pipe entity.
1398  * The mixer is responsible for pipe management and represents a pipeline
1399  * instance.
1400  */
1401 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1402 				struct snd_kcontrol *k, int event)
1403 {
1404 	struct snd_soc_dapm_context *dapm = w->dapm;
1405 	struct skl *skl = get_skl_ctx(dapm->dev);
1406 
1407 	switch (event) {
1408 	case SND_SOC_DAPM_PRE_PMU:
1409 		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1410 
1411 	case SND_SOC_DAPM_POST_PMU:
1412 		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1413 
1414 	case SND_SOC_DAPM_PRE_PMD:
1415 		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1416 
1417 	case SND_SOC_DAPM_POST_PMD:
1418 		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1419 	}
1420 
1421 	return 0;
1422 }
1423 
1424 /*
1425  * In modelling, we assume the rest of the modules in a pipeline are PGAs.
1426  * We are only interested in the last PGA (leaf PGA) in a pipeline, in order
1427  * to disconnect from the sink while it is running (two FE to one BE or one
1428  * FE to two BE scenarios).
1429  */
1430 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1431 			struct snd_kcontrol *k, int event)
1433 {
1434 	struct snd_soc_dapm_context *dapm = w->dapm;
1435 	struct skl *skl = get_skl_ctx(dapm->dev);
1436 
1437 	switch (event) {
1438 	case SND_SOC_DAPM_PRE_PMU:
1439 		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1440 
1441 	case SND_SOC_DAPM_POST_PMD:
1442 		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1443 	}
1444 
1445 	return 0;
1446 }
1447 
1448 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1449 			unsigned int __user *data, unsigned int size)
1450 {
1451 	struct soc_bytes_ext *sb =
1452 			(struct soc_bytes_ext *)kcontrol->private_value;
1453 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1454 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1455 	struct skl_module_cfg *mconfig = w->priv;
1456 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1457 
1458 	if (w->power)
1459 		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1460 				      bc->size, bc->param_id, mconfig);
1461 
1462 	/* decrement size for TLV header */
1463 	size -= 2 * sizeof(u32);
1464 
1465 	/* check size as we don't want to send kernel data */
1466 	if (size > bc->max)
1467 		size = bc->max;
1468 
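	/*
	 * The user buffer is laid out as a TLV: data[0] holds the param id,
	 * data[1] the payload size, and data[2]... the payload itself.
	 */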
1469 	if (bc->params) {
1470 		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1471 			return -EFAULT;
1472 		if (copy_to_user(data + 1, &size, sizeof(u32)))
1473 			return -EFAULT;
1474 		if (copy_to_user(data + 2, bc->params, size))
1475 			return -EFAULT;
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 #define SKL_PARAM_VENDOR_ID 0xff
1482 
1483 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1484 			const unsigned int __user *data, unsigned int size)
1485 {
1486 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1487 	struct skl_module_cfg *mconfig = w->priv;
1488 	struct soc_bytes_ext *sb =
1489 			(struct soc_bytes_ext *)kcontrol->private_value;
1490 	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1491 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1492 
1493 	if (ac->params) {
1494 		if (size > ac->max)
1495 			return -EINVAL;
1496 
1497 		ac->size = size;
1498 		/*
1499 		 * if the param id is of type Vendor, the firmware expects the
1500 		 * actual parameter id and size from the control.
1501 		 */
1502 		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1503 			if (copy_from_user(ac->params, data, size))
1504 				return -EFAULT;
1505 		} else {
1506 			if (copy_from_user(ac->params,
1507 					   data + 2, size))
1508 				return -EFAULT;
1509 		}
1510 
1511 		if (w->power)
1512 			return skl_set_module_params(skl->skl_sst,
1513 						(u32 *)ac->params, ac->size,
1514 						ac->param_id, mconfig);
1515 	}
1516 
1517 	return 0;
1518 }
1519 
1520 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
1521 		struct snd_ctl_elem_value *ucontrol)
1522 {
1523 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1524 	struct skl_module_cfg *mconfig = w->priv;
1525 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1526 	u32 ch_type = *((u32 *)ec->dobj.private);
1527 
1528 	if (mconfig->dmic_ch_type == ch_type)
1529 		ucontrol->value.enumerated.item[0] =
1530 					mconfig->dmic_ch_combo_index;
1531 	else
1532 		ucontrol->value.enumerated.item[0] = 0;
1533 
1534 	return 0;
1535 }
1536 
1537 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
1538 	struct skl_mic_sel_config *mic_cfg, struct device *dev)
1539 {
1540 	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;
1541 
1542 	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
1543 	sp_cfg->set_params = SKL_PARAM_SET;
1544 	sp_cfg->param_id = 0x00;
1545 	if (!sp_cfg->caps) {
1546 		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
1547 		if (!sp_cfg->caps)
1548 			return -ENOMEM;
1549 	}
1550 
1551 	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
1552 	mic_cfg->flags = 0;
1553 	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
1554 
1555 	return 0;
1556 }
1557 
1558 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
1559 			struct snd_ctl_elem_value *ucontrol)
1560 {
1561 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1562 	struct skl_module_cfg *mconfig = w->priv;
1563 	struct skl_mic_sel_config mic_cfg = {0};
1564 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1565 	u32 ch_type = *((u32 *)ec->dobj.private);
1566 	const int *list;
1567 	u8 in_ch, out_ch, index;
1568 
1569 	mconfig->dmic_ch_type = ch_type;
1570 	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
1571 
1572 	/* enum control index 0 is INVALID, so no channels to be set */
1573 	if (mconfig->dmic_ch_combo_index == 0)
1574 		return 0;
1575 
1576 	/* No valid channel selection map for index 0, so offset by 1 */
1577 	index = mconfig->dmic_ch_combo_index - 1;
1578 
1579 	switch (ch_type) {
1580 	case SKL_CH_MONO:
1581 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
1582 			return -EINVAL;
1583 
1584 		list = &mic_mono_list[index];
1585 		break;
1586 
1587 	case SKL_CH_STEREO:
1588 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
1589 			return -EINVAL;
1590 
1591 		list = mic_stereo_list[index];
1592 		break;
1593 
1594 	case SKL_CH_TRIO:
1595 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
1596 			return -EINVAL;
1597 
1598 		list = mic_trio_list[index];
1599 		break;
1600 
1601 	case SKL_CH_QUATRO:
1602 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
1603 			return -EINVAL;
1604 
1605 		list = mic_quatro_list[index];
1606 		break;
1607 
1608 	default:
1609 		dev_err(w->dapm->dev,
1610 				"Invalid channel %d for mic_select module\n",
1611 				ch_type);
1612 		return -EINVAL;
1613 
1614 	}
1615 
1616 	/* the channel type enum maps to the number of channels for that type */
1617 	for (out_ch = 0; out_ch < ch_type; out_ch++) {
1618 		in_ch = list[out_ch];
1619 		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
1620 	}
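	/*
	 * Example (illustrative): SKL_CH_STEREO with combo index 2 selects
	 * mic_stereo_list[1] = {0, 2}, so output channel 0 is fed from mic 0
	 * and output channel 1 from mic 2.
	 */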
1621 
1622 	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
1623 }
1624 
1625 /*
1626  * Fill the dma id for host and link. In case of a passthrough
1627  * pipeline, both host and link are in the same pipeline, so we need to
1628  * copy the link or host params based on dev_type
1629  */
1630 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1631 				struct skl_pipe_params *params)
1632 {
1633 	struct skl_pipe *pipe = mcfg->pipe;
1634 
1635 	if (pipe->passthru) {
1636 		switch (mcfg->dev_type) {
1637 		case SKL_DEVICE_HDALINK:
1638 			pipe->p_params->link_dma_id = params->link_dma_id;
1639 			pipe->p_params->link_index = params->link_index;
1640 			pipe->p_params->link_bps = params->link_bps;
1641 			break;
1642 
1643 		case SKL_DEVICE_HDAHOST:
1644 			pipe->p_params->host_dma_id = params->host_dma_id;
1645 			pipe->p_params->host_bps = params->host_bps;
1646 			break;
1647 
1648 		default:
1649 			break;
1650 		}
1651 		pipe->p_params->s_fmt = params->s_fmt;
1652 		pipe->p_params->ch = params->ch;
1653 		pipe->p_params->s_freq = params->s_freq;
1654 		pipe->p_params->stream = params->stream;
1655 		pipe->p_params->format = params->format;
1656 
1657 	} else {
1658 		memcpy(pipe->p_params, params, sizeof(*params));
1659 	}
1660 }
1661 
1662 /*
1663  * The FE params are passed by hw_params of the DAI.
1664  * On hw_params, the params are stored in the gateway module of the FE and
1665  * we need to calculate the format for the DSP module configuration; that
1666  * conversion is done here
1667  */
1668 int skl_tplg_update_pipe_params(struct device *dev,
1669 			struct skl_module_cfg *mconfig,
1670 			struct skl_pipe_params *params)
1671 {
1672 	struct skl_module_res *res = &mconfig->module->resources[0];
1673 	struct skl *skl = get_skl_ctx(dev);
1674 	struct skl_module_fmt *format = NULL;
1675 	u8 cfg_idx = mconfig->pipe->cur_config_idx;
1676 
1677 	skl_tplg_fill_dma_id(mconfig, params);
1678 	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
1679 	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
1680 
1681 	if (skl->nr_modules)
1682 		return 0;
1683 
1684 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1685 		format = &mconfig->module->formats[0].inputs[0].fmt;
1686 	else
1687 		format = &mconfig->module->formats[0].outputs[0].fmt;
1688 
1689 	/* set the hw_params */
1690 	format->s_freq = params->s_freq;
1691 	format->channels = params->ch;
1692 	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1693 
1694 	/*
1695 	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1696 	 * container so update bit depth accordingly
1697 	 */
1698 	switch (format->valid_bit_depth) {
1699 	case SKL_DEPTH_16BIT:
1700 		format->bit_depth = format->valid_bit_depth;
1701 		break;
1702 
1703 	case SKL_DEPTH_24BIT:
1704 	case SKL_DEPTH_32BIT:
1705 		format->bit_depth = SKL_DEPTH_32BIT;
1706 		break;
1707 
1708 	default:
1709 		dev_err(dev, "Invalid bit depth %x for pipe\n",
1710 				format->valid_bit_depth);
1711 		return -EINVAL;
1712 	}
1713 
1714 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1715 		res->ibs = (format->s_freq / 1000) *
1716 				(format->channels) *
1717 				(format->bit_depth >> 3);
1718 	} else {
1719 		res->obs = (format->s_freq / 1000) *
1720 				(format->channels) *
1721 				(format->bit_depth >> 3);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 /*
1728  * Query the module config for the FE DAI
1729  * This is used to find the hw_params set for that DAI and apply them to
1730  * the FE pipeline
1731  */
1732 struct skl_module_cfg *
1733 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1734 {
1735 	struct snd_soc_dapm_widget *w;
1736 	struct snd_soc_dapm_path *p = NULL;
1737 
1738 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1739 		w = dai->playback_widget;
1740 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
1741 			if (p->connect && p->sink->power &&
1742 				!is_skl_dsp_widget_type(p->sink, dai->dev))
1743 				continue;
1744 
1745 			if (p->sink->priv) {
1746 				dev_dbg(dai->dev, "set params for %s\n",
1747 						p->sink->name);
1748 				return p->sink->priv;
1749 			}
1750 		}
1751 	} else {
1752 		w = dai->capture_widget;
1753 		snd_soc_dapm_widget_for_each_source_path(w, p) {
1754 			if (p->connect && p->source->power &&
1755 				!is_skl_dsp_widget_type(p->source, dai->dev))
1756 				continue;
1757 
1758 			if (p->source->priv) {
1759 				dev_dbg(dai->dev, "set params for %s\n",
1760 						p->source->name);
1761 				return p->source->priv;
1762 			}
1763 		}
1764 	}
1765 
1766 	return NULL;
1767 }
1768 
1769 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1770 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1771 {
1772 	struct snd_soc_dapm_path *p;
1773 	struct skl_module_cfg *mconfig = NULL;
1774 
1775 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1776 		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1777 			if (p->connect &&
1778 				    (p->sink->id == snd_soc_dapm_aif_out) &&
1779 				    p->source->priv) {
1780 				mconfig = p->source->priv;
1781 				return mconfig;
1782 			}
1783 			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1784 			if (mconfig)
1785 				return mconfig;
1786 		}
1787 	}
1788 	return mconfig;
1789 }
1790 
1791 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1792 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1793 {
1794 	struct snd_soc_dapm_path *p;
1795 	struct skl_module_cfg *mconfig = NULL;
1796 
1797 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1798 		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1799 			if (p->connect &&
1800 				    (p->source->id == snd_soc_dapm_aif_in) &&
1801 				    p->sink->priv) {
1802 				mconfig = p->sink->priv;
1803 				return mconfig;
1804 			}
1805 			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1806 			if (mconfig)
1807 				return mconfig;
1808 		}
1809 	}
1810 	return mconfig;
1811 }
1812 
1813 struct skl_module_cfg *
1814 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1815 {
1816 	struct snd_soc_dapm_widget *w;
1817 	struct skl_module_cfg *mconfig;
1818 
1819 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1820 		w = dai->playback_widget;
1821 		mconfig = skl_get_mconfig_pb_cpr(dai, w);
1822 	} else {
1823 		w = dai->capture_widget;
1824 		mconfig = skl_get_mconfig_cap_cpr(dai, w);
1825 	}
1826 	return mconfig;
1827 }
1828 
1829 static u8 skl_tplg_be_link_type(int dev_type)
1830 {
1831 	int ret;
1832 
1833 	switch (dev_type) {
1834 	case SKL_DEVICE_BT:
1835 		ret = NHLT_LINK_SSP;
1836 		break;
1837 
1838 	case SKL_DEVICE_DMIC:
1839 		ret = NHLT_LINK_DMIC;
1840 		break;
1841 
1842 	case SKL_DEVICE_I2S:
1843 		ret = NHLT_LINK_SSP;
1844 		break;
1845 
1846 	case SKL_DEVICE_HDALINK:
1847 		ret = NHLT_LINK_HDA;
1848 		break;
1849 
1850 	default:
1851 		ret = NHLT_LINK_INVALID;
1852 		break;
1853 	}
1854 
1855 	return ret;
1856 }
1857 
1858 /*
1859  * Fill the BE gateway parameters
1860  * The BE gateway expects a blob of parameters which are kept in the ACPI
1861  * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1862  * The port can have multiple settings, so pick one based on the PCM
1863  * parameters
1864  */
1865 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1866 				struct skl_module_cfg *mconfig,
1867 				struct skl_pipe_params *params)
1868 {
1869 	struct nhlt_specific_cfg *cfg;
1870 	struct skl *skl = get_skl_ctx(dai->dev);
1871 	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1872 	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
1873 
1874 	skl_tplg_fill_dma_id(mconfig, params);
1875 
1876 	if (link_type == NHLT_LINK_HDA)
1877 		return 0;
1878 
1879 	/* update the blob based on virtual bus_id*/
1880 	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1881 					params->s_fmt, params->ch,
1882 					params->s_freq, params->stream,
1883 					dev_type);
1884 	if (cfg) {
1885 		mconfig->formats_config.caps_size = cfg->size;
1886 		mconfig->formats_config.caps = (u32 *) &cfg->caps;
1887 	} else {
1888 		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1889 					mconfig->vbus_id, link_type,
1890 					params->stream);
1891 		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1892 				 params->ch, params->s_freq, params->s_fmt);
1893 		return -EINVAL;
1894 	}
1895 
1896 	return 0;
1897 }
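
/*
 * Note (illustrative): skl_get_ep_blob() above selects one endpoint
 * configuration from the ACPI NHLT table, keyed by virtual bus id, link
 * type (SSP/DMIC), sample format, channel count, rate, direction and
 * device type. For example, a mono 16 kHz/16-bit capture on an SSP BT
 * endpoint would pick the NHLT config matching exactly those PCM
 * parameters; if no entry matches, the BE hw_params fails with -EINVAL
 * as seen above. The concrete endpoint named here is hypothetical.
 */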
1898 
1899 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1900 				struct snd_soc_dapm_widget *w,
1901 				struct skl_pipe_params *params)
1902 {
1903 	struct snd_soc_dapm_path *p;
1904 	int ret = -EIO;
1905 
1906 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1907 		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
1908 						p->source->priv) {
1909 
1910 			ret = skl_tplg_be_fill_pipe_params(dai,
1911 						p->source->priv, params);
1912 			if (ret < 0)
1913 				return ret;
1914 		} else {
1915 			ret = skl_tplg_be_set_src_pipe_params(dai,
1916 						p->source, params);
1917 			if (ret < 0)
1918 				return ret;
1919 		}
1920 	}
1921 
1922 	return ret;
1923 }
1924 
1925 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1926 	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1927 {
1928 	struct snd_soc_dapm_path *p = NULL;
1929 	int ret = -EIO;
1930 
1931 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1932 		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
1933 						p->sink->priv) {
1934 
1935 			ret = skl_tplg_be_fill_pipe_params(dai,
1936 						p->sink->priv, params);
1937 			if (ret < 0)
1938 				return ret;
1939 		} else {
1940 			ret = skl_tplg_be_set_sink_pipe_params(
1941 						dai, p->sink, params);
1942 			if (ret < 0)
1943 				return ret;
1944 		}
1945 	}
1946 
1947 	return ret;
1948 }
1949 
1950 /*
1951  * BE hw_params can be source parameters (capture) or sink parameters
1952  * (playback). Based on the stream direction, walk either the source
1953  * list or the sink list and set the pipeline parameters
1954  */
1955 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1956 				struct skl_pipe_params *params)
1957 {
1958 	struct snd_soc_dapm_widget *w;
1959 
1960 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1961 		w = dai->playback_widget;
1962 
1963 		return skl_tplg_be_set_src_pipe_params(dai, w, params);
1964 
1965 	} else {
1966 		w = dai->capture_widget;
1967 
1968 		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1969 	}
1970 
1971 	return 0;
1972 }
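
/*
 * Illustrative caller (a sketch; the field names below come from
 * struct skl_pipe_params and are assumptions about how a BE DAI
 * hw_params handler packages the negotiated settings):
 *
 *	struct skl_pipe_params p = {
 *		.s_freq = params_rate(params),
 *		.ch     = params_channels(params),
 *		.s_fmt  = params_width(params),
 *		.stream = substream->stream,
 *	};
 *	return skl_tplg_be_update_params(dai, &p);
 */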
1973 
1974 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1975 	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
1976 	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1977 	{SKL_PGA_EVENT, skl_tplg_pga_event},
1978 };
1979 
1980 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1981 	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1982 					skl_tplg_tlv_control_set},
1983 };
1984 
1985 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1986 	{
1987 		.id = SKL_CONTROL_TYPE_MIC_SELECT,
1988 		.get = skl_tplg_mic_control_get,
1989 		.put = skl_tplg_mic_control_set,
1990 	},
1991 };
1992 
1993 static int skl_tplg_fill_pipe_cfg(struct device *dev,
1994 			struct skl_pipe *pipe, u32 tkn,
1995 			u32 tkn_val, int conf_idx, int dir)
1996 {
1997 	struct skl_pipe_fmt *fmt;
1998 	struct skl_path_config *config;
1999 
2000 	switch (dir) {
2001 	case SKL_DIR_IN:
2002 		fmt = &pipe->configs[conf_idx].in_fmt;
2003 		break;
2004 
2005 	case SKL_DIR_OUT:
2006 		fmt = &pipe->configs[conf_idx].out_fmt;
2007 		break;
2008 
2009 	default:
2010 		dev_err(dev, "Invalid direction: %d\n", dir);
2011 		return -EINVAL;
2012 	}
2013 
2014 	config = &pipe->configs[conf_idx];
2015 
2016 	switch (tkn) {
2017 	case SKL_TKN_U32_CFG_FREQ:
2018 		fmt->freq = tkn_val;
2019 		break;
2020 
2021 	case SKL_TKN_U8_CFG_CHAN:
2022 		fmt->channels = tkn_val;
2023 		break;
2024 
2025 	case SKL_TKN_U8_CFG_BPS:
2026 		fmt->bps = tkn_val;
2027 		break;
2028 
2029 	case SKL_TKN_U32_PATH_MEM_PGS:
2030 		config->mem_pages = tkn_val;
2031 		break;
2032 
2033 	default:
2034 		dev_err(dev, "Invalid token config: %d\n", tkn);
2035 		return -EINVAL;
2036 	}
2037 
2038 	return 0;
2039 }
2040 
2041 static int skl_tplg_fill_pipe_tkn(struct device *dev,
2042 			struct skl_pipe *pipe, u32 tkn,
2043 			u32 tkn_val)
2044 {
2045 
2046 	switch (tkn) {
2047 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2048 		pipe->conn_type = tkn_val;
2049 		break;
2050 
2051 	case SKL_TKN_U32_PIPE_PRIORITY:
2052 		pipe->pipe_priority = tkn_val;
2053 		break;
2054 
2055 	case SKL_TKN_U32_PIPE_MEM_PGS:
2056 		pipe->memory_pages = tkn_val;
2057 		break;
2058 
2059 	case SKL_TKN_U32_PMODE:
2060 		pipe->lp_mode = tkn_val;
2061 		break;
2062 
2063 	case SKL_TKN_U32_PIPE_DIRECTION:
2064 		pipe->direction = tkn_val;
2065 		break;
2066 
2067 	case SKL_TKN_U32_NUM_CONFIGS:
2068 		pipe->nr_cfgs = tkn_val;
2069 		break;
2070 
2071 	default:
2072 		dev_err(dev, "Token not handled %d\n", tkn);
2073 		return -EINVAL;
2074 	}
2075 
2076 	return 0;
2077 }
2078 
2079 /*
2080  * Add pipeline by parsing the relevant tokens
2081  * If the pipe already exists, reuse it and return -EEXIST.
2082  */
2083 static int skl_tplg_add_pipe(struct device *dev,
2084 		struct skl_module_cfg *mconfig, struct skl *skl,
2085 		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
2086 {
2087 	struct skl_pipeline *ppl;
2088 	struct skl_pipe *pipe;
2089 	struct skl_pipe_params *params;
2090 
2091 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2092 		if (ppl->pipe->ppl_id == tkn_elem->value) {
2093 			mconfig->pipe = ppl->pipe;
2094 			return -EEXIST;
2095 		}
2096 	}
2097 
2098 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2099 	if (!ppl)
2100 		return -ENOMEM;
2101 
2102 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2103 	if (!pipe)
2104 		return -ENOMEM;
2105 
2106 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2107 	if (!params)
2108 		return -ENOMEM;
2109 
2110 	pipe->p_params = params;
2111 	pipe->ppl_id = tkn_elem->value;
2112 	INIT_LIST_HEAD(&pipe->w_list);
2113 
2114 	ppl->pipe = pipe;
2115 	list_add(&ppl->node, &skl->ppl_list);
2116 
2117 	mconfig->pipe = pipe;
2118 	mconfig->pipe->state = SKL_PIPE_INVALID;
2119 
2120 	return 0;
2121 }
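
/*
 * Worked example (illustrative): if two widgets in the topology carry
 * SKL_TKN_U32_PIPE_ID = 3, the first call above allocates the pipe and
 * returns 0, while the second finds it on skl->ppl_list and returns
 * -EEXIST; the token parser treats that as "pipe already present" and
 * simply points the second widget's mconfig at the shared skl_pipe.
 */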
2122 
2123 static int skl_tplg_get_uuid(struct device *dev, u8 *guid,
2124 	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2125 {
2126 	if (uuid_tkn->token == SKL_TKN_UUID) {
2127 		memcpy(guid, &uuid_tkn->uuid, 16);
2128 		return 0;
2129 	}
2130 
2131 	dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);
2132 
2133 	return -EINVAL;
2134 }
2135 
2136 static int skl_tplg_fill_pin(struct device *dev,
2137 			struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2138 			struct skl_module_pin *m_pin,
2139 			int pin_index)
2140 {
2141 	int ret;
2142 
2143 	switch (tkn_elem->token) {
2144 	case SKL_TKN_U32_PIN_MOD_ID:
2145 		m_pin[pin_index].id.module_id = tkn_elem->value;
2146 		break;
2147 
2148 	case SKL_TKN_U32_PIN_INST_ID:
2149 		m_pin[pin_index].id.instance_id = tkn_elem->value;
2150 		break;
2151 
2152 	case SKL_TKN_UUID:
2153 		ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b,
2154 			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
2155 		if (ret < 0)
2156 			return ret;
2157 
2158 		break;
2159 
2160 	default:
2161 		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
2162 		return -EINVAL;
2163 	}
2164 
2165 	return 0;
2166 }
2167 
2168 /*
2169  * Parse for pin config specific tokens to fill up the
2170  * module private data
2171  */
2172 static int skl_tplg_fill_pins_info(struct device *dev,
2173 		struct skl_module_cfg *mconfig,
2174 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2175 		int dir, int pin_count)
2176 {
2177 	int ret;
2178 	struct skl_module_pin *m_pin;
2179 
2180 	switch (dir) {
2181 	case SKL_DIR_IN:
2182 		m_pin = mconfig->m_in_pin;
2183 		break;
2184 
2185 	case SKL_DIR_OUT:
2186 		m_pin = mconfig->m_out_pin;
2187 		break;
2188 
2189 	default:
2190 		dev_err(dev, "Invalid direction value\n");
2191 		return -EINVAL;
2192 	}
2193 
2194 	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
2195 	if (ret < 0)
2196 		return ret;
2197 
2198 	m_pin[pin_count].in_use = false;
2199 	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2200 
2201 	return 0;
2202 }
2203 
2204 /*
2205  * Fill up input/output module config format based
2206  * on the direction
2207  */
2208 static int skl_tplg_fill_fmt(struct device *dev,
2209 		struct skl_module_fmt *dst_fmt,
2210 		u32 tkn, u32 value)
2211 {
2212 	switch (tkn) {
2213 	case SKL_TKN_U32_FMT_CH:
2214 		dst_fmt->channels  = value;
2215 		break;
2216 
2217 	case SKL_TKN_U32_FMT_FREQ:
2218 		dst_fmt->s_freq = value;
2219 		break;
2220 
2221 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2222 		dst_fmt->bit_depth = value;
2223 		break;
2224 
2225 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2226 		dst_fmt->valid_bit_depth = value;
2227 		break;
2228 
2229 	case SKL_TKN_U32_FMT_CH_CONFIG:
2230 		dst_fmt->ch_cfg = value;
2231 		break;
2232 
2233 	case SKL_TKN_U32_FMT_INTERLEAVE:
2234 		dst_fmt->interleaving_style = value;
2235 		break;
2236 
2237 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2238 		dst_fmt->sample_type = value;
2239 		break;
2240 
2241 	case SKL_TKN_U32_FMT_CH_MAP:
2242 		dst_fmt->ch_map = value;
2243 		break;
2244 
2245 	default:
2246 		dev_err(dev, "Invalid token %d\n", tkn);
2247 		return -EINVAL;
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 static int skl_tplg_widget_fill_fmt(struct device *dev,
2254 		struct skl_module_iface *fmt,
2255 		u32 tkn, u32 val, u32 dir, int fmt_idx)
2256 {
2257 	struct skl_module_fmt *dst_fmt;
2258 
2259 	if (!fmt)
2260 		return -EINVAL;
2261 
2262 	switch (dir) {
2263 	case SKL_DIR_IN:
2264 		dst_fmt = &fmt->inputs[fmt_idx].fmt;
2265 		break;
2266 
2267 	case SKL_DIR_OUT:
2268 		dst_fmt = &fmt->outputs[fmt_idx].fmt;
2269 		break;
2270 
2271 	default:
2272 		dev_err(dev, "Invalid direction: %d\n", dir);
2273 		return -EINVAL;
2274 	}
2275 
2276 	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2277 }
2278 
2279 static void skl_tplg_fill_pin_dynamic_val(
2280 		struct skl_module_pin *mpin, u32 pin_count, u32 value)
2281 {
2282 	int i;
2283 
2284 	for (i = 0; i < pin_count; i++)
2285 		mpin[i].is_dynamic = value;
2286 }
2287 
2288 /*
2289  * The resource table in the manifest has pin-specific resources
2290  * like the pin index and pin buffer size
2291  */
2292 static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
2293 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2294 		struct skl_module_res *res, int pin_idx, int dir)
2295 {
2296 	struct skl_module_pin_resources *m_pin;
2297 
2298 	switch (dir) {
2299 	case SKL_DIR_IN:
2300 		m_pin = &res->input[pin_idx];
2301 		break;
2302 
2303 	case SKL_DIR_OUT:
2304 		m_pin = &res->output[pin_idx];
2305 		break;
2306 
2307 	default:
2308 		dev_err(dev, "Invalid pin direction: %d\n", dir);
2309 		return -EINVAL;
2310 	}
2311 
2312 	switch (tkn_elem->token) {
2313 	case SKL_TKN_MM_U32_RES_PIN_ID:
2314 		m_pin->pin_index = tkn_elem->value;
2315 		break;
2316 
2317 	case SKL_TKN_MM_U32_PIN_BUF:
2318 		m_pin->buf_size = tkn_elem->value;
2319 		break;
2320 
2321 	default:
2322 		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2323 		return -EINVAL;
2324 	}
2325 
2326 	return 0;
2327 }
2328 
2329 /*
2330  * Fill module specific resources from the manifest's resource
2331  * table like CPS, DMA size, mem_pages.
2332  */
2333 static int skl_tplg_fill_res_tkn(struct device *dev,
2334 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2335 		struct skl_module_res *res,
2336 		int pin_idx, int dir)
2337 {
2338 	int ret, tkn_count = 0;
2339 
2340 	if (!res)
2341 		return -EINVAL;
2342 
2343 	switch (tkn_elem->token) {
2344 	case SKL_TKN_MM_U32_CPS:
2345 		res->cps = tkn_elem->value;
2346 		break;
2347 
2348 	case SKL_TKN_MM_U32_DMA_SIZE:
2349 		res->dma_buffer_size = tkn_elem->value;
2350 		break;
2351 
2352 	case SKL_TKN_MM_U32_CPC:
2353 		res->cpc = tkn_elem->value;
2354 		break;
2355 
2356 	case SKL_TKN_U32_MEM_PAGES:
2357 		res->is_pages = tkn_elem->value;
2358 		break;
2359 
2360 	case SKL_TKN_U32_OBS:
2361 		res->obs = tkn_elem->value;
2362 		break;
2363 
2364 	case SKL_TKN_U32_IBS:
2365 		res->ibs = tkn_elem->value;
2366 		break;
2367 
2368 	case SKL_TKN_U32_MAX_MCPS:
2369 		res->cps = tkn_elem->value;
2370 		break;
2371 
2372 	case SKL_TKN_MM_U32_RES_PIN_ID:
2373 	case SKL_TKN_MM_U32_PIN_BUF:
2374 		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
2375 						    pin_idx, dir);
2376 		if (ret < 0)
2377 			return ret;
2378 		break;
2379 
2380 	default:
2381 		dev_err(dev, "Not a res type token: %d\n", tkn_elem->token);
2382 		return -EINVAL;
2383 
2384 	}
2385 	tkn_count++;
2386 
2387 	return tkn_count;
2388 }
2389 
2390 /*
2391  * Parse tokens to fill up the module private data
2392  */
2393 static int skl_tplg_get_token(struct device *dev,
2394 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2395 		struct skl *skl, struct skl_module_cfg *mconfig)
2396 {
2397 	int tkn_count = 0;
2398 	int ret;
2399 	static int is_pipe_exists;
2400 	static int pin_index, dir, conf_idx;
2401 	struct skl_module_iface *iface = NULL;
2402 	struct skl_module_res *res = NULL;
2403 	int res_idx = mconfig->res_idx;
2404 	int fmt_idx = mconfig->fmt_idx;
2405 
2406 	/*
2407 	 * If the manifest structure contains no modules, fill all
2408 	 * the module data into the 0th index.
2409 	 * res_idx and fmt_idx default to 0.
2410 	 */
2411 	if (skl->nr_modules == 0) {
2412 		res = &mconfig->module->resources[res_idx];
2413 		iface = &mconfig->module->formats[fmt_idx];
2414 	}
2415 
2416 	if (tkn_elem->token > SKL_TKN_MAX)
2417 		return -EINVAL;
2418 
2419 	switch (tkn_elem->token) {
2420 	case SKL_TKN_U8_IN_QUEUE_COUNT:
2421 		mconfig->module->max_input_pins = tkn_elem->value;
2422 		break;
2423 
2424 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
2425 		mconfig->module->max_output_pins = tkn_elem->value;
2426 		break;
2427 
2428 	case SKL_TKN_U8_DYN_IN_PIN:
2429 		if (!mconfig->m_in_pin)
2430 			mconfig->m_in_pin = devm_kzalloc(dev, MAX_IN_QUEUE *
2431 					sizeof(*mconfig->m_in_pin), GFP_KERNEL);
2432 		if (!mconfig->m_in_pin)
2433 			return -ENOMEM;
2434 
2435 		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
2436 					      tkn_elem->value);
2437 		break;
2438 
2439 	case SKL_TKN_U8_DYN_OUT_PIN:
2440 		if (!mconfig->m_out_pin)
2441 			mconfig->m_out_pin = devm_kzalloc(dev, MAX_OUT_QUEUE *
2442 					sizeof(*mconfig->m_out_pin), GFP_KERNEL);
2443 		if (!mconfig->m_out_pin)
2444 			return -ENOMEM;
2445 
2446 		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
2447 					      tkn_elem->value);
2448 		break;
2449 
2450 	case SKL_TKN_U8_TIME_SLOT:
2451 		mconfig->time_slot = tkn_elem->value;
2452 		break;
2453 
2454 	case SKL_TKN_U8_CORE_ID:
2455 		mconfig->core_id = tkn_elem->value;
2456 		break;
2457 	case SKL_TKN_U8_MOD_TYPE:
2458 		mconfig->m_type = tkn_elem->value;
2459 		break;
2460 
2461 	case SKL_TKN_U8_DEV_TYPE:
2462 		mconfig->dev_type = tkn_elem->value;
2463 		break;
2464 
2465 	case SKL_TKN_U8_HW_CONN_TYPE:
2466 		mconfig->hw_conn_type = tkn_elem->value;
2467 		break;
2468 
2469 	case SKL_TKN_U16_MOD_INST_ID:
2470 		mconfig->id.instance_id =
2471 		tkn_elem->value;
2472 		break;
2473 
2474 	case SKL_TKN_U32_MEM_PAGES:
2475 	case SKL_TKN_U32_MAX_MCPS:
2476 	case SKL_TKN_U32_OBS:
2477 	case SKL_TKN_U32_IBS:
2478 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
2479 		if (ret < 0)
2480 			return ret;
2481 
2482 		break;
2483 
2484 	case SKL_TKN_U32_VBUS_ID:
2485 		mconfig->vbus_id = tkn_elem->value;
2486 		break;
2487 
2488 	case SKL_TKN_U32_PARAMS_FIXUP:
2489 		mconfig->params_fixup = tkn_elem->value;
2490 		break;
2491 
2492 	case SKL_TKN_U32_CONVERTER:
2493 		mconfig->converter = tkn_elem->value;
2494 		break;
2495 
2496 	case SKL_TKN_U32_D0I3_CAPS:
2497 		mconfig->d0i3_caps = tkn_elem->value;
2498 		break;
2499 
2500 	case SKL_TKN_U32_PIPE_ID:
2501 		ret = skl_tplg_add_pipe(dev,
2502 				mconfig, skl, tkn_elem);
2503 
2504 		if (ret < 0) {
2505 			if (ret == -EEXIST) {
2506 				is_pipe_exists = 1;
2507 				break;
2508 			}
2509 			return ret;
2510 		}
2511 
2512 		break;
2513 
2514 	case SKL_TKN_U32_PIPE_CONFIG_ID:
2515 		conf_idx = tkn_elem->value;
2516 		break;
2517 
2518 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2519 	case SKL_TKN_U32_PIPE_PRIORITY:
2520 	case SKL_TKN_U32_PIPE_MEM_PGS:
2521 	case SKL_TKN_U32_PMODE:
2522 	case SKL_TKN_U32_PIPE_DIRECTION:
2523 	case SKL_TKN_U32_NUM_CONFIGS:
2524 		if (is_pipe_exists) {
2525 			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2526 					tkn_elem->token, tkn_elem->value);
2527 			if (ret < 0)
2528 				return ret;
2529 		}
2530 
2531 		break;
2532 
2533 	case SKL_TKN_U32_PATH_MEM_PGS:
2534 	case SKL_TKN_U32_CFG_FREQ:
2535 	case SKL_TKN_U8_CFG_CHAN:
2536 	case SKL_TKN_U8_CFG_BPS:
2537 		if (mconfig->pipe->nr_cfgs) {
2538 			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
2539 					tkn_elem->token, tkn_elem->value,
2540 					conf_idx, dir);
2541 			if (ret < 0)
2542 				return ret;
2543 		}
2544 		break;
2545 
2546 	case SKL_TKN_CFG_MOD_RES_ID:
2547 		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
2548 		break;
2549 
2550 	case SKL_TKN_CFG_MOD_FMT_ID:
2551 		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
2552 		break;
2553 
2554 	/*
2555 	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
2556 	 * direction and the pin count: bits [3:0] carry the direction
2557 	 * and bits [7:4] the pin count.
2558 	 */
2559 	case SKL_TKN_U32_DIR_PIN_COUNT:
2560 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2561 		pin_index = (tkn_elem->value &
2562 			SKL_PIN_COUNT_MASK) >> 4;
2563 
2564 		break;
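
		/*
		 * Decode example for the token above (illustrative): a value
		 * of 0x31 yields dir = 0x31 & SKL_IN_DIR_BIT_MASK = 1 and
		 * pin_index = (0x31 & SKL_PIN_COUNT_MASK) >> 4 = 3.
		 */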
2565 
2566 	case SKL_TKN_U32_FMT_CH:
2567 	case SKL_TKN_U32_FMT_FREQ:
2568 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2569 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2570 	case SKL_TKN_U32_FMT_CH_CONFIG:
2571 	case SKL_TKN_U32_FMT_INTERLEAVE:
2572 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2573 	case SKL_TKN_U32_FMT_CH_MAP:
2574 		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
2575 				tkn_elem->value, dir, pin_index);
2576 
2577 		if (ret < 0)
2578 			return ret;
2579 
2580 		break;
2581 
2582 	case SKL_TKN_U32_PIN_MOD_ID:
2583 	case SKL_TKN_U32_PIN_INST_ID:
2584 	case SKL_TKN_UUID:
2585 		ret = skl_tplg_fill_pins_info(dev,
2586 				mconfig, tkn_elem, dir,
2587 				pin_index);
2588 		if (ret < 0)
2589 			return ret;
2590 
2591 		break;
2592 
2593 	case SKL_TKN_U32_CAPS_SIZE:
2594 		mconfig->formats_config.caps_size =
2595 			tkn_elem->value;
2596 
2597 		break;
2598 
2599 	case SKL_TKN_U32_CAPS_SET_PARAMS:
2600 		mconfig->formats_config.set_params =
2601 				tkn_elem->value;
2602 		break;
2603 
2604 	case SKL_TKN_U32_CAPS_PARAMS_ID:
2605 		mconfig->formats_config.param_id =
2606 				tkn_elem->value;
2607 		break;
2608 
2609 	case SKL_TKN_U32_PROC_DOMAIN:
2610 		mconfig->domain =
2611 			tkn_elem->value;
2612 
2613 		break;
2614 
2615 	case SKL_TKN_U32_DMA_BUF_SIZE:
2616 		mconfig->dma_buffer_size = tkn_elem->value;
2617 		break;
2618 
2619 	case SKL_TKN_U8_IN_PIN_TYPE:
2620 	case SKL_TKN_U8_OUT_PIN_TYPE:
2621 	case SKL_TKN_U8_CONN_TYPE:
2622 		break;
2623 
2624 	default:
2625 		dev_err(dev, "Token %d not handled\n",
2626 				tkn_elem->token);
2627 		return -EINVAL;
2628 	}
2629 
2630 	tkn_count++;
2631 
2632 	return tkn_count;
2633 }
2634 
2635 /*
2636  * Parse the vendor array for specific tokens to construct
2637  * module private data
2638  */
2639 static int skl_tplg_get_tokens(struct device *dev,
2640 		char *pvt_data,	struct skl *skl,
2641 		struct skl_module_cfg *mconfig, int block_size)
2642 {
2643 	struct snd_soc_tplg_vendor_array *array;
2644 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2645 	int tkn_count = 0, ret;
2646 	int off = 0, tuple_size = 0;
2647 	bool is_module_guid = true;
2648 
2649 	if (block_size <= 0)
2650 		return -EINVAL;
2651 
2652 	while (tuple_size < block_size) {
2653 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2654 
2655 		off += array->size;
2656 
2657 		switch (array->type) {
2658 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2659 			dev_warn(dev, "no string tokens expected for skl tplg\n");
2660 			continue;
2661 
2662 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2663 			if (is_module_guid) {
2664 				ret = skl_tplg_get_uuid(dev, mconfig->guid,
2665 							array->uuid);
2666 				is_module_guid = false;
2667 			} else {
2668 				ret = skl_tplg_get_token(dev, array->value, skl,
2669 							 mconfig);
2670 			}
2671 
2672 			if (ret < 0)
2673 				return ret;
2674 
2675 			tuple_size += sizeof(*array->uuid);
2676 
2677 			continue;
2678 
2679 		default:
2680 			tkn_elem = array->value;
2681 			tkn_count = 0;
2682 			break;
2683 		}
2684 
2685 		while (tkn_count <= (array->num_elems - 1)) {
2686 			ret = skl_tplg_get_token(dev, tkn_elem,
2687 					skl, mconfig);
2688 
2689 			if (ret < 0)
2690 				return ret;
2691 
2692 			tkn_count = tkn_count + ret;
2693 			tkn_elem++;
2694 		}
2695 
2696 		tuple_size += tkn_count * sizeof(*tkn_elem);
2697 	}
2698 
2699 	return off;
2700 }
2701 
2702 /*
2703  * Every data block is preceded by a descriptor to read the number
2704  * of data blocks, the type of the block and its size
2705  */
2706 static int skl_tplg_get_desc_blocks(struct device *dev,
2707 		struct snd_soc_tplg_vendor_array *array)
2708 {
2709 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2710 
2711 	tkn_elem = array->value;
2712 
2713 	switch (tkn_elem->token) {
2714 	case SKL_TKN_U8_NUM_BLOCKS:
2715 	case SKL_TKN_U8_BLOCK_TYPE:
2716 	case SKL_TKN_U16_BLOCK_SIZE:
2717 		return tkn_elem->value;
2718 
2719 	default:
2720 		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2721 		break;
2722 	}
2723 
2724 	return -EINVAL;
2725 }
2726 
2727 /*
2728  * Parse the private data for the token and corresponding value.
2729  * The private data can have multiple data blocks. So, a data block
2730  * is preceded by a descriptor for number of blocks and a descriptor
2731  * for the type and size of the succeeding data block.
2732  */
2733 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2734 				struct skl *skl, struct device *dev,
2735 				struct skl_module_cfg *mconfig)
2736 {
2737 	struct snd_soc_tplg_vendor_array *array;
2738 	int num_blocks, block_size = 0, block_type, off = 0;
2739 	char *data;
2740 	int ret;
2741 
2742 	/* Read the NUM_DATA_BLOCKS descriptor */
2743 	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2744 	ret = skl_tplg_get_desc_blocks(dev, array);
2745 	if (ret < 0)
2746 		return ret;
2747 	num_blocks = ret;
2748 
2749 	off += array->size;
2750 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2751 	while (num_blocks > 0) {
2752 		array = (struct snd_soc_tplg_vendor_array *)
2753 				(tplg_w->priv.data + off);
2754 
2755 		ret = skl_tplg_get_desc_blocks(dev, array);
2756 
2757 		if (ret < 0)
2758 			return ret;
2759 		block_type = ret;
2760 		off += array->size;
2761 
2762 		array = (struct snd_soc_tplg_vendor_array *)
2763 			(tplg_w->priv.data + off);
2764 
2765 		ret = skl_tplg_get_desc_blocks(dev, array);
2766 
2767 		if (ret < 0)
2768 			return ret;
2769 		block_size = ret;
2770 		off += array->size;
2771 
2772 		array = (struct snd_soc_tplg_vendor_array *)
2773 			(tplg_w->priv.data + off);
2774 
2775 		data = (tplg_w->priv.data + off);
2776 
2777 		if (block_type == SKL_TYPE_TUPLE) {
2778 			ret = skl_tplg_get_tokens(dev, data,
2779 					skl, mconfig, block_size);
2780 
2781 			if (ret < 0)
2782 				return ret;
2783 
2784 			--num_blocks;
2785 		} else {
2786 			if (mconfig->formats_config.caps_size > 0)
2787 				memcpy(mconfig->formats_config.caps, data,
2788 					mconfig->formats_config.caps_size);
2789 			--num_blocks;
2790 			ret = mconfig->formats_config.caps_size;
2791 		}
2792 		off += ret;
2793 	}
2794 
2795 	return 0;
2796 }
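
/*
 * Layout sketch of the widget private data parsed above (illustrative;
 * the descriptor ordering is what skl_tplg_get_desc_blocks() expects):
 *
 *	[vendor array] SKL_TKN_U8_NUM_BLOCKS  = N
 *	repeated N times:
 *		[vendor array] SKL_TKN_U8_BLOCK_TYPE  = tuple or binary data
 *		[vendor array] SKL_TKN_U16_BLOCK_SIZE = S
 *		[S bytes of token tuples, or raw caps copied to formats_config]
 */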
2797 
2798 static void skl_clear_pin_config(struct snd_soc_component *component,
2799 				struct snd_soc_dapm_widget *w)
2800 {
2801 	int i;
2802 	struct skl_module_cfg *mconfig;
2803 	struct skl_pipe *pipe;
2804 
2805 	if (!strncmp(w->dapm->component->name, component->name,
2806 					strlen(component->name))) {
2807 		mconfig = w->priv;
2808 		pipe = mconfig->pipe;
2809 		for (i = 0; i < mconfig->module->max_input_pins; i++) {
2810 			mconfig->m_in_pin[i].in_use = false;
2811 			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2812 		}
2813 		for (i = 0; i < mconfig->module->max_output_pins; i++) {
2814 			mconfig->m_out_pin[i].in_use = false;
2815 			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2816 		}
2817 		pipe->state = SKL_PIPE_INVALID;
2818 		mconfig->m_state = SKL_MODULE_UNINIT;
2819 	}
2820 }
2821 
2822 void skl_cleanup_resources(struct skl *skl)
2823 {
2824 	struct skl_sst *ctx = skl->skl_sst;
2825 	struct snd_soc_component *soc_component = skl->component;
2826 	struct snd_soc_dapm_widget *w;
2827 	struct snd_soc_card *card;
2828 
2829 	if (soc_component == NULL)
2830 		return;
2831 
2832 	card = soc_component->card;
2833 	if (!card || !card->instantiated)
2834 		return;
2835 
2836 	skl->resource.mem = 0;
2837 	skl->resource.mcps = 0;
2838 
2839 	list_for_each_entry(w, &card->widgets, list) {
2840 		if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL)
2841 			skl_clear_pin_config(soc_component, w);
2842 	}
2843 
2844 	skl_clear_module_cnt(ctx->dsp);
2845 }
2846 
2847 /*
2848  * Topology core widget load callback
2849  *
2850  * This is used to save the private data for each widget, which gives the
2851  * driver information about the module and pipeline parameters the DSP FW
2852  * expects, such as ids, resource values and formats
2853  */
2854 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2855 				struct snd_soc_dapm_widget *w,
2856 				struct snd_soc_tplg_dapm_widget *tplg_w)
2857 {
2858 	int ret;
2859 	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2860 	struct skl *skl = ebus_to_skl(ebus);
2861 	struct hdac_bus *bus = ebus_to_hbus(ebus);
2862 	struct skl_module_cfg *mconfig;
2863 
2864 	if (!tplg_w->priv.size)
2865 		goto bind_event;
2866 
2867 	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2868 
2869 	if (!mconfig)
2870 		return -ENOMEM;
2871 
2872 	if (skl->nr_modules == 0) {
2873 		mconfig->module = devm_kzalloc(bus->dev,
2874 				sizeof(*mconfig->module), GFP_KERNEL);
2875 		if (!mconfig->module)
2876 			return -ENOMEM;
2877 	}
2878 
2879 	w->priv = mconfig;
2880 
2881 	/*
2882 	 * The module binary can be loaded later, so mark the module id
2883 	 * invalid here and query it when the module is loaded for a use case
2884 	 */
2885 	mconfig->id.module_id = -1;
2886 
2887 	/* Parse private data for tuples */
2888 	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2889 	if (ret < 0)
2890 		return ret;
2891 
2892 	skl_debug_init_module(skl->debugfs, w, mconfig);
2893 
2894 bind_event:
2895 	if (tplg_w->event_type == 0) {
2896 		dev_dbg(bus->dev, "ASoC: No event handler required\n");
2897 		return 0;
2898 	}
2899 
2900 	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2901 					ARRAY_SIZE(skl_tplg_widget_ops),
2902 					tplg_w->event_type);
2903 
2904 	if (ret) {
2905 		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2906 					__func__, tplg_w->event_type);
2907 		return -EINVAL;
2908 	}
2909 
2910 	return 0;
2911 }
2912 
2913 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2914 					struct snd_soc_tplg_bytes_control *bc)
2915 {
2916 	struct skl_algo_data *ac;
2917 	struct skl_dfw_algo_data *dfw_ac =
2918 				(struct skl_dfw_algo_data *)bc->priv.data;
2919 
2920 	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2921 	if (!ac)
2922 		return -ENOMEM;
2923 
2924 	/* Fill private data */
2925 	ac->max = dfw_ac->max;
2926 	ac->param_id = dfw_ac->param_id;
2927 	ac->set_params = dfw_ac->set_params;
2928 	ac->size = dfw_ac->max;
2929 
2930 	if (ac->max) {
2931 		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2932 		if (!ac->params)
2933 			return -ENOMEM;
2934 
2935 		memcpy(ac->params, dfw_ac->params, ac->max);
2936 	}
2937 
2938 	be->dobj.private  = ac;
2939 	return 0;
2940 }
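
/*
 * Note (illustrative): the bytes-control private data is expected to be a
 * struct skl_dfw_algo_data header (param_id, set_params, max) immediately
 * followed by up to 'max' bytes of parameter payload; the copy above
 * snapshots that payload into the runtime skl_algo_data so the TLV
 * get/set handlers can apply it later.
 */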
2941 
2942 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
2943 				struct snd_soc_tplg_enum_control *ec)
2944 {
2945 
2946 	void *data;
2947 
2948 	if (ec->priv.size) {
2949 		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
2950 		if (!data)
2951 			return -ENOMEM;
2952 		memcpy(data, ec->priv.data, ec->priv.size);
2953 		se->dobj.private = data;
2954 	}
2955 
2956 	return 0;
2957 
2958 }
2959 
2960 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2961 				struct snd_kcontrol_new *kctl,
2962 				struct snd_soc_tplg_ctl_hdr *hdr)
2963 {
2964 	struct soc_bytes_ext *sb;
2965 	struct snd_soc_tplg_bytes_control *tplg_bc;
2966 	struct snd_soc_tplg_enum_control *tplg_ec;
2967 	struct hdac_ext_bus *ebus  = snd_soc_component_get_drvdata(cmpnt);
2968 	struct hdac_bus *bus = ebus_to_hbus(ebus);
2969 	struct soc_enum *se;
2970 
2971 	switch (hdr->ops.info) {
2972 	case SND_SOC_TPLG_CTL_BYTES:
2973 		tplg_bc = container_of(hdr,
2974 				struct snd_soc_tplg_bytes_control, hdr);
2975 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2976 			sb = (struct soc_bytes_ext *)kctl->private_value;
2977 			if (tplg_bc->priv.size)
2978 				return skl_init_algo_data(
2979 						bus->dev, sb, tplg_bc);
2980 		}
2981 		break;
2982 
2983 	case SND_SOC_TPLG_CTL_ENUM:
2984 		tplg_ec = container_of(hdr,
2985 				struct snd_soc_tplg_enum_control, hdr);
2986 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
2987 			se = (struct soc_enum *)kctl->private_value;
2988 			if (tplg_ec->priv.size)
2989 				return skl_init_enum_data(bus->dev, se,
2990 						tplg_ec);
2991 		}
2992 		break;
2993 
2994 	default:
2995 		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
2996 			hdr->ops.get, hdr->ops.put, hdr->ops.info);
2997 		break;
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3004 		struct snd_soc_tplg_vendor_string_elem *str_elem,
3005 		struct skl *skl)
3006 {
3007 	int tkn_count = 0;
3008 	static int ref_count;
3009 
3010 	switch (str_elem->token) {
3011 	case SKL_TKN_STR_LIB_NAME:
3012 		if (ref_count > skl->skl_sst->lib_count - 1) {
3013 			ref_count = 0;
3014 			return -EINVAL;
3015 		}
3016 
3017 		strncpy(skl->skl_sst->lib_info[ref_count].name,
3018 			str_elem->string,
3019 			ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
3020 		ref_count++;
3021 		break;
3022 
3023 	default:
3024 		dev_err(dev, "Not a string token %d\n", str_elem->token);
3025 		break;
3026 	}
3027 	tkn_count++;
3028 
3029 	return tkn_count;
3030 }
3031 
3032 static int skl_tplg_get_str_tkn(struct device *dev,
3033 		struct snd_soc_tplg_vendor_array *array,
3034 		struct skl *skl)
3035 {
3036 	int tkn_count = 0, ret;
3037 	struct snd_soc_tplg_vendor_string_elem *str_elem;
3038 
3039 	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
3040 	while (tkn_count < array->num_elems) {
3041 		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
3042 		str_elem++;
3043 
3044 		if (ret < 0)
3045 			return ret;
3046 
3047 		tkn_count = tkn_count + ret;
3048 	}
3049 
3050 	return tkn_count;
3051 }
3052 
3053 static int skl_tplg_manifest_fill_fmt(struct device *dev,
3054 		struct skl_module_iface *fmt,
3055 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3056 		u32 dir, int fmt_idx)
3057 {
3058 	struct skl_module_pin_fmt *dst_fmt;
3059 	struct skl_module_fmt *mod_fmt;
3060 	int ret;
3061 
3062 	if (!fmt)
3063 		return -EINVAL;
3064 
3065 	switch (dir) {
3066 	case SKL_DIR_IN:
3067 		dst_fmt = &fmt->inputs[fmt_idx];
3068 		break;
3069 
3070 	case SKL_DIR_OUT:
3071 		dst_fmt = &fmt->outputs[fmt_idx];
3072 		break;
3073 
3074 	default:
3075 		dev_err(dev, "Invalid direction: %d\n", dir);
3076 		return -EINVAL;
3077 	}
3078 
3079 	mod_fmt = &dst_fmt->fmt;
3080 
3081 	switch (tkn_elem->token) {
3082 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3083 		dst_fmt->id = tkn_elem->value;
3084 		break;
3085 
3086 	default:
3087 		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
3088 					tkn_elem->value);
3089 		if (ret < 0)
3090 			return ret;
3091 		break;
3092 	}
3093 
3094 	return 0;
3095 }
3096 
3097 static int skl_tplg_fill_mod_info(struct device *dev,
3098 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3099 		struct skl_module *mod)
3100 {
3101 
3102 	if (!mod)
3103 		return -EINVAL;
3104 
3105 	switch (tkn_elem->token) {
3106 	case SKL_TKN_U8_IN_PIN_TYPE:
3107 		mod->input_pin_type = tkn_elem->value;
3108 		break;
3109 
3110 	case SKL_TKN_U8_OUT_PIN_TYPE:
3111 		mod->output_pin_type = tkn_elem->value;
3112 		break;
3113 
3114 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3115 		mod->max_input_pins = tkn_elem->value;
3116 		break;
3117 
3118 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3119 		mod->max_output_pins = tkn_elem->value;
3120 		break;
3121 
3122 	case SKL_TKN_MM_U8_NUM_RES:
3123 		mod->nr_resources = tkn_elem->value;
3124 		break;
3125 
3126 	case SKL_TKN_MM_U8_NUM_INTF:
3127 		mod->nr_interfaces = tkn_elem->value;
3128 		break;
3129 
3130 	default:
3131 		dev_err(dev, "Invalid mod info token %d\n", tkn_elem->token);
3132 		return -EINVAL;
3133 	}
3134 
3135 	return 0;
3136 }
3137 
3138 
3139 static int skl_tplg_get_int_tkn(struct device *dev,
3140 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3141 		struct skl *skl)
3142 {
3143 	int tkn_count = 0, ret, size;
3144 	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
3145 	struct skl_module_res *res = NULL;
3146 	struct skl_module_iface *fmt = NULL;
3147 	struct skl_module *mod = NULL;
3148 	static struct skl_astate_param *astate_table;
3149 	static int astate_cfg_idx, count;
3150 	int i;
3151 
3152 	if (skl->modules) {
3153 		mod = skl->modules[mod_idx];
3154 		res = &mod->resources[res_val_idx];
3155 		fmt = &mod->formats[intf_val_idx];
3156 	}
3157 
3158 	switch (tkn_elem->token) {
3159 	case SKL_TKN_U32_LIB_COUNT:
3160 		skl->skl_sst->lib_count = tkn_elem->value;
3161 		break;
3162 
3163 	case SKL_TKN_U8_NUM_MOD:
3164 		skl->nr_modules = tkn_elem->value;
3165 		skl->modules = devm_kcalloc(dev, skl->nr_modules,
3166 				sizeof(*skl->modules), GFP_KERNEL);
3167 		if (!skl->modules)
3168 			return -ENOMEM;
3169 
3170 		for (i = 0; i < skl->nr_modules; i++) {
3171 			skl->modules[i] = devm_kzalloc(dev,
3172 					sizeof(struct skl_module), GFP_KERNEL);
3173 			if (!skl->modules[i])
3174 				return -ENOMEM;
3175 		}
3176 		break;
3177 
3178 	case SKL_TKN_MM_U8_MOD_IDX:
3179 		mod_idx = tkn_elem->value;
3180 		break;
3181 
3182 	case SKL_TKN_U32_ASTATE_COUNT:
3183 		if (astate_table != NULL) {
3184 			dev_err(dev, "More than one entry for A-State count");
3185 			return -EINVAL;
3186 		}
3187 
3188 		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
3189 			dev_err(dev, "Invalid A-State count %d\n",
3190 				tkn_elem->value);
3191 			return -EINVAL;
3192 		}
3193 
3194 		size = tkn_elem->value * sizeof(struct skl_astate_param) +
3195 				sizeof(count);
3196 		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
3197 		if (!skl->cfg.astate_cfg)
3198 			return -ENOMEM;
3199 
3200 		astate_table = skl->cfg.astate_cfg->astate_table;
3201 		count = skl->cfg.astate_cfg->count = tkn_elem->value;
3202 		break;
3203 
3204 	case SKL_TKN_U32_ASTATE_IDX:
3205 		if (tkn_elem->value >= count) {
3206 			dev_err(dev, "Invalid A-State index %d\n",
3207 				tkn_elem->value);
3208 			return -EINVAL;
3209 		}
3210 
3211 		astate_cfg_idx = tkn_elem->value;
3212 		break;
3213 
3214 	case SKL_TKN_U32_ASTATE_KCPS:
3215 		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
3216 		break;
3217 
3218 	case SKL_TKN_U32_ASTATE_CLK_SRC:
3219 		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
3220 		break;
3221 
3222 	case SKL_TKN_U8_IN_PIN_TYPE:
3223 	case SKL_TKN_U8_OUT_PIN_TYPE:
3224 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3225 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3226 	case SKL_TKN_MM_U8_NUM_RES:
3227 	case SKL_TKN_MM_U8_NUM_INTF:
3228 		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
3229 		if (ret < 0)
3230 			return ret;
3231 		break;
3232 
3233 	case SKL_TKN_U32_DIR_PIN_COUNT:
3234 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
3235 		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
3236 		break;
3237 
3238 	case SKL_TKN_MM_U32_RES_ID:
3239 		if (!res)
3240 			return -EINVAL;
3241 
3242 		res->id = tkn_elem->value;
3243 		res_val_idx = tkn_elem->value;
3244 		break;
3245 
3246 	case SKL_TKN_MM_U32_FMT_ID:
3247 		if (!fmt)
3248 			return -EINVAL;
3249 
3250 		fmt->fmt_idx = tkn_elem->value;
3251 		intf_val_idx = tkn_elem->value;
3252 		break;
3253 
3254 	case SKL_TKN_MM_U32_CPS:
3255 	case SKL_TKN_MM_U32_DMA_SIZE:
3256 	case SKL_TKN_MM_U32_CPC:
3257 	case SKL_TKN_U32_MEM_PAGES:
3258 	case SKL_TKN_U32_OBS:
3259 	case SKL_TKN_U32_IBS:
3260 	case SKL_TKN_MM_U32_RES_PIN_ID:
3261 	case SKL_TKN_MM_U32_PIN_BUF:
3262 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
3263 		if (ret < 0)
3264 			return ret;
3265 
3266 		break;
3267 
3268 	case SKL_TKN_MM_U32_NUM_IN_FMT:
3269 		if (!res)
3270 			return -EINVAL;
3271 
3272 		res->nr_input_pins = tkn_elem->value;
3273 		break;
3274 
3275 	case SKL_TKN_MM_U32_NUM_OUT_FMT:
3276 		if (!res)
3277 			return -EINVAL;
3278 
3279 		res->nr_output_pins = tkn_elem->value;
3280 		break;
3281 
3282 	case SKL_TKN_U32_FMT_CH:
3283 	case SKL_TKN_U32_FMT_FREQ:
3284 	case SKL_TKN_U32_FMT_BIT_DEPTH:
3285 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
3286 	case SKL_TKN_U32_FMT_CH_CONFIG:
3287 	case SKL_TKN_U32_FMT_INTERLEAVE:
3288 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
3289 	case SKL_TKN_U32_FMT_CH_MAP:
3290 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3291 		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
3292 						 dir, pin_idx);
3293 		if (ret < 0)
3294 			return ret;
3295 		break;
3296 
3297 	default:
3298 		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
3299 		return -EINVAL;
3300 	}
3301 	tkn_count++;
3302 
3303 	return tkn_count;
3304 }
3305 
3306 static int skl_tplg_get_manifest_uuid(struct device *dev,
3307 				struct skl *skl,
3308 				struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
3309 {
3310 	static int ref_count;
3311 	struct skl_module *mod;
3312 
3313 	if (uuid_tkn->token == SKL_TKN_UUID) {
3314 		mod = skl->modules[ref_count];
3315 		memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid));
3316 		ref_count++;
3317 	} else {
3318 		dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
3319 		return -EINVAL;
3320 	}
3321 
3322 	return 0;
3323 }
3324 
3325 /*
3326  * Fill the manifest structure by parsing the tokens based on the
3327  * type.
3328  */
3329 static int skl_tplg_get_manifest_tkn(struct device *dev,
3330 		char *pvt_data, struct skl *skl,
3331 		int block_size)
3332 {
3333 	int tkn_count = 0, ret;
3334 	int off = 0, tuple_size = 0;
3335 	struct snd_soc_tplg_vendor_array *array;
3336 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
3337 
3338 	if (block_size <= 0)
3339 		return -EINVAL;
3340 
3341 	while (tuple_size < block_size) {
3342 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
3343 		off += array->size;
3344 		switch (array->type) {
3345 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
3346 			ret = skl_tplg_get_str_tkn(dev, array, skl);
3347 
3348 			if (ret < 0)
3349 				return ret;
3350 			tkn_count = ret;
3351 
3352 			tuple_size += tkn_count *
3353 				sizeof(struct snd_soc_tplg_vendor_string_elem);
3354 			continue;
3355 
3356 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
3357 			ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid);
3358 			if (ret < 0)
3359 				return ret;
3360 
3361 			tuple_size += sizeof(*array->uuid);
3362 			continue;
3363 
3364 		default:
3365 			tkn_elem = array->value;
3366 			tkn_count = 0;
3367 			break;
3368 		}
3369 
3370 		while (tkn_count <= array->num_elems - 1) {
3371 			ret = skl_tplg_get_int_tkn(dev,
3372 					tkn_elem, skl);
3373 			if (ret < 0)
3374 				return ret;
3375 
3376 			tkn_count = tkn_count + ret;
3377 			tkn_elem++;
3378 		}
3379 		tuple_size += (tkn_count * sizeof(*tkn_elem));
3380 		tkn_count = 0;
3381 	}
3382 
3383 	return off;
3384 }
3385 
3386 /*
3387  * Parse manifest private data for tokens. The private data block is
3388  * preceded by descriptors for type and size of data block.
3389  */
3390 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
3391 			struct device *dev, struct skl *skl)
3392 {
3393 	struct snd_soc_tplg_vendor_array *array;
3394 	int num_blocks, block_size = 0, block_type, off = 0;
3395 	char *data;
3396 	int ret;
3397 
3398 	/* Read the NUM_DATA_BLOCKS descriptor */
3399 	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
3400 	ret = skl_tplg_get_desc_blocks(dev, array);
3401 	if (ret < 0)
3402 		return ret;
3403 	num_blocks = ret;
3404 
3405 	off += array->size;
3406 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
3407 	while (num_blocks > 0) {
3408 		array = (struct snd_soc_tplg_vendor_array *)
3409 				(manifest->priv.data + off);
3410 		ret = skl_tplg_get_desc_blocks(dev, array);
3411 
3412 		if (ret < 0)
3413 			return ret;
3414 		block_type = ret;
3415 		off += array->size;
3416 
3417 		array = (struct snd_soc_tplg_vendor_array *)
3418 			(manifest->priv.data + off);
3419 
3420 		ret = skl_tplg_get_desc_blocks(dev, array);
3421 
3422 		if (ret < 0)
3423 			return ret;
3424 		block_size = ret;
3425 		off += array->size;
3426 
3427 		array = (struct snd_soc_tplg_vendor_array *)
3428 			(manifest->priv.data + off);
3429 
3430 		data = (manifest->priv.data + off);
3431 
3432 		if (block_type == SKL_TYPE_TUPLE) {
3433 			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
3434 					block_size);
3435 
3436 			if (ret < 0)
3437 				return ret;
3438 
3439 			--num_blocks;
3440 		} else {
3441 			return -EINVAL;
3442 		}
3443 		off += ret;
3444 	}
3445 
3446 	return 0;
3447 }
3448 
3449 static int skl_manifest_load(struct snd_soc_component *cmpnt,
3450 				struct snd_soc_tplg_manifest *manifest)
3451 {
3452 	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
3453 	struct hdac_bus *bus = ebus_to_hbus(ebus);
3454 	struct skl *skl = ebus_to_skl(ebus);
3455 
3456 	/* proceed only if we have private data defined */
3457 	if (manifest->priv.size == 0)
3458 		return 0;
3459 
3460 	skl_tplg_get_manifest_data(manifest, bus->dev, skl);
3461 
3462 	if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
3463 		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
3464 					skl->skl_sst->lib_count);
3465 		return  -EINVAL;
3466 	}
3467 
3468 	return 0;
3469 }
3470 
3471 static struct snd_soc_tplg_ops skl_tplg_ops = {
3472 	.widget_load = skl_tplg_widget_load,
3473 	.control_load = skl_tplg_control_load,
3474 	.bytes_ext_ops = skl_tlv_ops,
3475 	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
3476 	.io_ops = skl_tplg_kcontrol_ops,
3477 	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
3478 	.manifest = skl_manifest_load,
3479 	.dai_load = skl_dai_load,
3480 };
3481 
3482 /*
3483  * A pipe can have multiple modules, each of which will be a DAPM widget as
3484  * well. While managing a pipeline we need the list of all the widgets in
3485  * that pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
3486  * collects the SKL type widgets belonging to each pipeline
3487  */
3488 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
3489 {
3490 	struct snd_soc_dapm_widget *w;
3491 	struct skl_module_cfg *mcfg = NULL;
3492 	struct skl_pipe_module *p_module = NULL;
3493 	struct skl_pipe *pipe;
3494 
3495 	list_for_each_entry(w, &component->card->widgets, list) {
3496 		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
3497 			mcfg = w->priv;
3498 			pipe = mcfg->pipe;
3499 
3500 			p_module = devm_kzalloc(component->dev,
3501 						sizeof(*p_module), GFP_KERNEL);
3502 			if (!p_module)
3503 				return -ENOMEM;
3504 
3505 			p_module->w = w;
3506 			list_add_tail(&p_module->node, &pipe->w_list);
3507 		}
3508 	}
3509 
3510 	return 0;
3511 }
3512 
3513 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
3514 {
3515 	struct skl_pipe_module *w_module;
3516 	struct snd_soc_dapm_widget *w;
3517 	struct skl_module_cfg *mconfig;
3518 	bool host_found = false, link_found = false;
3519 
3520 	list_for_each_entry(w_module, &pipe->w_list, node) {
3521 		w = w_module->w;
3522 		mconfig = w->priv;
3523 
3524 		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
3525 			host_found = true;
3526 		else if (mconfig->dev_type != SKL_DEVICE_NONE)
3527 			link_found = true;
3528 	}
3529 
3530 	if (host_found && link_found)
3531 		pipe->passthru = true;
3532 	else
3533 		pipe->passthru = false;
3534 }
3535 
3536 /* This will be read from topology manifest, currently defined here */
3537 #define SKL_MAX_MCPS 30000000
3538 #define SKL_FW_MAX_MEM 1000000
3539 
3540 /*
3541  * SKL topology init routine
3542  */
3543 int skl_tplg_init(struct snd_soc_component *component, struct hdac_ext_bus *ebus)
3544 {
3545 	int ret;
3546 	const struct firmware *fw;
3547 	struct hdac_bus *bus = ebus_to_hbus(ebus);
3548 	struct skl *skl = ebus_to_skl(ebus);
3549 	struct skl_pipeline *ppl;
3550 
3551 	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3552 	if (ret < 0) {
3553 		dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin",
3554 				skl->tplg_name, ret);
3555 		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
3556 		if (ret < 0) {
3557 			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
3558 					"dfw_sst.bin", ret);
3559 			return ret;
3560 		}
3561 	}
3562 
3563 	/*
3564 	 * The complete tplg for SKL is loaded as index 0, we don't use
3565 	 * any other index
3566 	 */
3567 	ret = snd_soc_tplg_component_load(component,
3568 					&skl_tplg_ops, fw, 0);
3569 	if (ret < 0) {
3570 		dev_err(bus->dev, "tplg component load failed %d\n", ret);
3571 		release_firmware(fw);
3572 		return -EINVAL;
3573 	}
3574 
3575 	skl->resource.max_mcps = SKL_MAX_MCPS;
3576 	skl->resource.max_mem = SKL_FW_MAX_MEM;
3577 
3578 	skl->tplg = fw;
3579 	ret = skl_tplg_create_pipe_widget_list(component);
3580 	if (ret < 0)
3581 		return ret;
3582 
3583 	list_for_each_entry(ppl, &skl->ppl_list, node)
3584 		skl_tplg_set_pipe_type(skl, ppl->pipe);
3585 
3586 	return 0;
3587 }
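
/*
 * Usage note (a sketch of the expected call site, not verified here): the
 * platform component probe is expected to call skl_tplg_init() once, so
 * that the widget lists and pipe types derived above are in place before
 * any PCM stream is opened; the loaded topology firmware is kept in
 * skl->tplg for the lifetime of the driver.
 */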
3588