1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  skl-topology.c - Implements Platform component ALSA controls/widget
4  *  handlers.
5  *
6  *  Copyright (C) 2014-2015 Intel Corp
7  *  Author: Jeeja KP <jeeja.kp@intel.com>
8  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  */
10 
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/firmware.h>
14 #include <linux/uuid.h>
15 #include <sound/soc.h>
16 #include <sound/soc-topology.h>
17 #include <uapi/sound/snd_sst_tokens.h>
18 #include <uapi/sound/skl-tplg-interface.h>
19 #include "skl-sst-dsp.h"
20 #include "skl-sst-ipc.h"
21 #include "skl-topology.h"
22 #include "skl.h"
23 #include "../common/sst-dsp.h"
24 #include "../common/sst-dsp-priv.h"
25 
26 #define SKL_CH_FIXUP_MASK		(1 << 0)
27 #define SKL_RATE_FIXUP_MASK		(1 << 1)
28 #define SKL_FMT_FIXUP_MASK		(1 << 2)
29 #define SKL_IN_DIR_BIT_MASK		BIT(0)
30 #define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
31 
static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};
44 
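/*
 * True when the requested channel count, sample rate and bits per sample
 * all match the compared pipe-config format.
 */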
45 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
46 	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))
47 
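/*
 * Keep per-capability use counts for D0i3; skl_tplg_d0i3_get() increments
 * and skl_tplg_d0i3_put() decrements the matching counter.
 */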
48 void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
49 {
50 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
51 
52 	switch (caps) {
53 	case SKL_D0I3_NONE:
54 		d0i3->non_d0i3++;
55 		break;
56 
57 	case SKL_D0I3_STREAMING:
58 		d0i3->streaming++;
59 		break;
60 
61 	case SKL_D0I3_NON_STREAMING:
62 		d0i3->non_streaming++;
63 		break;
64 	}
65 }
66 
67 void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
68 {
69 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
70 
71 	switch (caps) {
72 	case SKL_D0I3_NONE:
73 		d0i3->non_d0i3--;
74 		break;
75 
76 	case SKL_D0I3_STREAMING:
77 		d0i3->streaming--;
78 		break;
79 
80 	case SKL_D0I3_NON_STREAMING:
81 		d0i3->non_streaming--;
82 		break;
83 	}
84 }
85 
/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether a widget type is handled by the SKL
 * driver.
 */
90 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
91 				  struct device *dev)
92 {
93 	if (w->dapm->dev != dev)
94 		return false;
95 
96 	switch (w->id) {
97 	case snd_soc_dapm_dai_link:
98 	case snd_soc_dapm_dai_in:
99 	case snd_soc_dapm_aif_in:
100 	case snd_soc_dapm_aif_out:
101 	case snd_soc_dapm_dai_out:
102 	case snd_soc_dapm_switch:
103 	case snd_soc_dapm_output:
104 	case snd_soc_dapm_mux:
106 		return false;
107 	default:
108 		return true;
109 	}
110 }
111 
/*
 * Each pipeline needs memory to be allocated. Check if we have free
 * memory available in the pool.
 */
116 static bool skl_is_pipe_mem_avail(struct skl *skl,
117 				struct skl_module_cfg *mconfig)
118 {
119 	struct skl_sst *ctx = skl->skl_sst;
120 
121 	if (skl->resource.mem + mconfig->pipe->memory_pages >
122 				skl->resource.max_mem) {
123 		dev_err(ctx->dev,
124 				"%s: module_id %d instance %d\n", __func__,
125 				mconfig->id.module_id,
126 				mconfig->id.instance_id);
127 		dev_err(ctx->dev,
128 				"exceeds ppl memory available %d mem %d\n",
129 				skl->resource.max_mem, skl->resource.mem);
130 		return false;
131 	} else {
132 		return true;
133 	}
134 }
135 
/*
 * Add the memory to the memory pool; it is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management, we only keep track of
 * the overall pool usage.
 */
141 static void skl_tplg_alloc_pipe_mem(struct skl *skl,
142 				struct skl_module_cfg *mconfig)
143 {
144 	skl->resource.mem += mconfig->pipe->memory_pages;
145 }
146 
/*
 * A pipeline needs DSP CPU resources for computation; this is quantified
 * in MCPS (Million Clocks Per Second) required for a module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
 * for this pipe.
 */
154 
155 static bool skl_is_pipe_mcps_avail(struct skl *skl,
156 				struct skl_module_cfg *mconfig)
157 {
158 	struct skl_sst *ctx = skl->skl_sst;
159 	u8 res_idx = mconfig->res_idx;
160 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
161 
162 	if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
163 		dev_err(ctx->dev,
164 			"%s: module_id %d instance %d\n", __func__,
165 			mconfig->id.module_id, mconfig->id.instance_id);
166 		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
168 			skl->resource.max_mcps, skl->resource.mcps);
169 		return false;
170 	} else {
171 		return true;
172 	}
173 }
174 
175 static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
176 				struct skl_module_cfg *mconfig)
177 {
178 	u8 res_idx = mconfig->res_idx;
179 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
180 
181 	skl->resource.mcps += res->cps;
182 }
183 
184 /*
185  * Free the mcps when tearing down
186  */
187 static void
188 skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
189 {
190 	u8 res_idx = mconfig->res_idx;
191 	struct skl_module_res *res = &mconfig->module->resources[res_idx];
192 
193 	skl->resource.mcps -= res->cps;
194 }
195 
196 /*
197  * Free the memory when tearing down
198  */
199 static void
200 skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
201 {
202 	skl->resource.mem -= mconfig->pipe->memory_pages;
203 }
204 
206 static void skl_dump_mconfig(struct skl_sst *ctx,
207 					struct skl_module_cfg *mcfg)
208 {
209 	struct skl_module_iface *iface = &mcfg->module->formats[0];
210 
211 	dev_dbg(ctx->dev, "Dumping config\n");
212 	dev_dbg(ctx->dev, "Input Format:\n");
213 	dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
214 	dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
215 	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
216 	dev_dbg(ctx->dev, "valid bit depth = %d\n",
217 				iface->inputs[0].fmt.valid_bit_depth);
218 	dev_dbg(ctx->dev, "Output Format:\n");
219 	dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
220 	dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
221 	dev_dbg(ctx->dev, "valid bit depth = %d\n",
222 				iface->outputs[0].fmt.valid_bit_depth);
223 	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
224 }
225 
226 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
227 {
228 	int slot_map = 0xFFFFFFFF;
229 	int start_slot = 0;
230 	int i;
231 
232 	for (i = 0; i < chs; i++) {
233 		/*
234 		 * For 2 channels with starting slot as 0, slot map will
235 		 * look like 0xFFFFFF10.
236 		 */
237 		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
238 		start_slot++;
239 	}
240 	fmt->ch_map = slot_map;
241 }
242 
243 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
244 			struct skl_pipe_params *params, int fixup)
245 {
246 	if (fixup & SKL_RATE_FIXUP_MASK)
247 		fmt->s_freq = params->s_freq;
248 	if (fixup & SKL_CH_FIXUP_MASK) {
249 		fmt->channels = params->ch;
250 		skl_tplg_update_chmap(fmt, fmt->channels);
251 	}
252 	if (fixup & SKL_FMT_FIXUP_MASK) {
253 		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
254 
		/*
		 * 16-bit samples use a 16-bit container, whereas 24-bit
		 * samples are carried in a 32-bit container, so update the
		 * container bit depth accordingly.
		 */
259 		switch (fmt->valid_bit_depth) {
260 		case SKL_DEPTH_16BIT:
261 			fmt->bit_depth = fmt->valid_bit_depth;
262 			break;
263 
264 		default:
265 			fmt->bit_depth = SKL_DEPTH_32BIT;
266 			break;
267 		}
268 	}
270 }
271 
/*
 * A pipeline may have modules which impact the PCM parameters, like SRC,
 * channel converter or format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * The topology tells the driver which fixups to apply by supplying the
 * fixup mask, and based on that we calculate the output format.
 *
 * For an FE the PCM hw_params is the source format (playback) or target
 * format (capture). The same applies to a BE when its hw_params is
 * invoked. So, based on FE/BE pipeline and direction, we calculate the
 * input and output fixups and then apply them to the module formats.
 */
284 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
285 		struct skl_pipe_params *params, bool is_fe)
286 {
287 	int in_fixup, out_fixup;
288 	struct skl_module_fmt *in_fmt, *out_fmt;
289 
290 	/* Fixups will be applied to pin 0 only */
291 	in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
292 	out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
293 
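	/*
	 * params_fixup selects which fields (rate/channels/format) follow
	 * the hw_params; converter marks fields this module converts itself,
	 * so those bits are dropped on the pin facing away from the fixed-up
	 * endpoint.
	 */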
294 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
295 		if (is_fe) {
296 			in_fixup = m_cfg->params_fixup;
297 			out_fixup = (~m_cfg->converter) &
298 					m_cfg->params_fixup;
299 		} else {
300 			out_fixup = m_cfg->params_fixup;
301 			in_fixup = (~m_cfg->converter) &
302 					m_cfg->params_fixup;
303 		}
304 	} else {
305 		if (is_fe) {
306 			out_fixup = m_cfg->params_fixup;
307 			in_fixup = (~m_cfg->converter) &
308 					m_cfg->params_fixup;
309 		} else {
310 			in_fixup = m_cfg->params_fixup;
311 			out_fixup = (~m_cfg->converter) &
312 					m_cfg->params_fixup;
313 		}
314 	}
315 
316 	skl_tplg_update_params(in_fmt, params, in_fixup);
317 	skl_tplg_update_params(out_fmt, params, out_fixup);
318 }
319 
/*
 * A module needs input and output buffers, which depend on the PCM
 * params. So once we have calculated the params, we need to calculate
 * the buffer sizes as well.
 */
325 static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
326 				struct skl_module_cfg *mcfg)
327 {
328 	int multiplier = 1;
329 	struct skl_module_fmt *in_fmt, *out_fmt;
330 	struct skl_module_res *res;
331 
	/*
	 * Since fixups are applied to pin 0 only, ibs and obs need to be
	 * updated for pin 0 only.
	 */
335 	res = &mcfg->module->resources[0];
336 	in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
337 	out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
338 
339 	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
340 		multiplier = 5;
341 
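	/*
	 * ibs/obs are the bytes consumed/produced per 1 ms of audio, e.g.
	 * 48000 Hz, 2 channels, 32-bit container -> 48 * 2 * 4 = 384 bytes
	 * (times 5 for the sample rate converter).
	 */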
342 	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
343 			in_fmt->channels * (in_fmt->bit_depth >> 3) *
344 			multiplier;
345 
346 	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
347 			out_fmt->channels * (out_fmt->bit_depth >> 3) *
348 			multiplier;
349 }
350 
351 static u8 skl_tplg_be_dev_type(int dev_type)
352 {
353 	int ret;
354 
355 	switch (dev_type) {
356 	case SKL_DEVICE_BT:
357 		ret = NHLT_DEVICE_BT;
358 		break;
359 
360 	case SKL_DEVICE_DMIC:
361 		ret = NHLT_DEVICE_DMIC;
362 		break;
363 
364 	case SKL_DEVICE_I2S:
365 		ret = NHLT_DEVICE_I2S;
366 		break;
367 
368 	default:
369 		ret = NHLT_DEVICE_INVALID;
370 		break;
371 	}
372 
373 	return ret;
374 }
375 
376 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
377 						struct skl_sst *ctx)
378 {
379 	struct skl_module_cfg *m_cfg = w->priv;
380 	int link_type, dir;
381 	u32 ch, s_freq, s_fmt;
382 	struct nhlt_specific_cfg *cfg;
383 	struct skl *skl = get_skl_ctx(ctx->dev);
384 	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
385 	int fmt_idx = m_cfg->fmt_idx;
386 	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];
387 
388 	/* check if we already have blob */
389 	if (m_cfg->formats_config.caps_size > 0)
390 		return 0;
391 
392 	dev_dbg(ctx->dev, "Applying default cfg blob\n");
393 	switch (m_cfg->dev_type) {
394 	case SKL_DEVICE_DMIC:
395 		link_type = NHLT_LINK_DMIC;
396 		dir = SNDRV_PCM_STREAM_CAPTURE;
397 		s_freq = m_iface->inputs[0].fmt.s_freq;
398 		s_fmt = m_iface->inputs[0].fmt.bit_depth;
399 		ch = m_iface->inputs[0].fmt.channels;
400 		break;
401 
402 	case SKL_DEVICE_I2S:
403 		link_type = NHLT_LINK_SSP;
404 		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
405 			dir = SNDRV_PCM_STREAM_PLAYBACK;
406 			s_freq = m_iface->outputs[0].fmt.s_freq;
407 			s_fmt = m_iface->outputs[0].fmt.bit_depth;
408 			ch = m_iface->outputs[0].fmt.channels;
409 		} else {
410 			dir = SNDRV_PCM_STREAM_CAPTURE;
411 			s_freq = m_iface->inputs[0].fmt.s_freq;
412 			s_fmt = m_iface->inputs[0].fmt.bit_depth;
413 			ch = m_iface->inputs[0].fmt.channels;
414 		}
415 		break;
416 
417 	default:
418 		return -EINVAL;
419 	}
420 
421 	/* update the blob based on virtual bus_id and default params */
422 	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
423 					s_fmt, ch, s_freq, dir, dev_type);
424 	if (cfg) {
425 		m_cfg->formats_config.caps_size = cfg->size;
426 		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
427 	} else {
428 		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
429 					m_cfg->vbus_id, link_type, dir);
430 		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
431 					ch, s_freq, s_fmt);
432 		return -EIO;
433 	}
434 
435 	return 0;
436 }
437 
438 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
439 							struct skl_sst *ctx)
440 {
441 	struct skl_module_cfg *m_cfg = w->priv;
442 	struct skl_pipe_params *params = m_cfg->pipe->p_params;
443 	int p_conn_type = m_cfg->pipe->conn_type;
444 	bool is_fe;
445 
446 	if (!m_cfg->params_fixup)
447 		return;
448 
	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE update\n",
450 				w->name);
451 
452 	skl_dump_mconfig(ctx, m_cfg);
453 
454 	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
455 		is_fe = true;
456 	else
457 		is_fe = false;
458 
459 	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
460 	skl_tplg_update_buffer_size(ctx, m_cfg);
461 
	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER update\n",
463 				w->name);
464 
465 	skl_dump_mconfig(ctx, m_cfg);
466 }
467 
/*
 * Some modules can have multiple params set from user controls which
 * need to be applied after the module is initialized. If the set_params
 * flag is SKL_PARAM_SET, the module params are sent after the module has
 * been initialized.
 */
473 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
474 						struct skl_sst *ctx)
475 {
476 	int i, ret;
477 	struct skl_module_cfg *mconfig = w->priv;
478 	const struct snd_kcontrol_new *k;
479 	struct soc_bytes_ext *sb;
480 	struct skl_algo_data *bc;
481 	struct skl_specific_cfg *sp_cfg;
482 
483 	if (mconfig->formats_config.caps_size > 0 &&
484 		mconfig->formats_config.set_params == SKL_PARAM_SET) {
485 		sp_cfg = &mconfig->formats_config;
486 		ret = skl_set_module_params(ctx, sp_cfg->caps,
487 					sp_cfg->caps_size,
488 					sp_cfg->param_id, mconfig);
489 		if (ret < 0)
490 			return ret;
491 	}
492 
493 	for (i = 0; i < w->num_kcontrols; i++) {
494 		k = &w->kcontrol_news[i];
495 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
496 			sb = (void *) k->private_value;
497 			bc = (struct skl_algo_data *)sb->dobj.private;
498 
499 			if (bc->set_params == SKL_PARAM_SET) {
500 				ret = skl_set_module_params(ctx,
501 						(u32 *)bc->params, bc->size,
502 						bc->param_id, mconfig);
503 				if (ret < 0)
504 					return ret;
505 			}
506 		}
507 	}
508 
509 	return 0;
510 }
511 
/*
 * Some module params can be set from user controls and are required at
 * module init time. Such a parameter is identified by the SKL_PARAM_INIT
 * value of its set_params flag, and is then passed as part of the module
 * init data.
 */
518 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
519 {
520 	const struct snd_kcontrol_new *k;
521 	struct soc_bytes_ext *sb;
522 	struct skl_algo_data *bc;
523 	struct skl_module_cfg *mconfig = w->priv;
524 	int i;
525 
526 	for (i = 0; i < w->num_kcontrols; i++) {
527 		k = &w->kcontrol_news[i];
528 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
529 			sb = (struct soc_bytes_ext *)k->private_value;
530 			bc = (struct skl_algo_data *)sb->dobj.private;
531 
532 			if (bc->set_params != SKL_PARAM_INIT)
533 				continue;
534 
535 			mconfig->formats_config.caps = (u32 *)bc->params;
536 			mconfig->formats_config.caps_size = bc->size;
537 
538 			break;
539 		}
540 	}
541 
542 	return 0;
543 }
544 
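/*
 * Host and link copiers need their DMA prepared before the module is
 * initialized; other device types need no preparation here.
 */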
545 static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
546 		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
547 {
548 	switch (mcfg->dev_type) {
549 	case SKL_DEVICE_HDAHOST:
550 		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
551 
552 	case SKL_DEVICE_HDALINK:
553 		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
554 	}
555 
556 	return 0;
557 }
558 
/*
 * Inside a pipe instance we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules
 * in a pipeline.
 */
564 static int
565 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
566 {
567 	struct skl_pipe_module *w_module;
568 	struct snd_soc_dapm_widget *w;
569 	struct skl_module_cfg *mconfig;
570 	struct skl_sst *ctx = skl->skl_sst;
571 	u8 cfg_idx;
572 	int ret = 0;
573 
574 	list_for_each_entry(w_module, &pipe->w_list, node) {
575 		guid_t *uuid_mod;
576 		w = w_module->w;
577 		mconfig = w->priv;
578 
579 		/* check if module ids are populated */
580 		if (mconfig->id.module_id < 0) {
581 			dev_err(skl->skl_sst->dev,
582 					"module %pUL id not populated\n",
583 					(guid_t *)mconfig->guid);
584 			return -EIO;
585 		}
586 
587 		cfg_idx = mconfig->pipe->cur_config_idx;
588 		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
589 		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
590 
591 		/* check resource available */
592 		if (!skl_is_pipe_mcps_avail(skl, mconfig))
593 			return -ENOMEM;
594 
595 		if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) {
596 			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
597 				mconfig->id.module_id, mconfig->guid);
598 			if (ret < 0)
599 				return ret;
600 
601 			mconfig->m_state = SKL_MODULE_LOADED;
602 		}
603 
		/* prepare the DMA if the module is a gateway copier */
605 		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
606 		if (ret < 0)
607 			return ret;
608 
		/* for a BE, fill in a default blob if none is set yet */
610 		skl_tplg_update_be_blob(w, ctx);
611 
612 		/*
613 		 * apply fix/conversion to module params based on
614 		 * FE/BE params
615 		 */
616 		skl_tplg_update_module_params(w, ctx);
617 		uuid_mod = (guid_t *)mconfig->guid;
618 		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
619 						mconfig->id.instance_id);
620 		if (mconfig->id.pvt_id < 0)
			return mconfig->id.pvt_id;
622 		skl_tplg_set_module_init_data(w);
623 
624 		ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id);
625 		if (ret < 0) {
626 			dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n",
627 						mconfig->core_id, ret);
628 			return ret;
629 		}
630 
631 		ret = skl_init_module(ctx, mconfig);
632 		if (ret < 0) {
633 			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
634 			goto err;
635 		}
636 		skl_tplg_alloc_pipe_mcps(skl, mconfig);
637 		ret = skl_tplg_set_module_params(w, ctx);
638 		if (ret < 0)
639 			goto err;
640 	}
641 
642 	return 0;
643 err:
644 	skl_dsp_put_core(ctx->dsp, mconfig->core_id);
645 	return ret;
646 }
647 
648 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
649 	 struct skl_pipe *pipe)
650 {
651 	int ret = 0;
652 	struct skl_pipe_module *w_module = NULL;
653 	struct skl_module_cfg *mconfig = NULL;
654 
655 	list_for_each_entry(w_module, &pipe->w_list, node) {
656 		guid_t *uuid_mod;
657 		mconfig  = w_module->w->priv;
658 		uuid_mod = (guid_t *)mconfig->guid;
659 
660 		if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod &&
661 			mconfig->m_state > SKL_MODULE_UNINIT) {
662 			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
663 						mconfig->id.module_id);
664 			if (ret < 0)
665 				return -EIO;
666 		}
667 		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
668 
669 		ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id);
670 		if (ret < 0) {
671 			/* don't return; continue with other modules */
672 			dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n",
673 				mconfig->core_id, ret);
674 		}
675 	}
676 
	/* ret holds the status of the last module handled, 0 if none were unloaded */
678 	return ret;
679 }
680 
681 /*
682  * Here, we select pipe format based on the pipe type and pipe
683  * direction to determine the current config index for the pipeline.
684  * The config index is then used to select proper module resources.
685  * Intermediate pipes currently have a fixed format hence we select the
 * 0th configuration by default for such pipes.
687  */
688 static int
689 skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
690 {
691 	struct skl_sst *ctx = skl->skl_sst;
692 	struct skl_pipe *pipe = mconfig->pipe;
693 	struct skl_pipe_params *params = pipe->p_params;
694 	struct skl_path_config *pconfig = &pipe->configs[0];
695 	struct skl_pipe_fmt *fmt = NULL;
696 	bool in_fmt = false;
697 	int i;
698 
699 	if (pipe->nr_cfgs == 0) {
700 		pipe->cur_config_idx = 0;
701 		return 0;
702 	}
703 
704 	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
705 		dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
706 		pipe->cur_config_idx = 0;
707 		pipe->memory_pages = pconfig->mem_pages;
708 
709 		return 0;
710 	}
711 
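	/*
	 * For an FE playback pipe or a BE capture pipe, data enters the pipe
	 * on its input side, so match hw_params against the input format;
	 * otherwise match against the output format.
	 */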
712 	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
713 	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
714 	     (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
715 	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
716 		in_fmt = true;
717 
718 	for (i = 0; i < pipe->nr_cfgs; i++) {
719 		pconfig = &pipe->configs[i];
720 		if (in_fmt)
721 			fmt = &pconfig->in_fmt;
722 		else
723 			fmt = &pconfig->out_fmt;
724 
725 		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
726 				    fmt->channels, fmt->freq, fmt->bps)) {
727 			pipe->cur_config_idx = i;
728 			pipe->memory_pages = pconfig->mem_pages;
729 			dev_dbg(ctx->dev, "Using pipe config: %d\n", i);
730 
731 			return 0;
732 		}
733 	}
734 
735 	dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
736 		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
737 	return -EINVAL;
738 }
739 
/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 *   - check the resources
 *   - create the pipeline
 *   - initialize the modules in the pipeline
 *   - finally bind all modules together
 */
748 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
749 							struct skl *skl)
750 {
751 	int ret;
752 	struct skl_module_cfg *mconfig = w->priv;
753 	struct skl_pipe_module *w_module;
754 	struct skl_pipe *s_pipe = mconfig->pipe;
755 	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
756 	struct skl_sst *ctx = skl->skl_sst;
757 	struct skl_module_deferred_bind *modules;
758 
759 	ret = skl_tplg_get_pipe_config(skl, mconfig);
760 	if (ret < 0)
761 		return ret;
762 
763 	/* check resource available */
764 	if (!skl_is_pipe_mcps_avail(skl, mconfig))
765 		return -EBUSY;
766 
767 	if (!skl_is_pipe_mem_avail(skl, mconfig))
768 		return -ENOMEM;
769 
770 	/*
771 	 * Create a list of modules for pipe.
772 	 * This list contains modules from source to sink
773 	 */
774 	ret = skl_create_pipeline(ctx, mconfig->pipe);
775 	if (ret < 0)
776 		return ret;
777 
778 	skl_tplg_alloc_pipe_mem(skl, mconfig);
779 	skl_tplg_alloc_pipe_mcps(skl, mconfig);
780 
781 	/* Init all pipe modules from source to sink */
782 	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
783 	if (ret < 0)
784 		return ret;
785 
786 	/* Bind modules from source to sink */
787 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
788 		dst_module = w_module->w->priv;
789 
790 		if (src_module == NULL) {
791 			src_module = dst_module;
792 			continue;
793 		}
794 
795 		ret = skl_bind_modules(ctx, src_module, dst_module);
796 		if (ret < 0)
797 			return ret;
798 
799 		src_module = dst_module;
800 	}
801 
802 	/*
803 	 * When the destination module is initialized, check for these modules
804 	 * in deferred bind list. If found, bind them.
805 	 */
806 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
807 		if (list_empty(&skl->bind_list))
808 			break;
809 
810 		list_for_each_entry(modules, &skl->bind_list, node) {
811 			module = w_module->w->priv;
812 			if (modules->dst == module)
813 				skl_bind_modules(ctx, modules->src,
814 							modules->dst);
815 		}
816 	}
817 
818 	return 0;
819 }
820 
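/*
 * KPB post-bind params carry a list of module-id/instance-id pairs; the
 * instance ids coming from the topology must be translated to the driver
 * assigned pvt ids before the blob is sent to the firmware.
 */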
821 static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
822 				int size, struct skl_module_cfg *mcfg)
823 {
824 	int i, pvt_id;
825 
826 	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
827 		struct skl_kpb_params *kpb_params =
828 				(struct skl_kpb_params *)params;
829 		struct skl_mod_inst_map *inst = kpb_params->u.map;
830 
831 		for (i = 0; i < kpb_params->num_modules; i++) {
832 			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
833 								inst->inst_id);
834 			if (pvt_id < 0)
835 				return -EINVAL;
836 
837 			inst->inst_id = pvt_id;
838 			inst++;
839 		}
840 	}
841 
842 	return 0;
843 }

/*
 * Some modules require params to be set after the module is bound on all
 * of its connected pins.
 *
 * The module provider sets the set_params flag to SKL_PARAM_BIND for such
 * modules and we send the params after binding.
 */
851 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
852 			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
853 {
854 	int i, ret;
855 	struct skl_module_cfg *mconfig = w->priv;
856 	const struct snd_kcontrol_new *k;
857 	struct soc_bytes_ext *sb;
858 	struct skl_algo_data *bc;
859 	struct skl_specific_cfg *sp_cfg;
860 	u32 *params;
861 
	/*
	 * Check that all output and input pins are in the bind-done state;
	 * only then set the module params.
	 */
866 	for (i = 0; i < mcfg->module->max_output_pins; i++) {
867 		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
868 			return 0;
869 	}
870 
871 	for (i = 0; i < mcfg->module->max_input_pins; i++) {
872 		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
873 			return 0;
874 	}
875 
876 	if (mconfig->formats_config.caps_size > 0 &&
877 		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
878 		sp_cfg = &mconfig->formats_config;
879 		ret = skl_set_module_params(ctx, sp_cfg->caps,
880 					sp_cfg->caps_size,
881 					sp_cfg->param_id, mconfig);
882 		if (ret < 0)
883 			return ret;
884 	}
885 
886 	for (i = 0; i < w->num_kcontrols; i++) {
887 		k = &w->kcontrol_news[i];
888 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
889 			sb = (void *) k->private_value;
890 			bc = (struct skl_algo_data *)sb->dobj.private;
891 
892 			if (bc->set_params == SKL_PARAM_BIND) {
893 				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
894 				if (!params)
895 					return -ENOMEM;
896 
897 				skl_fill_sink_instance_id(ctx, params, bc->max,
898 								mconfig);
899 
900 				ret = skl_set_module_params(ctx, params,
901 						bc->max, bc->param_id, mconfig);
902 				kfree(params);
903 
904 				if (ret < 0)
905 					return ret;
906 			}
907 		}
908 	}
909 
910 	return 0;
911 }
912 
913 static int skl_get_module_id(struct skl_sst *ctx, guid_t *uuid)
914 {
915 	struct uuid_module *module;
916 
917 	list_for_each_entry(module, &ctx->uuid_list, list) {
918 		if (guid_equal(uuid, &module->uuid))
919 			return module->id;
920 	}
921 
922 	return -EINVAL;
923 }
924 
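/*
 * Rewrite KPB post-bind params from the UUID + instance form used by the
 * topology into the module-id + instance form expected by the firmware;
 * bc->params is replaced with the converted blob.
 */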
925 static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
926 					const struct snd_kcontrol_new *k)
927 {
928 	struct soc_bytes_ext *sb = (void *) k->private_value;
929 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
930 	struct skl_kpb_params *uuid_params, *params;
931 	struct hdac_bus *bus = skl_to_bus(skl);
932 	int i, size, module_id;
933 
934 	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
935 		uuid_params = (struct skl_kpb_params *)bc->params;
936 		size = struct_size(params, u.map, uuid_params->num_modules);
937 
938 		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
939 		if (!params)
940 			return -ENOMEM;
941 
942 		params->num_modules = uuid_params->num_modules;
943 
944 		for (i = 0; i < uuid_params->num_modules; i++) {
945 			module_id = skl_get_module_id(skl->skl_sst,
946 				&uuid_params->u.map_uuid[i].mod_uuid);
947 			if (module_id < 0) {
948 				devm_kfree(bus->dev, params);
949 				return -EINVAL;
950 			}
951 
952 			params->u.map[i].mod_id = module_id;
953 			params->u.map[i].inst_id =
954 				uuid_params->u.map_uuid[i].inst_id;
955 		}
956 
957 		devm_kfree(bus->dev, bc->params);
958 		bc->params = (char *)params;
959 		bc->max = size;
960 	}
961 
962 	return 0;
963 }
964 
965 /*
966  * Retrieve the module id from UUID mentioned in the
967  * post bind params
968  */
969 void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
970 				struct snd_soc_dapm_widget *w)
971 {
972 	struct skl_module_cfg *mconfig = w->priv;
973 	int i;
974 
	/*
	 * Post-bind params are used only for KPB to set the copier
	 * instances that drain the data in fast mode.
	 */
980 	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
981 		return;
982 
983 	for (i = 0; i < w->num_kcontrols; i++)
984 		if ((w->kcontrol_news[i].access &
985 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
986 			(skl_tplg_find_moduleid_from_uuid(skl,
987 			&w->kcontrol_news[i]) < 0))
988 			dev_err(skl->skl_sst->dev,
989 				"%s: invalid kpb post bind params\n",
990 				__func__);
991 }
992 
993 static int skl_tplg_module_add_deferred_bind(struct skl *skl,
994 	struct skl_module_cfg *src, struct skl_module_cfg *dst)
995 {
996 	struct skl_module_deferred_bind *m_list, *modules;
997 	int i;
998 
	/* only supported for modules with static pin connections */
1000 	for (i = 0; i < dst->module->max_input_pins; i++) {
1001 		struct skl_module_pin *pin = &dst->m_in_pin[i];
1002 
1003 		if (pin->is_dynamic)
1004 			continue;
1005 
1006 		if ((pin->id.module_id  == src->id.module_id) &&
1007 			(pin->id.instance_id  == src->id.instance_id)) {
1008 
1009 			if (!list_empty(&skl->bind_list)) {
1010 				list_for_each_entry(modules, &skl->bind_list, node) {
1011 					if (modules->src == src && modules->dst == dst)
1012 						return 0;
1013 				}
1014 			}
1015 
1016 			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
1017 			if (!m_list)
1018 				return -ENOMEM;
1019 
1020 			m_list->src = src;
1021 			m_list->dst = dst;
1022 
1023 			list_add(&m_list->node, &skl->bind_list);
1024 		}
1025 	}
1026 
1027 	return 0;
1028 }
1029 
1030 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
1031 				struct skl *skl,
1032 				struct snd_soc_dapm_widget *src_w,
1033 				struct skl_module_cfg *src_mconfig)
1034 {
1035 	struct snd_soc_dapm_path *p;
1036 	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
1037 	struct skl_module_cfg *sink_mconfig;
1038 	struct skl_sst *ctx = skl->skl_sst;
1039 	int ret;
1040 
1041 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1042 		if (!p->connect)
1043 			continue;
1044 
1045 		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
1046 		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
1047 
1048 		next_sink = p->sink;
1049 
1050 		if (!is_skl_dsp_widget_type(p->sink, ctx->dev))
1051 			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
1052 
		/*
		 * Here we check widgets in the sink pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * handled by the SKL driver, so check that first.
		 */
1058 		if ((p->sink->priv != NULL) &&
1059 				is_skl_dsp_widget_type(p->sink, ctx->dev)) {
1060 
1061 			sink = p->sink;
1062 			sink_mconfig = sink->priv;
1063 
			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. a reference path.
			 * When such a path is enabled, the destination module
			 * that needs to be bound may not be initialized yet.
			 * In that case add the modules to the deferred bind
			 * list, and once the destination module is
			 * initialized, bind this module to it from that list.
			 */
1074 			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
1075 				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
1076 
1077 				ret = skl_tplg_module_add_deferred_bind(skl,
1078 						src_mconfig, sink_mconfig);
1079 
1080 				if (ret < 0)
1081 					return ret;
1082 
1083 			}
1084 
1086 			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
1087 				sink_mconfig->m_state == SKL_MODULE_UNINIT)
1088 				continue;
1089 
1090 			/* Bind source to sink, mixin is always source */
1091 			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1092 			if (ret)
1093 				return ret;
1094 
1095 			/* set module params after bind */
1096 			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
1097 			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1098 
1099 			/* Start sinks pipe first */
1100 			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
1101 				if (sink_mconfig->pipe->conn_type !=
1102 							SKL_PIPE_CONN_TYPE_FE)
1103 					ret = skl_run_pipe(ctx,
1104 							sink_mconfig->pipe);
1105 				if (ret)
1106 					return ret;
1107 			}
1108 		}
1109 	}
1110 
1111 	if (!sink && next_sink)
1112 		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
1113 
1114 	return 0;
1115 }
1116 
/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of a
 * PGA we need to do the following:
 *   - Bind to the sink pipeline
 *      Since the sink pipes can be running and we don't get a mixer event
 *      on connect for an already running mixer, we need to find the sink
 *      pipes here and bind to them. This way dynamic connect works.
 *   - Start the sink pipeline, if not running
 *   - Then run the current pipe
 */
1127 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
1128 								struct skl *skl)
1129 {
1130 	struct skl_module_cfg *src_mconfig;
1131 	struct skl_sst *ctx = skl->skl_sst;
1132 	int ret = 0;
1133 
1134 	src_mconfig = w->priv;
1135 
1136 	/*
1137 	 * find which sink it is connected to, bind with the sink,
1138 	 * if sink is not started, start sink pipe first, then start
1139 	 * this pipe
1140 	 */
1141 	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
1142 	if (ret)
1143 		return ret;
1144 
1145 	/* Start source pipe last after starting all sinks */
1146 	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1147 		return skl_run_pipe(ctx, src_mconfig->pipe);
1148 
1149 	return 0;
1150 }
1151 
1152 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
1153 		struct snd_soc_dapm_widget *w, struct skl *skl)
1154 {
1155 	struct snd_soc_dapm_path *p;
1156 	struct snd_soc_dapm_widget *src_w = NULL;
1157 	struct skl_sst *ctx = skl->skl_sst;
1158 
1159 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1160 		src_w = p->source;
1161 		if (!p->connect)
1162 			continue;
1163 
1164 		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
1165 		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
1166 
		/*
		 * Here we check widgets in the source pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * handled by the SKL driver, so check that first.
		 */
1172 		if ((p->source->priv != NULL) &&
1173 				is_skl_dsp_widget_type(p->source, ctx->dev)) {
1174 			return p->source;
1175 		}
1176 	}
1177 
1178 	if (src_w != NULL)
1179 		return skl_get_src_dsp_widget(src_w, skl);
1180 
1181 	return NULL;
1182 }
1183 
/*
 * In the Post-PMU event of the mixer we need to do the following:
 *   - Check if this pipe is running
 *   - if not, then
 *	- bind this pipeline to its source pipeline
 *	  if the source pipe is already running, this means it is a dynamic
 *	  connection and we need to bind only to that pipe
 *	- start this pipeline
 */
1193 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1194 							struct skl *skl)
1195 {
1196 	int ret = 0;
1197 	struct snd_soc_dapm_widget *source, *sink;
1198 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1199 	struct skl_sst *ctx = skl->skl_sst;
1200 	int src_pipe_started = 0;
1201 
1202 	sink = w;
1203 	sink_mconfig = sink->priv;
1204 
	/*
	 * If the source pipe is already started, the source was driving
	 * another sink before this sink got connected. Since the source is
	 * started, bind this sink to the source and start this pipe.
	 */
1210 	source = skl_get_src_dsp_widget(w, skl);
1211 	if (source != NULL) {
1212 		src_mconfig = source->priv;
1213 		sink_mconfig = sink->priv;
1214 		src_pipe_started = 1;
1215 
		/*
		 * Check the pipe state; if the source pipe is not started
		 * there is no need to bind or start this pipe here.
		 */
1220 		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1221 			src_pipe_started = 0;
1222 	}
1223 
1224 	if (src_pipe_started) {
1225 		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1226 		if (ret)
1227 			return ret;
1228 
1229 		/* set module params after bind */
1230 		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
1231 		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1232 
1233 		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1234 			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
1235 	}
1236 
1237 	return ret;
1238 }
1239 
/*
 * In the Pre-PMD event of the mixer we need to do the following:
 *   - Stop the pipe
 *   - find the source connections and remove them from the dapm_path_list
 *   - unbind from the source pipelines if still connected
 */
1246 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1247 							struct skl *skl)
1248 {
1249 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1250 	int ret = 0, i;
1251 	struct skl_sst *ctx = skl->skl_sst;
1252 
1253 	sink_mconfig = w->priv;
1254 
1255 	/* Stop the pipe */
1256 	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
1257 	if (ret)
1258 		return ret;
1259 
1260 	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
1261 		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1262 			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1263 			if (!src_mconfig)
1264 				continue;
1265 
1266 			ret = skl_unbind_modules(ctx,
1267 						src_mconfig, sink_mconfig);
1268 		}
1269 	}
1270 
1271 	return ret;
1272 }
1273 
/*
 * In the Post-PMD event of the mixer we need to do the following:
 *   - Free the mcps used
 *   - Free the memory used
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, the pipeline delete is enough here)
 */
1282 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1283 							struct skl *skl)
1284 {
1285 	struct skl_module_cfg *mconfig = w->priv;
1286 	struct skl_pipe_module *w_module;
1287 	struct skl_module_cfg *src_module = NULL, *dst_module;
1288 	struct skl_sst *ctx = skl->skl_sst;
1289 	struct skl_pipe *s_pipe = mconfig->pipe;
1290 	struct skl_module_deferred_bind *modules, *tmp;
1291 
1292 	if (s_pipe->state == SKL_PIPE_INVALID)
1293 		return -EINVAL;
1294 
1295 	skl_tplg_free_pipe_mcps(skl, mconfig);
1296 	skl_tplg_free_pipe_mem(skl, mconfig);
1297 
1298 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1299 		if (list_empty(&skl->bind_list))
1300 			break;
1301 
1302 		src_module = w_module->w->priv;
1303 
1304 		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1305 			/*
1306 			 * When the destination module is deleted, Unbind the
1307 			 * modules from deferred bind list.
1308 			 */
1309 			if (modules->dst == src_module) {
1310 				skl_unbind_modules(ctx, modules->src,
1311 						modules->dst);
1312 			}
1313 
1314 			/*
1315 			 * When the source module is deleted, remove this entry
1316 			 * from the deferred bind list.
1317 			 */
1318 			if (modules->src == src_module) {
1319 				list_del(&modules->node);
1320 				modules->src = NULL;
1321 				modules->dst = NULL;
1322 				kfree(modules);
1323 			}
1324 		}
1325 	}
1326 
1327 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1328 		dst_module = w_module->w->priv;
1329 
1330 		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
1331 			skl_tplg_free_pipe_mcps(skl, dst_module);
1332 		if (src_module == NULL) {
1333 			src_module = dst_module;
1334 			continue;
1335 		}
1336 
1337 		skl_unbind_modules(ctx, src_module, dst_module);
1338 		src_module = dst_module;
1339 	}
1340 
1341 	skl_delete_pipe(ctx, mconfig->pipe);
1342 
1343 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1344 		src_module = w_module->w->priv;
1345 		src_module->m_state = SKL_MODULE_UNINIT;
1346 	}
1347 
1348 	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1349 }
1350 
/*
 * In the Post-PMD event of the PGA we need to do the following:
 *   - Free the mcps used
 *   - Stop the pipeline
 *   - If connected to sink pipes, unbind from them
 */
1357 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1358 								struct skl *skl)
1359 {
1360 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1361 	int ret = 0, i;
1362 	struct skl_sst *ctx = skl->skl_sst;
1363 
1364 	src_mconfig = w->priv;
1365 
1366 	/* Stop the pipe since this is a mixin module */
1367 	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1368 	if (ret)
1369 		return ret;
1370 
1371 	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
1372 		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1373 			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1374 			if (!sink_mconfig)
1375 				continue;
			/*
			 * This is a connector; if a path is found it means
			 * the unbind between source and sink has not
			 * happened yet.
			 */
1380 			ret = skl_unbind_modules(ctx, src_mconfig,
1381 							sink_mconfig);
1382 		}
1383 	}
1384 
1385 	return ret;
1386 }
1387 
/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
 * a second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
1394 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1395 				struct snd_kcontrol *k, int event)
1396 {
1397 	struct snd_soc_dapm_context *dapm = w->dapm;
1398 	struct skl *skl = get_skl_ctx(dapm->dev);
1399 
1400 	switch (event) {
1401 	case SND_SOC_DAPM_PRE_PMU:
1402 		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1403 
1404 	case SND_SOC_DAPM_POST_PMU:
1405 		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1406 
1407 	case SND_SOC_DAPM_PRE_PMD:
1408 		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1409 
1410 	case SND_SOC_DAPM_POST_PMD:
1411 		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1412 	}
1413 
1414 	return 0;
1415 }
1416 
/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are only interested in the last PGA (leaf PGA) of a pipeline, to
 * disconnect from the sink while it is running (two FEs to one BE or one
 * FE to two BEs scenarios).
 */
1423 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1424 			struct snd_kcontrol *k, int event)
1426 {
1427 	struct snd_soc_dapm_context *dapm = w->dapm;
1428 	struct skl *skl = get_skl_ctx(dapm->dev);
1429 
1430 	switch (event) {
1431 	case SND_SOC_DAPM_PRE_PMU:
1432 		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1433 
1434 	case SND_SOC_DAPM_POST_PMD:
1435 		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1442 			unsigned int __user *data, unsigned int size)
1443 {
1444 	struct soc_bytes_ext *sb =
1445 			(struct soc_bytes_ext *)kcontrol->private_value;
1446 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1447 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1448 	struct skl_module_cfg *mconfig = w->priv;
1449 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1450 
1451 	if (w->power)
1452 		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1453 				      bc->size, bc->param_id, mconfig);
1454 
1455 	/* decrement size for TLV header */
1456 	size -= 2 * sizeof(u32);
1457 
1458 	/* check size as we don't want to send kernel data */
1459 	if (size > bc->max)
1460 		size = bc->max;
1461 
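	/*
	 * Data is returned in TLV form: a u32 param id, a u32 payload size,
	 * then the payload itself.
	 */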
1462 	if (bc->params) {
1463 		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1464 			return -EFAULT;
1465 		if (copy_to_user(data + 1, &size, sizeof(u32)))
1466 			return -EFAULT;
1467 		if (copy_to_user(data + 2, bc->params, size))
1468 			return -EFAULT;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 #define SKL_PARAM_VENDOR_ID 0xff
1475 
1476 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1477 			const unsigned int __user *data, unsigned int size)
1478 {
1479 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1480 	struct skl_module_cfg *mconfig = w->priv;
1481 	struct soc_bytes_ext *sb =
1482 			(struct soc_bytes_ext *)kcontrol->private_value;
1483 	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1484 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1485 
1486 	if (ac->params) {
1487 		/*
1488 		 * Widget data is expected to be stripped of T and L
1489 		 */
1490 		size -= 2 * sizeof(unsigned int);
1491 		data += 2;
1492 
1493 		if (size > ac->max)
1494 			return -EINVAL;
1495 		ac->size = size;
1496 
1497 		if (copy_from_user(ac->params, data, size))
1498 			return -EFAULT;
1499 
1500 		if (w->power)
1501 			return skl_set_module_params(skl->skl_sst,
1502 						(u32 *)ac->params, ac->size,
1503 						ac->param_id, mconfig);
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
1510 		struct snd_ctl_elem_value *ucontrol)
1511 {
1512 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1513 	struct skl_module_cfg *mconfig = w->priv;
1514 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1515 	u32 ch_type = *((u32 *)ec->dobj.private);
1516 
1517 	if (mconfig->dmic_ch_type == ch_type)
1518 		ucontrol->value.enumerated.item[0] =
1519 					mconfig->dmic_ch_combo_index;
1520 	else
1521 		ucontrol->value.enumerated.item[0] = 0;
1522 
1523 	return 0;
1524 }
1525 
1526 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
1527 	struct skl_mic_sel_config *mic_cfg, struct device *dev)
1528 {
1529 	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;
1530 
1531 	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
1532 	sp_cfg->set_params = SKL_PARAM_SET;
1533 	sp_cfg->param_id = 0x00;
1534 	if (!sp_cfg->caps) {
1535 		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
1536 		if (!sp_cfg->caps)
1537 			return -ENOMEM;
1538 	}
1539 
1540 	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
1541 	mic_cfg->flags = 0;
1542 	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
1543 
1544 	return 0;
1545 }
1546 
1547 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
1548 			struct snd_ctl_elem_value *ucontrol)
1549 {
1550 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1551 	struct skl_module_cfg *mconfig = w->priv;
1552 	struct skl_mic_sel_config mic_cfg = {0};
1553 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1554 	u32 ch_type = *((u32 *)ec->dobj.private);
1555 	const int *list;
1556 	u8 in_ch, out_ch, index;
1557 
1558 	mconfig->dmic_ch_type = ch_type;
1559 	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
1560 
1561 	/* enum control index 0 is INVALID, so no channels to be set */
1562 	if (mconfig->dmic_ch_combo_index == 0)
1563 		return 0;
1564 
1565 	/* No valid channel selection map for index 0, so offset by 1 */
1566 	index = mconfig->dmic_ch_combo_index - 1;
1567 
1568 	switch (ch_type) {
1569 	case SKL_CH_MONO:
1570 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
1571 			return -EINVAL;
1572 
1573 		list = &mic_mono_list[index];
1574 		break;
1575 
1576 	case SKL_CH_STEREO:
1577 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
1578 			return -EINVAL;
1579 
1580 		list = mic_stereo_list[index];
1581 		break;
1582 
1583 	case SKL_CH_TRIO:
1584 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
1585 			return -EINVAL;
1586 
1587 		list = mic_trio_list[index];
1588 		break;
1589 
1590 	case SKL_CH_QUATRO:
1591 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
1592 			return -EINVAL;
1593 
1594 		list = mic_quatro_list[index];
1595 		break;
1596 
1597 	default:
1598 		dev_err(w->dapm->dev,
1599 				"Invalid channel %d for mic_select module\n",
1600 				ch_type);
1601 		return -EINVAL;
1602 
1603 	}
1604 
	/*
	 * The channel type enum value maps to the number of channels of
	 * that type; set the default gain for each selected input channel
	 * in the output/input selection matrix.
	 */
1606 	for (out_ch = 0; out_ch < ch_type; out_ch++) {
1607 		in_ch = list[out_ch];
1608 		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
1609 	}
1610 
1611 	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
1612 }
1613 
/*
 * Fill the DMA id for host and link. In case of a passthrough pipeline,
 * both the host and link copiers sit in the same pipeline, so copy the
 * link or host params based on the dev_type.
 */
1619 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1620 				struct skl_pipe_params *params)
1621 {
1622 	struct skl_pipe *pipe = mcfg->pipe;
1623 
1624 	if (pipe->passthru) {
1625 		switch (mcfg->dev_type) {
1626 		case SKL_DEVICE_HDALINK:
1627 			pipe->p_params->link_dma_id = params->link_dma_id;
1628 			pipe->p_params->link_index = params->link_index;
1629 			pipe->p_params->link_bps = params->link_bps;
1630 			break;
1631 
1632 		case SKL_DEVICE_HDAHOST:
1633 			pipe->p_params->host_dma_id = params->host_dma_id;
1634 			pipe->p_params->host_bps = params->host_bps;
1635 			break;
1636 
1637 		default:
1638 			break;
1639 		}
1640 		pipe->p_params->s_fmt = params->s_fmt;
1641 		pipe->p_params->ch = params->ch;
1642 		pipe->p_params->s_freq = params->s_freq;
1643 		pipe->p_params->stream = params->stream;
1644 		pipe->p_params->format = params->format;
1645 
1646 	} else {
1647 		memcpy(pipe->p_params, params, sizeof(*params));
1648 	}
1649 }
1650 
/*
 * The FE params are passed by the hw_params of the DAI.
 * On hw_params, the params are stored in the gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
1657 int skl_tplg_update_pipe_params(struct device *dev,
1658 			struct skl_module_cfg *mconfig,
1659 			struct skl_pipe_params *params)
1660 {
1661 	struct skl_module_res *res = &mconfig->module->resources[0];
1662 	struct skl *skl = get_skl_ctx(dev);
1663 	struct skl_module_fmt *format = NULL;
1664 	u8 cfg_idx = mconfig->pipe->cur_config_idx;
1665 
1666 	skl_tplg_fill_dma_id(mconfig, params);
1667 	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
1668 	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
1669 
1670 	if (skl->nr_modules)
1671 		return 0;
1672 
1673 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1674 		format = &mconfig->module->formats[0].inputs[0].fmt;
1675 	else
1676 		format = &mconfig->module->formats[0].outputs[0].fmt;
1677 
1678 	/* set the hw_params */
1679 	format->s_freq = params->s_freq;
1680 	format->channels = params->ch;
1681 	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1682 
	/*
	 * 16-bit samples use a 16-bit container, whereas 24-bit samples are
	 * carried in a 32-bit container, so update the container bit depth
	 * accordingly.
	 */
1687 	switch (format->valid_bit_depth) {
1688 	case SKL_DEPTH_16BIT:
1689 		format->bit_depth = format->valid_bit_depth;
1690 		break;
1691 
1692 	case SKL_DEPTH_24BIT:
1693 	case SKL_DEPTH_32BIT:
1694 		format->bit_depth = SKL_DEPTH_32BIT;
1695 		break;
1696 
1697 	default:
1698 		dev_err(dev, "Invalid bit depth %x for pipe\n",
1699 				format->valid_bit_depth);
1700 		return -EINVAL;
1701 	}
1702 
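	/*
	 * For playback the host copier consumes data, so its input buffer
	 * size follows the FE format; for capture it produces data, so the
	 * output buffer size is set instead (bytes per 1 ms of audio).
	 */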
1703 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1704 		res->ibs = (format->s_freq / 1000) *
1705 				(format->channels) *
1706 				(format->bit_depth >> 3);
1707 	} else {
1708 		res->obs = (format->s_freq / 1000) *
1709 				(format->channels) *
1710 				(format->bit_depth >> 3);
1711 	}
1712 
1713 	return 0;
1714 }
1715 
/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
1721 struct skl_module_cfg *
1722 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1723 {
1724 	struct snd_soc_dapm_widget *w;
1725 	struct snd_soc_dapm_path *p = NULL;
1726 
1727 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1728 		w = dai->playback_widget;
1729 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
1730 			if (p->connect && p->sink->power &&
1731 				!is_skl_dsp_widget_type(p->sink, dai->dev))
1732 				continue;
1733 
1734 			if (p->sink->priv) {
1735 				dev_dbg(dai->dev, "set params for %s\n",
1736 						p->sink->name);
1737 				return p->sink->priv;
1738 			}
1739 		}
1740 	} else {
1741 		w = dai->capture_widget;
1742 		snd_soc_dapm_widget_for_each_source_path(w, p) {
1743 			if (p->connect && p->source->power &&
1744 				!is_skl_dsp_widget_type(p->source, dai->dev))
1745 				continue;
1746 
1747 			if (p->source->priv) {
1748 				dev_dbg(dai->dev, "set params for %s\n",
1749 						p->source->name);
1750 				return p->source->priv;
1751 			}
1752 		}
1753 	}
1754 
1755 	return NULL;
1756 }
1757 
1758 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1759 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1760 {
1761 	struct snd_soc_dapm_path *p;
1762 	struct skl_module_cfg *mconfig = NULL;
1763 
1764 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1765 		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1766 			if (p->connect &&
1767 				    (p->sink->id == snd_soc_dapm_aif_out) &&
1768 				    p->source->priv) {
1769 				mconfig = p->source->priv;
1770 				return mconfig;
1771 			}
1772 			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1773 			if (mconfig)
1774 				return mconfig;
1775 		}
1776 	}
1777 	return mconfig;
1778 }
1779 
1780 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1781 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1782 {
1783 	struct snd_soc_dapm_path *p;
1784 	struct skl_module_cfg *mconfig = NULL;
1785 
1786 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1787 		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1788 			if (p->connect &&
1789 				    (p->source->id == snd_soc_dapm_aif_in) &&
1790 				    p->sink->priv) {
1791 				mconfig = p->sink->priv;
1792 				return mconfig;
1793 			}
1794 			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1795 			if (mconfig)
1796 				return mconfig;
1797 		}
1798 	}
1799 	return mconfig;
1800 }
1801 
1802 struct skl_module_cfg *
1803 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1804 {
1805 	struct snd_soc_dapm_widget *w;
1806 	struct skl_module_cfg *mconfig;
1807 
1808 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1809 		w = dai->playback_widget;
1810 		mconfig = skl_get_mconfig_pb_cpr(dai, w);
1811 	} else {
1812 		w = dai->capture_widget;
1813 		mconfig = skl_get_mconfig_cap_cpr(dai, w);
1814 	}
1815 	return mconfig;
1816 }
1817 
1818 static u8 skl_tplg_be_link_type(int dev_type)
1819 {
1820 	int ret;
1821 
1822 	switch (dev_type) {
1823 	case SKL_DEVICE_BT:
1824 		ret = NHLT_LINK_SSP;
1825 		break;
1826 
1827 	case SKL_DEVICE_DMIC:
1828 		ret = NHLT_LINK_DMIC;
1829 		break;
1830 
1831 	case SKL_DEVICE_I2S:
1832 		ret = NHLT_LINK_SSP;
1833 		break;
1834 
1835 	case SKL_DEVICE_HDALINK:
1836 		ret = NHLT_LINK_HDA;
1837 		break;
1838 
1839 	default:
1840 		ret = NHLT_LINK_INVALID;
1841 		break;
1842 	}
1843 
1844 	return ret;
1845 }
1846 
/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which is kept in the ACPI
 * NHLT table, so query the blob for the interface type (i2s/pdm) and
 * instance. The port can have multiple settings, so pick based on the PCM
 * parameters.
 */
1854 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1855 				struct skl_module_cfg *mconfig,
1856 				struct skl_pipe_params *params)
1857 {
1858 	struct nhlt_specific_cfg *cfg;
1859 	struct skl *skl = get_skl_ctx(dai->dev);
1860 	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1861 	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
1862 
1863 	skl_tplg_fill_dma_id(mconfig, params);
1864 
1865 	if (link_type == NHLT_LINK_HDA)
1866 		return 0;
1867 
1868 	/* update the blob based on virtual bus_id*/
1869 	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1870 					params->s_fmt, params->ch,
1871 					params->s_freq, params->stream,
1872 					dev_type);
1873 	if (cfg) {
1874 		mconfig->formats_config.caps_size = cfg->size;
1875 		mconfig->formats_config.caps = (u32 *) &cfg->caps;
1876 	} else {
1877 		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1878 					mconfig->vbus_id, link_type,
1879 					params->stream);
1880 		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1881 				 params->ch, params->s_freq, params->s_fmt);
1882 		return -EINVAL;
1883 	}
1884 
1885 	return 0;
1886 }
1887 
1888 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1889 				struct snd_soc_dapm_widget *w,
1890 				struct skl_pipe_params *params)
1891 {
1892 	struct snd_soc_dapm_path *p;
1893 	int ret = -EIO;
1894 
1895 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1896 		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
1897 						p->source->priv) {
1898 
1899 			ret = skl_tplg_be_fill_pipe_params(dai,
1900 						p->source->priv, params);
1901 			if (ret < 0)
1902 				return ret;
1903 		} else {
1904 			ret = skl_tplg_be_set_src_pipe_params(dai,
1905 						p->source, params);
1906 			if (ret < 0)
1907 				return ret;
1908 		}
1909 	}
1910 
1911 	return ret;
1912 }
1913 
1914 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1915 	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1916 {
1917 	struct snd_soc_dapm_path *p = NULL;
1918 	int ret = -EIO;
1919 
1920 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1921 		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
1922 						p->sink->priv) {
1923 
1924 			ret = skl_tplg_be_fill_pipe_params(dai,
1925 						p->sink->priv, params);
1926 			if (ret < 0)
1927 				return ret;
1928 		} else {
1929 			ret = skl_tplg_be_set_sink_pipe_params(
1930 						dai, p->sink, params);
1931 			if (ret < 0)
1932 				return ret;
1933 		}
1934 	}
1935 
1936 	return ret;
1937 }
1938 
/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on the stream direction, walk either the source
 * list or the sink list and set the pipeline parameters.
 */
1944 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1945 				struct skl_pipe_params *params)
1946 {
1947 	struct snd_soc_dapm_widget *w;
1948 
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);
	}

	w = dai->capture_widget;

	return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1961 }
1962 
1963 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1964 	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
1965 	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1966 	{SKL_PGA_EVENT, skl_tplg_pga_event},
1967 };
1968 
1969 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1970 	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1971 					skl_tplg_tlv_control_set},
1972 };
1973 
1974 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1975 	{
1976 		.id = SKL_CONTROL_TYPE_MIC_SELECT,
1977 		.get = skl_tplg_mic_control_get,
1978 		.put = skl_tplg_mic_control_set,
1979 	},
1980 };
1981 
1982 static int skl_tplg_fill_pipe_cfg(struct device *dev,
1983 			struct skl_pipe *pipe, u32 tkn,
1984 			u32 tkn_val, int conf_idx, int dir)
1985 {
1986 	struct skl_pipe_fmt *fmt;
1987 	struct skl_path_config *config;
1988 
1989 	switch (dir) {
1990 	case SKL_DIR_IN:
1991 		fmt = &pipe->configs[conf_idx].in_fmt;
1992 		break;
1993 
1994 	case SKL_DIR_OUT:
1995 		fmt = &pipe->configs[conf_idx].out_fmt;
1996 		break;
1997 
1998 	default:
1999 		dev_err(dev, "Invalid direction: %d\n", dir);
2000 		return -EINVAL;
2001 	}
2002 
2003 	config = &pipe->configs[conf_idx];
2004 
2005 	switch (tkn) {
2006 	case SKL_TKN_U32_CFG_FREQ:
2007 		fmt->freq = tkn_val;
2008 		break;
2009 
2010 	case SKL_TKN_U8_CFG_CHAN:
2011 		fmt->channels = tkn_val;
2012 		break;
2013 
2014 	case SKL_TKN_U8_CFG_BPS:
2015 		fmt->bps = tkn_val;
2016 		break;
2017 
2018 	case SKL_TKN_U32_PATH_MEM_PGS:
2019 		config->mem_pages = tkn_val;
2020 		break;
2021 
2022 	default:
2023 		dev_err(dev, "Invalid token config: %d\n", tkn);
2024 		return -EINVAL;
2025 	}
2026 
2027 	return 0;
2028 }
2029 
2030 static int skl_tplg_fill_pipe_tkn(struct device *dev,
2031 			struct skl_pipe *pipe, u32 tkn,
2032 			u32 tkn_val)
2033 {
2034 
2035 	switch (tkn) {
2036 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2037 		pipe->conn_type = tkn_val;
2038 		break;
2039 
2040 	case SKL_TKN_U32_PIPE_PRIORITY:
2041 		pipe->pipe_priority = tkn_val;
2042 		break;
2043 
2044 	case SKL_TKN_U32_PIPE_MEM_PGS:
2045 		pipe->memory_pages = tkn_val;
2046 		break;
2047 
2048 	case SKL_TKN_U32_PMODE:
2049 		pipe->lp_mode = tkn_val;
2050 		break;
2051 
2052 	case SKL_TKN_U32_PIPE_DIRECTION:
2053 		pipe->direction = tkn_val;
2054 		break;
2055 
2056 	case SKL_TKN_U32_NUM_CONFIGS:
2057 		pipe->nr_cfgs = tkn_val;
2058 		break;
2059 
2060 	default:
2061 		dev_err(dev, "Token not handled %d\n", tkn);
2062 		return -EINVAL;
2063 	}
2064 
2065 	return 0;
2066 }
2067 
/*
 * Add a pipeline by parsing the relevant tokens.
 * If the pipe already exists, reuse it and return -EEXIST.
 */
2072 static int skl_tplg_add_pipe(struct device *dev,
2073 		struct skl_module_cfg *mconfig, struct skl *skl,
2074 		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
2075 {
2076 	struct skl_pipeline *ppl;
2077 	struct skl_pipe *pipe;
2078 	struct skl_pipe_params *params;
2079 
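	/* Reuse the pipe if a pipeline with this ppl_id was already parsed */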
2080 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2081 		if (ppl->pipe->ppl_id == tkn_elem->value) {
2082 			mconfig->pipe = ppl->pipe;
2083 			return -EEXIST;
2084 		}
2085 	}
2086 
2087 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2088 	if (!ppl)
2089 		return -ENOMEM;
2090 
2091 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2092 	if (!pipe)
2093 		return -ENOMEM;
2094 
2095 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2096 	if (!params)
2097 		return -ENOMEM;
2098 
2099 	pipe->p_params = params;
2100 	pipe->ppl_id = tkn_elem->value;
2101 	INIT_LIST_HEAD(&pipe->w_list);
2102 
2103 	ppl->pipe = pipe;
2104 	list_add(&ppl->node, &skl->ppl_list);
2105 
2106 	mconfig->pipe = pipe;
2107 	mconfig->pipe->state = SKL_PIPE_INVALID;
2108 
2109 	return 0;
2110 }
2111 
2112 static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
2113 	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2114 {
2115 	if (uuid_tkn->token == SKL_TKN_UUID) {
2116 		guid_copy(guid, (guid_t *)&uuid_tkn->uuid);
2117 		return 0;
2118 	}
2119 
	dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
2121 
2122 	return -EINVAL;
2123 }
2124 
2125 static int skl_tplg_fill_pin(struct device *dev,
2126 			struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2127 			struct skl_module_pin *m_pin,
2128 			int pin_index)
2129 {
2130 	int ret;
2131 
2132 	switch (tkn_elem->token) {
2133 	case SKL_TKN_U32_PIN_MOD_ID:
2134 		m_pin[pin_index].id.module_id = tkn_elem->value;
2135 		break;
2136 
2137 	case SKL_TKN_U32_PIN_INST_ID:
2138 		m_pin[pin_index].id.instance_id = tkn_elem->value;
2139 		break;
2140 
2141 	case SKL_TKN_UUID:
2142 		ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
2143 			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
2144 		if (ret < 0)
2145 			return ret;
2146 
2147 		break;
2148 
2149 	default:
		dev_err(dev, "Not a pin token: %d\n", tkn_elem->token);
2151 		return -EINVAL;
2152 	}
2153 
2154 	return 0;
2155 }
2156 
/*
 * Parse the pin-config-specific tokens to fill up the
 * module private data
 */
2161 static int skl_tplg_fill_pins_info(struct device *dev,
2162 		struct skl_module_cfg *mconfig,
2163 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2164 		int dir, int pin_count)
2165 {
2166 	int ret;
2167 	struct skl_module_pin *m_pin;
2168 
2169 	switch (dir) {
2170 	case SKL_DIR_IN:
2171 		m_pin = mconfig->m_in_pin;
2172 		break;
2173 
2174 	case SKL_DIR_OUT:
2175 		m_pin = mconfig->m_out_pin;
2176 		break;
2177 
2178 	default:
2179 		dev_err(dev, "Invalid direction value\n");
2180 		return -EINVAL;
2181 	}
2182 
2183 	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
2184 	if (ret < 0)
2185 		return ret;
2186 
2187 	m_pin[pin_count].in_use = false;
2188 	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2189 
2190 	return 0;
2191 }
2192 
2193 /*
2194  * Fill up input/output module config format based
2195  * on the direction
2196  */
2197 static int skl_tplg_fill_fmt(struct device *dev,
2198 		struct skl_module_fmt *dst_fmt,
2199 		u32 tkn, u32 value)
2200 {
2201 	switch (tkn) {
2202 	case SKL_TKN_U32_FMT_CH:
2203 		dst_fmt->channels  = value;
2204 		break;
2205 
2206 	case SKL_TKN_U32_FMT_FREQ:
2207 		dst_fmt->s_freq = value;
2208 		break;
2209 
2210 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2211 		dst_fmt->bit_depth = value;
2212 		break;
2213 
2214 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2215 		dst_fmt->valid_bit_depth = value;
2216 		break;
2217 
2218 	case SKL_TKN_U32_FMT_CH_CONFIG:
2219 		dst_fmt->ch_cfg = value;
2220 		break;
2221 
2222 	case SKL_TKN_U32_FMT_INTERLEAVE:
2223 		dst_fmt->interleaving_style = value;
2224 		break;
2225 
2226 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2227 		dst_fmt->sample_type = value;
2228 		break;
2229 
2230 	case SKL_TKN_U32_FMT_CH_MAP:
2231 		dst_fmt->ch_map = value;
2232 		break;
2233 
2234 	default:
2235 		dev_err(dev, "Invalid token %d\n", tkn);
2236 		return -EINVAL;
2237 	}
2238 
2239 	return 0;
2240 }
2241 
2242 static int skl_tplg_widget_fill_fmt(struct device *dev,
2243 		struct skl_module_iface *fmt,
2244 		u32 tkn, u32 val, u32 dir, int fmt_idx)
2245 {
2246 	struct skl_module_fmt *dst_fmt;
2247 
2248 	if (!fmt)
2249 		return -EINVAL;
2250 
2251 	switch (dir) {
2252 	case SKL_DIR_IN:
2253 		dst_fmt = &fmt->inputs[fmt_idx].fmt;
2254 		break;
2255 
2256 	case SKL_DIR_OUT:
2257 		dst_fmt = &fmt->outputs[fmt_idx].fmt;
2258 		break;
2259 
2260 	default:
2261 		dev_err(dev, "Invalid direction: %d\n", dir);
2262 		return -EINVAL;
2263 	}
2264 
2265 	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2266 }
2267 
2268 static void skl_tplg_fill_pin_dynamic_val(
2269 		struct skl_module_pin *mpin, u32 pin_count, u32 value)
2270 {
2271 	int i;
2272 
2273 	for (i = 0; i < pin_count; i++)
2274 		mpin[i].is_dynamic = value;
2275 }
2276 
/*
 * The resource table in the manifest has pin-specific resources
 * such as the pin index and the pin buffer size.
 */
2281 static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
2282 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2283 		struct skl_module_res *res, int pin_idx, int dir)
2284 {
2285 	struct skl_module_pin_resources *m_pin;
2286 
2287 	switch (dir) {
2288 	case SKL_DIR_IN:
2289 		m_pin = &res->input[pin_idx];
2290 		break;
2291 
2292 	case SKL_DIR_OUT:
2293 		m_pin = &res->output[pin_idx];
2294 		break;
2295 
2296 	default:
2297 		dev_err(dev, "Invalid pin direction: %d\n", dir);
2298 		return -EINVAL;
2299 	}
2300 
2301 	switch (tkn_elem->token) {
2302 	case SKL_TKN_MM_U32_RES_PIN_ID:
2303 		m_pin->pin_index = tkn_elem->value;
2304 		break;
2305 
2306 	case SKL_TKN_MM_U32_PIN_BUF:
2307 		m_pin->buf_size = tkn_elem->value;
2308 		break;
2309 
2310 	default:
2311 		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2312 		return -EINVAL;
2313 	}
2314 
2315 	return 0;
2316 }
2317 
/*
 * Fill module-specific resources from the manifest's resource
 * table, such as CPS, DMA buffer size and memory pages.
 */
2322 static int skl_tplg_fill_res_tkn(struct device *dev,
2323 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2324 		struct skl_module_res *res,
2325 		int pin_idx, int dir)
2326 {
2327 	int ret, tkn_count = 0;
2328 
2329 	if (!res)
2330 		return -EINVAL;
2331 
2332 	switch (tkn_elem->token) {
2333 	case SKL_TKN_MM_U32_CPS:
2334 		res->cps = tkn_elem->value;
2335 		break;
2336 
2337 	case SKL_TKN_MM_U32_DMA_SIZE:
2338 		res->dma_buffer_size = tkn_elem->value;
2339 		break;
2340 
2341 	case SKL_TKN_MM_U32_CPC:
2342 		res->cpc = tkn_elem->value;
2343 		break;
2344 
2345 	case SKL_TKN_U32_MEM_PAGES:
2346 		res->is_pages = tkn_elem->value;
2347 		break;
2348 
2349 	case SKL_TKN_U32_OBS:
2350 		res->obs = tkn_elem->value;
2351 		break;
2352 
2353 	case SKL_TKN_U32_IBS:
2354 		res->ibs = tkn_elem->value;
2355 		break;
2356 
2357 	case SKL_TKN_U32_MAX_MCPS:
2358 		res->cps = tkn_elem->value;
2359 		break;
2360 
2361 	case SKL_TKN_MM_U32_RES_PIN_ID:
2362 	case SKL_TKN_MM_U32_PIN_BUF:
2363 		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
2364 						    pin_idx, dir);
2365 		if (ret < 0)
2366 			return ret;
2367 		break;
2368 
2369 	default:
		dev_err(dev, "Not a res type token: %d\n", tkn_elem->token);
2371 		return -EINVAL;
2372 
2373 	}
2374 	tkn_count++;
2375 
2376 	return tkn_count;
2377 }
2378 
2379 /*
2380  * Parse tokens to fill up the module private data
2381  */
2382 static int skl_tplg_get_token(struct device *dev,
2383 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2384 		struct skl *skl, struct skl_module_cfg *mconfig)
2385 {
2386 	int tkn_count = 0;
2387 	int ret;
2388 	static int is_pipe_exists;
2389 	static int pin_index, dir, conf_idx;
2390 	struct skl_module_iface *iface = NULL;
2391 	struct skl_module_res *res = NULL;
2392 	int res_idx = mconfig->res_idx;
2393 	int fmt_idx = mconfig->fmt_idx;
2394 
	/*
	 * If the manifest contains no modules, fill all the module
	 * data at index 0; res_idx and fmt_idx default to 0.
	 */
2400 	if (skl->nr_modules == 0) {
2401 		res = &mconfig->module->resources[res_idx];
2402 		iface = &mconfig->module->formats[fmt_idx];
2403 	}
2404 
2405 	if (tkn_elem->token > SKL_TKN_MAX)
2406 		return -EINVAL;
2407 
2408 	switch (tkn_elem->token) {
2409 	case SKL_TKN_U8_IN_QUEUE_COUNT:
2410 		mconfig->module->max_input_pins = tkn_elem->value;
2411 		break;
2412 
2413 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
2414 		mconfig->module->max_output_pins = tkn_elem->value;
2415 		break;
2416 
2417 	case SKL_TKN_U8_DYN_IN_PIN:
2418 		if (!mconfig->m_in_pin)
2419 			mconfig->m_in_pin =
2420 				devm_kcalloc(dev, MAX_IN_QUEUE,
2421 					     sizeof(*mconfig->m_in_pin),
2422 					     GFP_KERNEL);
2423 		if (!mconfig->m_in_pin)
2424 			return -ENOMEM;
2425 
2426 		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
2427 					      tkn_elem->value);
2428 		break;
2429 
2430 	case SKL_TKN_U8_DYN_OUT_PIN:
2431 		if (!mconfig->m_out_pin)
2432 			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_OUT_QUEUE,
					     sizeof(*mconfig->m_out_pin),
2435 					     GFP_KERNEL);
2436 		if (!mconfig->m_out_pin)
2437 			return -ENOMEM;
2438 
2439 		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
2440 					      tkn_elem->value);
2441 		break;
2442 
2443 	case SKL_TKN_U8_TIME_SLOT:
2444 		mconfig->time_slot = tkn_elem->value;
2445 		break;
2446 
2447 	case SKL_TKN_U8_CORE_ID:
2448 		mconfig->core_id = tkn_elem->value;
2449 		break;
2450 
2451 	case SKL_TKN_U8_MOD_TYPE:
2452 		mconfig->m_type = tkn_elem->value;
2453 		break;
2454 
2455 	case SKL_TKN_U8_DEV_TYPE:
2456 		mconfig->dev_type = tkn_elem->value;
2457 		break;
2458 
2459 	case SKL_TKN_U8_HW_CONN_TYPE:
2460 		mconfig->hw_conn_type = tkn_elem->value;
2461 		break;
2462 
2463 	case SKL_TKN_U16_MOD_INST_ID:
2464 		mconfig->id.instance_id =
2465 		tkn_elem->value;
2466 		break;
2467 
2468 	case SKL_TKN_U32_MEM_PAGES:
2469 	case SKL_TKN_U32_MAX_MCPS:
2470 	case SKL_TKN_U32_OBS:
2471 	case SKL_TKN_U32_IBS:
2472 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
2473 		if (ret < 0)
2474 			return ret;
2475 
2476 		break;
2477 
2478 	case SKL_TKN_U32_VBUS_ID:
2479 		mconfig->vbus_id = tkn_elem->value;
2480 		break;
2481 
2482 	case SKL_TKN_U32_PARAMS_FIXUP:
2483 		mconfig->params_fixup = tkn_elem->value;
2484 		break;
2485 
2486 	case SKL_TKN_U32_CONVERTER:
2487 		mconfig->converter = tkn_elem->value;
2488 		break;
2489 
2490 	case SKL_TKN_U32_D0I3_CAPS:
2491 		mconfig->d0i3_caps = tkn_elem->value;
2492 		break;
2493 
2494 	case SKL_TKN_U32_PIPE_ID:
2495 		ret = skl_tplg_add_pipe(dev,
2496 				mconfig, skl, tkn_elem);
2497 
2498 		if (ret < 0) {
2499 			if (ret == -EEXIST) {
2500 				is_pipe_exists = 1;
2501 				break;
2502 			}
			return ret;
2504 		}
2505 
2506 		break;
2507 
2508 	case SKL_TKN_U32_PIPE_CONFIG_ID:
2509 		conf_idx = tkn_elem->value;
2510 		break;
2511 
2512 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2513 	case SKL_TKN_U32_PIPE_PRIORITY:
2514 	case SKL_TKN_U32_PIPE_MEM_PGS:
2515 	case SKL_TKN_U32_PMODE:
2516 	case SKL_TKN_U32_PIPE_DIRECTION:
2517 	case SKL_TKN_U32_NUM_CONFIGS:
2518 		if (is_pipe_exists) {
2519 			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2520 					tkn_elem->token, tkn_elem->value);
2521 			if (ret < 0)
2522 				return ret;
2523 		}
2524 
2525 		break;
2526 
2527 	case SKL_TKN_U32_PATH_MEM_PGS:
2528 	case SKL_TKN_U32_CFG_FREQ:
2529 	case SKL_TKN_U8_CFG_CHAN:
2530 	case SKL_TKN_U8_CFG_BPS:
2531 		if (mconfig->pipe->nr_cfgs) {
2532 			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
2533 					tkn_elem->token, tkn_elem->value,
2534 					conf_idx, dir);
2535 			if (ret < 0)
2536 				return ret;
2537 		}
2538 		break;
2539 
2540 	case SKL_TKN_CFG_MOD_RES_ID:
2541 		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
2542 		break;
2543 
2544 	case SKL_TKN_CFG_MOD_FMT_ID:
2545 		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
2546 		break;
2547 
	/*
	 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction
	 * and the pin count: bit 0 holds the direction and bits 7:4 the
	 * pin count, which is used here as the pin index.
	 */
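	/*
	 * Illustrative example: with the masks used below, a token value
	 * of 0x31 decodes to dir = 1 and pin_index = 3 (dir = 1 presumably
	 * being the output direction, since only bit 0 is used for it).
	 */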
2553 	case SKL_TKN_U32_DIR_PIN_COUNT:
2554 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2555 		pin_index = (tkn_elem->value &
2556 			SKL_PIN_COUNT_MASK) >> 4;
2557 
2558 		break;
2559 
2560 	case SKL_TKN_U32_FMT_CH:
2561 	case SKL_TKN_U32_FMT_FREQ:
2562 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2563 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2564 	case SKL_TKN_U32_FMT_CH_CONFIG:
2565 	case SKL_TKN_U32_FMT_INTERLEAVE:
2566 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2567 	case SKL_TKN_U32_FMT_CH_MAP:
2568 		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
2569 				tkn_elem->value, dir, pin_index);
2570 
2571 		if (ret < 0)
2572 			return ret;
2573 
2574 		break;
2575 
2576 	case SKL_TKN_U32_PIN_MOD_ID:
2577 	case SKL_TKN_U32_PIN_INST_ID:
2578 	case SKL_TKN_UUID:
2579 		ret = skl_tplg_fill_pins_info(dev,
2580 				mconfig, tkn_elem, dir,
2581 				pin_index);
2582 		if (ret < 0)
2583 			return ret;
2584 
2585 		break;
2586 
2587 	case SKL_TKN_U32_CAPS_SIZE:
2588 		mconfig->formats_config.caps_size =
2589 			tkn_elem->value;
2590 
2591 		break;
2592 
2593 	case SKL_TKN_U32_CAPS_SET_PARAMS:
2594 		mconfig->formats_config.set_params =
2595 				tkn_elem->value;
2596 		break;
2597 
2598 	case SKL_TKN_U32_CAPS_PARAMS_ID:
2599 		mconfig->formats_config.param_id =
2600 				tkn_elem->value;
2601 		break;
2602 
2603 	case SKL_TKN_U32_PROC_DOMAIN:
2604 		mconfig->domain =
2605 			tkn_elem->value;
2606 
2607 		break;
2608 
2609 	case SKL_TKN_U32_DMA_BUF_SIZE:
2610 		mconfig->dma_buffer_size = tkn_elem->value;
2611 		break;
2612 
2613 	case SKL_TKN_U8_IN_PIN_TYPE:
2614 	case SKL_TKN_U8_OUT_PIN_TYPE:
2615 	case SKL_TKN_U8_CONN_TYPE:
2616 		break;
2617 
2618 	default:
2619 		dev_err(dev, "Token %d not handled\n",
2620 				tkn_elem->token);
2621 		return -EINVAL;
2622 	}
2623 
2624 	tkn_count++;
2625 
2626 	return tkn_count;
2627 }
2628 
2629 /*
2630  * Parse the vendor array for specific tokens to construct
2631  * module private data
2632  */
2633 static int skl_tplg_get_tokens(struct device *dev,
2634 		char *pvt_data,	struct skl *skl,
2635 		struct skl_module_cfg *mconfig, int block_size)
2636 {
2637 	struct snd_soc_tplg_vendor_array *array;
2638 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2639 	int tkn_count = 0, ret;
2640 	int off = 0, tuple_size = 0;
2641 	bool is_module_guid = true;
2642 
2643 	if (block_size <= 0)
2644 		return -EINVAL;
2645 
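	/*
	 * Walk the vendor arrays one by one: 'off' advances by the raw
	 * array size while 'tuple_size' counts the token bytes consumed,
	 * terminating the loop once the whole block has been parsed.
	 */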
2646 	while (tuple_size < block_size) {
2647 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2648 
2649 		off += array->size;
2650 
2651 		switch (array->type) {
2652 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2653 			dev_warn(dev, "no string tokens expected for skl tplg\n");
2654 			continue;
2655 
2656 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2657 			if (is_module_guid) {
2658 				ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
2659 							array->uuid);
2660 				is_module_guid = false;
2661 			} else {
2662 				ret = skl_tplg_get_token(dev, array->value, skl,
2663 							 mconfig);
2664 			}
2665 
2666 			if (ret < 0)
2667 				return ret;
2668 
2669 			tuple_size += sizeof(*array->uuid);
2670 
2671 			continue;
2672 
2673 		default:
2674 			tkn_elem = array->value;
2675 			tkn_count = 0;
2676 			break;
2677 		}
2678 
2679 		while (tkn_count <= (array->num_elems - 1)) {
2680 			ret = skl_tplg_get_token(dev, tkn_elem,
2681 					skl, mconfig);
2682 
2683 			if (ret < 0)
2684 				return ret;
2685 
2686 			tkn_count = tkn_count + ret;
2687 			tkn_elem++;
2688 		}
2689 
2690 		tuple_size += tkn_count * sizeof(*tkn_elem);
2691 	}
2692 
2693 	return off;
2694 }
2695 
/*
 * Every data block is preceded by descriptors giving the number
 * of data blocks, the type of each block and its size.
 */
2700 static int skl_tplg_get_desc_blocks(struct device *dev,
2701 		struct snd_soc_tplg_vendor_array *array)
2702 {
2703 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2704 
2705 	tkn_elem = array->value;
2706 
2707 	switch (tkn_elem->token) {
2708 	case SKL_TKN_U8_NUM_BLOCKS:
2709 	case SKL_TKN_U8_BLOCK_TYPE:
2710 	case SKL_TKN_U16_BLOCK_SIZE:
2711 		return tkn_elem->value;
2712 
2713 	default:
2714 		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2715 		break;
2716 	}
2717 
2718 	return -EINVAL;
2719 }
2720 
2721 /* Functions to parse private data from configuration file format v4 */
2722 
/*
 * Add a pipeline from the topology binary into the driver pipeline list.
 *
 * If it was already added, reuse that instance; otherwise create a new
 * instance and add it to the driver list.
 */
2729 static int skl_tplg_add_pipe_v4(struct device *dev,
2730 				struct skl_module_cfg *mconfig, struct skl *skl,
2731 				struct skl_dfw_v4_pipe *dfw_pipe)
2732 {
2733 	struct skl_pipeline *ppl;
2734 	struct skl_pipe *pipe;
2735 	struct skl_pipe_params *params;
2736 
2737 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2738 		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
2739 			mconfig->pipe = ppl->pipe;
2740 			return 0;
2741 		}
2742 	}
2743 
2744 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2745 	if (!ppl)
2746 		return -ENOMEM;
2747 
2748 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2749 	if (!pipe)
2750 		return -ENOMEM;
2751 
2752 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2753 	if (!params)
2754 		return -ENOMEM;
2755 
2756 	pipe->ppl_id = dfw_pipe->pipe_id;
2757 	pipe->memory_pages = dfw_pipe->memory_pages;
2758 	pipe->pipe_priority = dfw_pipe->pipe_priority;
2759 	pipe->conn_type = dfw_pipe->conn_type;
2760 	pipe->state = SKL_PIPE_INVALID;
2761 	pipe->p_params = params;
2762 	INIT_LIST_HEAD(&pipe->w_list);
2763 
2764 	ppl->pipe = pipe;
2765 	list_add(&ppl->node, &skl->ppl_list);
2766 
2767 	mconfig->pipe = pipe;
2768 
2769 	return 0;
2770 }
2771 
2772 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
2773 					struct skl_module_pin *m_pin,
2774 					bool is_dynamic, int max_pin)
2775 {
2776 	int i;
2777 
2778 	for (i = 0; i < max_pin; i++) {
2779 		m_pin[i].id.module_id = dfw_pin[i].module_id;
2780 		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
2781 		m_pin[i].in_use = false;
2782 		m_pin[i].is_dynamic = is_dynamic;
2783 		m_pin[i].pin_state = SKL_PIN_UNBIND;
2784 	}
2785 }
2786 
2787 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
2788 				 struct skl_dfw_v4_module_fmt *src_fmt,
2789 				 int pins)
2790 {
2791 	int i;
2792 
2793 	for (i = 0; i < pins; i++) {
2794 		dst_fmt[i].fmt.channels  = src_fmt[i].channels;
2795 		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
2796 		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
2797 		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
2798 		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
2799 		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
2800 		dst_fmt[i].fmt.interleaving_style =
2801 						src_fmt[i].interleaving_style;
2802 		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
2803 	}
2804 }
2805 
2806 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
2807 				    struct skl *skl, struct device *dev,
2808 				    struct skl_module_cfg *mconfig)
2809 {
2810 	struct skl_dfw_v4_module *dfw =
2811 				(struct skl_dfw_v4_module *)tplg_w->priv.data;
2812 	int ret;
2813 
2814 	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
2815 
2816 	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
2817 	if (ret)
2818 		return ret;
2819 	mconfig->id.module_id = -1;
2820 	mconfig->id.instance_id = dfw->instance_id;
2821 	mconfig->module->resources[0].cps = dfw->max_mcps;
2822 	mconfig->module->resources[0].ibs = dfw->ibs;
2823 	mconfig->module->resources[0].obs = dfw->obs;
2824 	mconfig->core_id = dfw->core_id;
2825 	mconfig->module->max_input_pins = dfw->max_in_queue;
2826 	mconfig->module->max_output_pins = dfw->max_out_queue;
2827 	mconfig->module->loadable = dfw->is_loadable;
2828 	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
2829 			     MAX_IN_QUEUE);
2830 	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
2831 			     MAX_OUT_QUEUE);
2832 
2833 	mconfig->params_fixup = dfw->params_fixup;
2834 	mconfig->converter = dfw->converter;
2835 	mconfig->m_type = dfw->module_type;
2836 	mconfig->vbus_id = dfw->vbus_id;
2837 	mconfig->module->resources[0].is_pages = dfw->mem_pages;
2838 
2839 	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
2840 	if (ret)
2841 		return ret;
2842 
2843 	mconfig->dev_type = dfw->dev_type;
2844 	mconfig->hw_conn_type = dfw->hw_conn_type;
2845 	mconfig->time_slot = dfw->time_slot;
2846 	mconfig->formats_config.caps_size = dfw->caps.caps_size;
2847 
2848 	mconfig->m_in_pin = devm_kcalloc(dev,
2849 				MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
2850 				GFP_KERNEL);
2851 	if (!mconfig->m_in_pin)
2852 		return -ENOMEM;
2853 
2854 	mconfig->m_out_pin = devm_kcalloc(dev,
2855 				MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
2856 				GFP_KERNEL);
2857 	if (!mconfig->m_out_pin)
2858 		return -ENOMEM;
2859 
2860 	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
2861 				    dfw->is_dynamic_in_pin,
2862 				    mconfig->module->max_input_pins);
2863 	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
2864 				    dfw->is_dynamic_out_pin,
2865 				    mconfig->module->max_output_pins);
2866 
2867 	if (mconfig->formats_config.caps_size) {
2868 		mconfig->formats_config.set_params = dfw->caps.set_params;
2869 		mconfig->formats_config.param_id = dfw->caps.param_id;
2870 		mconfig->formats_config.caps =
2871 		devm_kzalloc(dev, mconfig->formats_config.caps_size,
2872 			     GFP_KERNEL);
2873 		if (!mconfig->formats_config.caps)
2874 			return -ENOMEM;
2875 		memcpy(mconfig->formats_config.caps, dfw->caps.caps,
2876 		       dfw->caps.caps_size);
2877 	}
2878 
2879 	return 0;
2880 }
2881 
/*
 * Parse the private data for tokens and their corresponding values.
 * The private data can have multiple data blocks, so the blocks are
 * preceded by a descriptor for the number of blocks and each block by
 * descriptors for the type and size of the succeeding data.
 */
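/*
 * Private data layout handled below (sketch):
 *   [NUM_DATA_BLOCKS descriptor]
 *   then per block: [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][payload]
 * Tuple payloads are parsed token by token, binary payloads are copied
 * into formats_config.caps.
 */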
2888 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2889 				struct skl *skl, struct device *dev,
2890 				struct skl_module_cfg *mconfig)
2891 {
2892 	struct snd_soc_tplg_vendor_array *array;
2893 	int num_blocks, block_size = 0, block_type, off = 0;
2894 	char *data;
2895 	int ret;
2896 
2897 	/*
2898 	 * v4 configuration files have a valid UUID at the start of
2899 	 * the widget's private data.
2900 	 */
2901 	if (uuid_is_valid((char *)tplg_w->priv.data))
2902 		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);
2903 
2904 	/* Read the NUM_DATA_BLOCKS descriptor */
2905 	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2906 	ret = skl_tplg_get_desc_blocks(dev, array);
2907 	if (ret < 0)
2908 		return ret;
2909 	num_blocks = ret;
2910 
2911 	off += array->size;
2912 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2913 	while (num_blocks > 0) {
2914 		array = (struct snd_soc_tplg_vendor_array *)
2915 				(tplg_w->priv.data + off);
2916 
2917 		ret = skl_tplg_get_desc_blocks(dev, array);
2918 
2919 		if (ret < 0)
2920 			return ret;
2921 		block_type = ret;
2922 		off += array->size;
2923 
2924 		array = (struct snd_soc_tplg_vendor_array *)
2925 			(tplg_w->priv.data + off);
2926 
2927 		ret = skl_tplg_get_desc_blocks(dev, array);
2928 
2929 		if (ret < 0)
2930 			return ret;
2931 		block_size = ret;
2932 		off += array->size;
2933 
2934 		array = (struct snd_soc_tplg_vendor_array *)
2935 			(tplg_w->priv.data + off);
2936 
2937 		data = (tplg_w->priv.data + off);
2938 
2939 		if (block_type == SKL_TYPE_TUPLE) {
2940 			ret = skl_tplg_get_tokens(dev, data,
2941 					skl, mconfig, block_size);
2942 
2943 			if (ret < 0)
2944 				return ret;
2945 
2946 			--num_blocks;
2947 		} else {
2948 			if (mconfig->formats_config.caps_size > 0)
2949 				memcpy(mconfig->formats_config.caps, data,
2950 					mconfig->formats_config.caps_size);
2951 			--num_blocks;
2952 			ret = mconfig->formats_config.caps_size;
2953 		}
2954 		off += ret;
2955 	}
2956 
2957 	return 0;
2958 }
2959 
2960 static void skl_clear_pin_config(struct snd_soc_component *component,
2961 				struct snd_soc_dapm_widget *w)
2962 {
2963 	int i;
2964 	struct skl_module_cfg *mconfig;
2965 	struct skl_pipe *pipe;
2966 
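	/* Only reset widgets that belong to this component's DAPM context */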
2967 	if (!strncmp(w->dapm->component->name, component->name,
2968 					strlen(component->name))) {
2969 		mconfig = w->priv;
2970 		pipe = mconfig->pipe;
2971 		for (i = 0; i < mconfig->module->max_input_pins; i++) {
2972 			mconfig->m_in_pin[i].in_use = false;
2973 			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2974 		}
2975 		for (i = 0; i < mconfig->module->max_output_pins; i++) {
2976 			mconfig->m_out_pin[i].in_use = false;
2977 			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2978 		}
2979 		pipe->state = SKL_PIPE_INVALID;
2980 		mconfig->m_state = SKL_MODULE_UNINIT;
2981 	}
2982 }
2983 
2984 void skl_cleanup_resources(struct skl *skl)
2985 {
2986 	struct skl_sst *ctx = skl->skl_sst;
2987 	struct snd_soc_component *soc_component = skl->component;
2988 	struct snd_soc_dapm_widget *w;
2989 	struct snd_soc_card *card;
2990 
2991 	if (soc_component == NULL)
2992 		return;
2993 
2994 	card = soc_component->card;
2995 	if (!card || !card->instantiated)
2996 		return;
2997 
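	/* Reset the in-use memory and MCPS accounting before clearing pins */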
2998 	skl->resource.mem = 0;
2999 	skl->resource.mcps = 0;
3000 
3001 	list_for_each_entry(w, &card->widgets, list) {
3002 		if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL)
3003 			skl_clear_pin_config(soc_component, w);
3004 	}
3005 
3006 	skl_clear_module_cnt(ctx->dsp);
3007 }
3008 
/*
 * Topology core widget load callback
 *
 * This is used to save the private data of each widget, which gives the
 * driver information about the module and pipeline parameters the DSP
 * firmware expects, such as ids, resource values and formats.
 */
3016 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
3017 				struct snd_soc_dapm_widget *w,
3018 				struct snd_soc_tplg_dapm_widget *tplg_w)
3019 {
3020 	int ret;
3021 	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3022 	struct skl *skl = bus_to_skl(bus);
3023 	struct skl_module_cfg *mconfig;
3024 
3025 	if (!tplg_w->priv.size)
3026 		goto bind_event;
3027 
3028 	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
3029 
3030 	if (!mconfig)
3031 		return -ENOMEM;
3032 
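	/*
	 * When the manifest describes no modules, allocate a per-widget
	 * module descriptor to hold the widget's resources and formats.
	 */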
3033 	if (skl->nr_modules == 0) {
3034 		mconfig->module = devm_kzalloc(bus->dev,
3035 				sizeof(*mconfig->module), GFP_KERNEL);
3036 		if (!mconfig->module)
3037 			return -ENOMEM;
3038 	}
3039 
3040 	w->priv = mconfig;
3041 
	/*
	 * The module binary can be loaded later, so set the module id
	 * to -1 and query it when the module is loaded for a use case.
	 */
3046 	mconfig->id.module_id = -1;
3047 
3048 	/* Parse private data for tuples */
3049 	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
3050 	if (ret < 0)
3051 		return ret;
3052 
3053 	skl_debug_init_module(skl->debugfs, w, mconfig);
3054 
3055 bind_event:
3056 	if (tplg_w->event_type == 0) {
3057 		dev_dbg(bus->dev, "ASoC: No event handler required\n");
3058 		return 0;
3059 	}
3060 
3061 	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
3062 					ARRAY_SIZE(skl_tplg_widget_ops),
3063 					tplg_w->event_type);
3064 
3065 	if (ret) {
3066 		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
3067 					__func__, tplg_w->event_type);
3068 		return -EINVAL;
3069 	}
3070 
3071 	return 0;
3072 }
3073 
3074 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
3075 					struct snd_soc_tplg_bytes_control *bc)
3076 {
3077 	struct skl_algo_data *ac;
3078 	struct skl_dfw_algo_data *dfw_ac =
3079 				(struct skl_dfw_algo_data *)bc->priv.data;
3080 
3081 	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
3082 	if (!ac)
3083 		return -ENOMEM;
3084 
3085 	/* Fill private data */
3086 	ac->max = dfw_ac->max;
3087 	ac->param_id = dfw_ac->param_id;
3088 	ac->set_params = dfw_ac->set_params;
3089 	ac->size = dfw_ac->max;
3090 
3091 	if (ac->max) {
3092 		ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
3093 		if (!ac->params)
3094 			return -ENOMEM;
3095 
3096 		memcpy(ac->params, dfw_ac->params, ac->max);
3097 	}
3098 
3099 	be->dobj.private  = ac;
3100 	return 0;
3101 }
3102 
3103 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
3104 				struct snd_soc_tplg_enum_control *ec)
3105 {
3106 
3107 	void *data;
3108 
3109 	if (ec->priv.size) {
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
3111 		if (!data)
3112 			return -ENOMEM;
3113 		memcpy(data, ec->priv.data, ec->priv.size);
3114 		se->dobj.private = data;
3115 	}
3116 
3117 	return 0;
3118 
3119 }
3120 
3121 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
3122 				int index,
3123 				struct snd_kcontrol_new *kctl,
3124 				struct snd_soc_tplg_ctl_hdr *hdr)
3125 {
3126 	struct soc_bytes_ext *sb;
3127 	struct snd_soc_tplg_bytes_control *tplg_bc;
3128 	struct snd_soc_tplg_enum_control *tplg_ec;
3129 	struct hdac_bus *bus  = snd_soc_component_get_drvdata(cmpnt);
3130 	struct soc_enum *se;
3131 
3132 	switch (hdr->ops.info) {
3133 	case SND_SOC_TPLG_CTL_BYTES:
3134 		tplg_bc = container_of(hdr,
3135 				struct snd_soc_tplg_bytes_control, hdr);
3136 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
3137 			sb = (struct soc_bytes_ext *)kctl->private_value;
3138 			if (tplg_bc->priv.size)
3139 				return skl_init_algo_data(
3140 						bus->dev, sb, tplg_bc);
3141 		}
3142 		break;
3143 
3144 	case SND_SOC_TPLG_CTL_ENUM:
3145 		tplg_ec = container_of(hdr,
3146 				struct snd_soc_tplg_enum_control, hdr);
3147 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
3148 			se = (struct soc_enum *)kctl->private_value;
3149 			if (tplg_ec->priv.size)
3150 				return skl_init_enum_data(bus->dev, se,
3151 						tplg_ec);
3152 		}
3153 		break;
3154 
3155 	default:
3156 		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
3157 			hdr->ops.get, hdr->ops.put, hdr->ops.info);
3158 		break;
3159 	}
3160 
3161 	return 0;
3162 }
3163 
3164 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3165 		struct snd_soc_tplg_vendor_string_elem *str_elem,
3166 		struct skl *skl)
3167 {
3168 	int tkn_count = 0;
3169 	static int ref_count;
3170 
3171 	switch (str_elem->token) {
3172 	case SKL_TKN_STR_LIB_NAME:
3173 		if (ref_count > skl->skl_sst->lib_count - 1) {
3174 			ref_count = 0;
3175 			return -EINVAL;
3176 		}
3177 
3178 		strncpy(skl->skl_sst->lib_info[ref_count].name,
3179 			str_elem->string,
3180 			ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
3181 		ref_count++;
3182 		break;
3183 
3184 	default:
3185 		dev_err(dev, "Not a string token %d\n", str_elem->token);
3186 		break;
3187 	}
3188 	tkn_count++;
3189 
3190 	return tkn_count;
3191 }
3192 
3193 static int skl_tplg_get_str_tkn(struct device *dev,
3194 		struct snd_soc_tplg_vendor_array *array,
3195 		struct skl *skl)
3196 {
3197 	int tkn_count = 0, ret;
3198 	struct snd_soc_tplg_vendor_string_elem *str_elem;
3199 
3200 	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
3201 	while (tkn_count < array->num_elems) {
3202 		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
3203 		str_elem++;
3204 
3205 		if (ret < 0)
3206 			return ret;
3207 
3208 		tkn_count = tkn_count + ret;
3209 	}
3210 
3211 	return tkn_count;
3212 }
3213 
3214 static int skl_tplg_manifest_fill_fmt(struct device *dev,
3215 		struct skl_module_iface *fmt,
3216 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3217 		u32 dir, int fmt_idx)
3218 {
3219 	struct skl_module_pin_fmt *dst_fmt;
3220 	struct skl_module_fmt *mod_fmt;
3221 	int ret;
3222 
3223 	if (!fmt)
3224 		return -EINVAL;
3225 
3226 	switch (dir) {
3227 	case SKL_DIR_IN:
3228 		dst_fmt = &fmt->inputs[fmt_idx];
3229 		break;
3230 
3231 	case SKL_DIR_OUT:
3232 		dst_fmt = &fmt->outputs[fmt_idx];
3233 		break;
3234 
3235 	default:
3236 		dev_err(dev, "Invalid direction: %d\n", dir);
3237 		return -EINVAL;
3238 	}
3239 
3240 	mod_fmt = &dst_fmt->fmt;
3241 
3242 	switch (tkn_elem->token) {
3243 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3244 		dst_fmt->id = tkn_elem->value;
3245 		break;
3246 
3247 	default:
3248 		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
3249 					tkn_elem->value);
3250 		if (ret < 0)
3251 			return ret;
3252 		break;
3253 	}
3254 
3255 	return 0;
3256 }
3257 
3258 static int skl_tplg_fill_mod_info(struct device *dev,
3259 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3260 		struct skl_module *mod)
3261 {
3262 
3263 	if (!mod)
3264 		return -EINVAL;
3265 
3266 	switch (tkn_elem->token) {
3267 	case SKL_TKN_U8_IN_PIN_TYPE:
3268 		mod->input_pin_type = tkn_elem->value;
3269 		break;
3270 
3271 	case SKL_TKN_U8_OUT_PIN_TYPE:
3272 		mod->output_pin_type = tkn_elem->value;
3273 		break;
3274 
3275 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3276 		mod->max_input_pins = tkn_elem->value;
3277 		break;
3278 
3279 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3280 		mod->max_output_pins = tkn_elem->value;
3281 		break;
3282 
3283 	case SKL_TKN_MM_U8_NUM_RES:
3284 		mod->nr_resources = tkn_elem->value;
3285 		break;
3286 
3287 	case SKL_TKN_MM_U8_NUM_INTF:
3288 		mod->nr_interfaces = tkn_elem->value;
3289 		break;
3290 
3291 	default:
		dev_err(dev, "Invalid mod info token %d\n", tkn_elem->token);
3293 		return -EINVAL;
3294 	}
3295 
3296 	return 0;
3297 }
3298 
3299 
3300 static int skl_tplg_get_int_tkn(struct device *dev,
3301 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3302 		struct skl *skl)
3303 {
3304 	int tkn_count = 0, ret;
3305 	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
3306 	struct skl_module_res *res = NULL;
3307 	struct skl_module_iface *fmt = NULL;
3308 	struct skl_module *mod = NULL;
3309 	static struct skl_astate_param *astate_table;
3310 	static int astate_cfg_idx, count;
3311 	int i;
3312 	size_t size;
3313 
3314 	if (skl->modules) {
3315 		mod = skl->modules[mod_idx];
3316 		res = &mod->resources[res_val_idx];
3317 		fmt = &mod->formats[intf_val_idx];
3318 	}
3319 
3320 	switch (tkn_elem->token) {
3321 	case SKL_TKN_U32_LIB_COUNT:
3322 		skl->skl_sst->lib_count = tkn_elem->value;
3323 		break;
3324 
3325 	case SKL_TKN_U8_NUM_MOD:
3326 		skl->nr_modules = tkn_elem->value;
3327 		skl->modules = devm_kcalloc(dev, skl->nr_modules,
3328 				sizeof(*skl->modules), GFP_KERNEL);
3329 		if (!skl->modules)
3330 			return -ENOMEM;
3331 
3332 		for (i = 0; i < skl->nr_modules; i++) {
3333 			skl->modules[i] = devm_kzalloc(dev,
3334 					sizeof(struct skl_module), GFP_KERNEL);
3335 			if (!skl->modules[i])
3336 				return -ENOMEM;
3337 		}
3338 		break;
3339 
3340 	case SKL_TKN_MM_U8_MOD_IDX:
3341 		mod_idx = tkn_elem->value;
3342 		break;
3343 
3344 	case SKL_TKN_U32_ASTATE_COUNT:
3345 		if (astate_table != NULL) {
			dev_err(dev, "More than one entry for A-State count\n");
3347 			return -EINVAL;
3348 		}
3349 
3350 		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
3351 			dev_err(dev, "Invalid A-State count %d\n",
3352 				tkn_elem->value);
3353 			return -EINVAL;
3354 		}
3355 
3356 		size = struct_size(skl->cfg.astate_cfg, astate_table,
3357 				   tkn_elem->value);
3358 		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
3359 		if (!skl->cfg.astate_cfg)
3360 			return -ENOMEM;
3361 
3362 		astate_table = skl->cfg.astate_cfg->astate_table;
3363 		count = skl->cfg.astate_cfg->count = tkn_elem->value;
3364 		break;
3365 
3366 	case SKL_TKN_U32_ASTATE_IDX:
3367 		if (tkn_elem->value >= count) {
3368 			dev_err(dev, "Invalid A-State index %d\n",
3369 				tkn_elem->value);
3370 			return -EINVAL;
3371 		}
3372 
3373 		astate_cfg_idx = tkn_elem->value;
3374 		break;
3375 
3376 	case SKL_TKN_U32_ASTATE_KCPS:
3377 		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
3378 		break;
3379 
3380 	case SKL_TKN_U32_ASTATE_CLK_SRC:
3381 		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
3382 		break;
3383 
3384 	case SKL_TKN_U8_IN_PIN_TYPE:
3385 	case SKL_TKN_U8_OUT_PIN_TYPE:
3386 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3387 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3388 	case SKL_TKN_MM_U8_NUM_RES:
3389 	case SKL_TKN_MM_U8_NUM_INTF:
3390 		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
3391 		if (ret < 0)
3392 			return ret;
3393 		break;
3394 
3395 	case SKL_TKN_U32_DIR_PIN_COUNT:
3396 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
3397 		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
3398 		break;
3399 
3400 	case SKL_TKN_MM_U32_RES_ID:
3401 		if (!res)
3402 			return -EINVAL;
3403 
3404 		res->id = tkn_elem->value;
3405 		res_val_idx = tkn_elem->value;
3406 		break;
3407 
3408 	case SKL_TKN_MM_U32_FMT_ID:
3409 		if (!fmt)
3410 			return -EINVAL;
3411 
3412 		fmt->fmt_idx = tkn_elem->value;
3413 		intf_val_idx = tkn_elem->value;
3414 		break;
3415 
3416 	case SKL_TKN_MM_U32_CPS:
3417 	case SKL_TKN_MM_U32_DMA_SIZE:
3418 	case SKL_TKN_MM_U32_CPC:
3419 	case SKL_TKN_U32_MEM_PAGES:
3420 	case SKL_TKN_U32_OBS:
3421 	case SKL_TKN_U32_IBS:
3422 	case SKL_TKN_MM_U32_RES_PIN_ID:
3423 	case SKL_TKN_MM_U32_PIN_BUF:
3424 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
3425 		if (ret < 0)
3426 			return ret;
3427 
3428 		break;
3429 
3430 	case SKL_TKN_MM_U32_NUM_IN_FMT:
		if (!res)
3432 			return -EINVAL;
3433 
3434 		res->nr_input_pins = tkn_elem->value;
3435 		break;
3436 
3437 	case SKL_TKN_MM_U32_NUM_OUT_FMT:
		if (!res)
3439 			return -EINVAL;
3440 
3441 		res->nr_output_pins = tkn_elem->value;
3442 		break;
3443 
3444 	case SKL_TKN_U32_FMT_CH:
3445 	case SKL_TKN_U32_FMT_FREQ:
3446 	case SKL_TKN_U32_FMT_BIT_DEPTH:
3447 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
3448 	case SKL_TKN_U32_FMT_CH_CONFIG:
3449 	case SKL_TKN_U32_FMT_INTERLEAVE:
3450 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
3451 	case SKL_TKN_U32_FMT_CH_MAP:
3452 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3453 		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
3454 						 dir, pin_idx);
3455 		if (ret < 0)
3456 			return ret;
3457 		break;
3458 
3459 	default:
3460 		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
3461 		return -EINVAL;
3462 	}
3463 	tkn_count++;
3464 
3465 	return tkn_count;
3466 }
3467 
3468 static int skl_tplg_get_manifest_uuid(struct device *dev,
3469 				struct skl *skl,
3470 				struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
3471 {
3472 	static int ref_count;
3473 	struct skl_module *mod;
3474 
3475 	if (uuid_tkn->token == SKL_TKN_UUID) {
3476 		mod = skl->modules[ref_count];
3477 		guid_copy(&mod->uuid, (guid_t *)&uuid_tkn->uuid);
3478 		ref_count++;
3479 	} else {
		dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
3481 		return -EINVAL;
3482 	}
3483 
3484 	return 0;
3485 }
3486 
3487 /*
3488  * Fill the manifest structure by parsing the tokens based on the
3489  * type.
3490  */
3491 static int skl_tplg_get_manifest_tkn(struct device *dev,
3492 		char *pvt_data, struct skl *skl,
3493 		int block_size)
3494 {
3495 	int tkn_count = 0, ret;
3496 	int off = 0, tuple_size = 0;
3497 	struct snd_soc_tplg_vendor_array *array;
3498 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
3499 
3500 	if (block_size <= 0)
3501 		return -EINVAL;
3502 
3503 	while (tuple_size < block_size) {
3504 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
3505 		off += array->size;
3506 		switch (array->type) {
3507 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
3508 			ret = skl_tplg_get_str_tkn(dev, array, skl);
3509 
3510 			if (ret < 0)
3511 				return ret;
3512 			tkn_count = ret;
3513 
3514 			tuple_size += tkn_count *
3515 				sizeof(struct snd_soc_tplg_vendor_string_elem);
3516 			continue;
3517 
3518 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
3519 			ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid);
3520 			if (ret < 0)
3521 				return ret;
3522 
3523 			tuple_size += sizeof(*array->uuid);
3524 			continue;
3525 
3526 		default:
3527 			tkn_elem = array->value;
3528 			tkn_count = 0;
3529 			break;
3530 		}
3531 
3532 		while (tkn_count <= array->num_elems - 1) {
3533 			ret = skl_tplg_get_int_tkn(dev,
3534 					tkn_elem, skl);
3535 			if (ret < 0)
3536 				return ret;
3537 
3538 			tkn_count = tkn_count + ret;
3539 			tkn_elem++;
3540 		}
3541 		tuple_size += (tkn_count * sizeof(*tkn_elem));
3542 		tkn_count = 0;
3543 	}
3544 
3545 	return off;
3546 }
3547 
/*
 * Parse the manifest private data for tokens. The private data blocks are
 * preceded by descriptors for the number of blocks and for the type and
 * size of each data block.
 */
3552 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
3553 			struct device *dev, struct skl *skl)
3554 {
3555 	struct snd_soc_tplg_vendor_array *array;
3556 	int num_blocks, block_size = 0, block_type, off = 0;
3557 	char *data;
3558 	int ret;
3559 
3560 	/* Read the NUM_DATA_BLOCKS descriptor */
3561 	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
3562 	ret = skl_tplg_get_desc_blocks(dev, array);
3563 	if (ret < 0)
3564 		return ret;
3565 	num_blocks = ret;
3566 
3567 	off += array->size;
3568 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
3569 	while (num_blocks > 0) {
3570 		array = (struct snd_soc_tplg_vendor_array *)
3571 				(manifest->priv.data + off);
3572 		ret = skl_tplg_get_desc_blocks(dev, array);
3573 
3574 		if (ret < 0)
3575 			return ret;
3576 		block_type = ret;
3577 		off += array->size;
3578 
3579 		array = (struct snd_soc_tplg_vendor_array *)
3580 			(manifest->priv.data + off);
3581 
3582 		ret = skl_tplg_get_desc_blocks(dev, array);
3583 
3584 		if (ret < 0)
3585 			return ret;
3586 		block_size = ret;
3587 		off += array->size;
3588 
3589 		array = (struct snd_soc_tplg_vendor_array *)
3590 			(manifest->priv.data + off);
3591 
3592 		data = (manifest->priv.data + off);
3593 
3594 		if (block_type == SKL_TYPE_TUPLE) {
3595 			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
3596 					block_size);
3597 
3598 			if (ret < 0)
3599 				return ret;
3600 
3601 			--num_blocks;
3602 		} else {
3603 			return -EINVAL;
3604 		}
3605 		off += ret;
3606 	}
3607 
3608 	return 0;
3609 }
3610 
3611 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
3612 				struct snd_soc_tplg_manifest *manifest)
3613 {
3614 	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3615 	struct skl *skl = bus_to_skl(bus);
3616 
3617 	/* proceed only if we have private data defined */
3618 	if (manifest->priv.size == 0)
3619 		return 0;
3620 
3621 	skl_tplg_get_manifest_data(manifest, bus->dev, skl);
3622 
3623 	if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
3624 		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
3625 					skl->skl_sst->lib_count);
3626 		return  -EINVAL;
3627 	}
3628 
3629 	return 0;
3630 }
3631 
3632 static struct snd_soc_tplg_ops skl_tplg_ops  = {
3633 	.widget_load = skl_tplg_widget_load,
3634 	.control_load = skl_tplg_control_load,
3635 	.bytes_ext_ops = skl_tlv_ops,
3636 	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
3637 	.io_ops = skl_tplg_kcontrol_ops,
3638 	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
3639 	.manifest = skl_manifest_load,
3640 	.dai_load = skl_dai_load,
3641 };
3642 
/*
 * A pipe can have multiple modules, each of which is also a DAPM widget.
 * While managing a pipeline we need the list of all the widgets in it, so
 * this helper, skl_tplg_create_pipe_widget_list(), collects the SKL-type
 * widgets of each pipeline.
 */
3649 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
3650 {
3651 	struct snd_soc_dapm_widget *w;
3652 	struct skl_module_cfg *mcfg = NULL;
3653 	struct skl_pipe_module *p_module = NULL;
3654 	struct skl_pipe *pipe;
3655 
3656 	list_for_each_entry(w, &component->card->widgets, list) {
3657 		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
3658 			mcfg = w->priv;
3659 			pipe = mcfg->pipe;
3660 
3661 			p_module = devm_kzalloc(component->dev,
3662 						sizeof(*p_module), GFP_KERNEL);
3663 			if (!p_module)
3664 				return -ENOMEM;
3665 
3666 			p_module->w = w;
3667 			list_add_tail(&p_module->node, &pipe->w_list);
3668 		}
3669 	}
3670 
3671 	return 0;
3672 }
3673 
3674 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
3675 {
3676 	struct skl_pipe_module *w_module;
3677 	struct snd_soc_dapm_widget *w;
3678 	struct skl_module_cfg *mconfig;
3679 	bool host_found = false, link_found = false;
3680 
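	/*
	 * A pipe containing both a host-DMA module and a link-side module
	 * drives host and link DMA from a single pipeline, so mark it as
	 * a passthrough pipe.
	 */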
3681 	list_for_each_entry(w_module, &pipe->w_list, node) {
3682 		w = w_module->w;
3683 		mconfig = w->priv;
3684 
3685 		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
3686 			host_found = true;
3687 		else if (mconfig->dev_type != SKL_DEVICE_NONE)
3688 			link_found = true;
3689 	}
3690 
3691 	if (host_found && link_found)
3692 		pipe->passthru = true;
3693 	else
3694 		pipe->passthru = false;
3695 }
3696 
3697 /* This will be read from topology manifest, currently defined here */
3698 #define SKL_MAX_MCPS 30000000
3699 #define SKL_FW_MAX_MEM 1000000
3700 
3701 /*
3702  * SKL topology init routine
3703  */
3704 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
3705 {
3706 	int ret;
3707 	const struct firmware *fw;
3708 	struct skl *skl = bus_to_skl(bus);
3709 	struct skl_pipeline *ppl;
3710 
3711 	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3712 	if (ret < 0) {
		dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin\n",
				skl->tplg_name, ret);
3715 		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
3716 		if (ret < 0) {
3717 			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
3718 					"dfw_sst.bin", ret);
3719 			return ret;
3720 		}
3721 	}
3722 
	/*
	 * The complete tplg for SKL is loaded at index 0; we don't use
	 * any other index.
	 */
3727 	ret = snd_soc_tplg_component_load(component,
3728 					&skl_tplg_ops, fw, 0);
3729 	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
3731 		release_firmware(fw);
3732 		return -EINVAL;
3733 	}
3734 
3735 	skl->resource.max_mcps = SKL_MAX_MCPS;
3736 	skl->resource.max_mem = SKL_FW_MAX_MEM;
3737 
3738 	skl->tplg = fw;
3739 	ret = skl_tplg_create_pipe_widget_list(component);
3740 	if (ret < 0)
3741 		return ret;
3742 
3743 	list_for_each_entry(ppl, &skl->ppl_list, node)
3744 		skl_tplg_set_pipe_type(skl, ppl->pipe);
3745 
3746 	return 0;
3747 }
3748 
3749 void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
3750 {
3751 	struct skl *skl = bus_to_skl(bus);
3752 	struct skl_pipeline *ppl, *tmp;
3753 
3754 	if (!list_empty(&skl->ppl_list))
3755 		list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
3756 			list_del(&ppl->node);
3757 
3758 	/* clean up topology */
3759 	snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
3760 
3761 	release_firmware(skl->tplg);
3762 }
3763