1 /*
2  *  skl-topology.c - Implements Platform component ALSA controls/widget
3  *  handlers.
4  *
5  *  Copyright (C) 2014-2015 Intel Corp
6  *  Author: Jeeja KP <jeeja.kp@intel.com>
7  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/firmware.h>
22 #include <sound/soc.h>
23 #include <sound/soc-topology.h>
24 #include <uapi/sound/snd_sst_tokens.h>
25 #include "skl-sst-dsp.h"
26 #include "skl-sst-ipc.h"
27 #include "skl-topology.h"
28 #include "skl.h"
29 #include "skl-tplg-interface.h"
30 #include "../common/sst-dsp.h"
31 #include "../common/sst-dsp-priv.h"
32 
33 #define SKL_CH_FIXUP_MASK		(1 << 0)
34 #define SKL_RATE_FIXUP_MASK		(1 << 1)
35 #define SKL_FMT_FIXUP_MASK		(1 << 2)
36 #define SKL_IN_DIR_BIT_MASK		BIT(0)
37 #define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
38 
39 void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
40 {
41 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
42 
43 	switch (caps) {
44 	case SKL_D0I3_NONE:
45 		d0i3->non_d0i3++;
46 		break;
47 
48 	case SKL_D0I3_STREAMING:
49 		d0i3->streaming++;
50 		break;
51 
52 	case SKL_D0I3_NON_STREAMING:
53 		d0i3->non_streaming++;
54 		break;
55 	}
56 }
57 
58 void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
59 {
60 	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
61 
62 	switch (caps) {
63 	case SKL_D0I3_NONE:
64 		d0i3->non_d0i3--;
65 		break;
66 
67 	case SKL_D0I3_STREAMING:
68 		d0i3->streaming--;
69 		break;
70 
71 	case SKL_D0I3_NON_STREAMING:
72 		d0i3->non_streaming--;
73 		break;
74 	}
75 }
76 
/*
 * SKL DSP driver modelling uses only a few DAPM widgets, the rest are
 * ignored. This helper checks if the SKL driver handles this widget type.
 */
81 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
82 {
83 	switch (w->id) {
84 	case snd_soc_dapm_dai_link:
85 	case snd_soc_dapm_dai_in:
86 	case snd_soc_dapm_aif_in:
87 	case snd_soc_dapm_aif_out:
88 	case snd_soc_dapm_dai_out:
89 	case snd_soc_dapm_switch:
90 		return false;
91 	default:
92 		return true;
93 	}
94 }
95 
/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * from the available pool.
 */
100 static bool skl_is_pipe_mem_avail(struct skl *skl,
101 				struct skl_module_cfg *mconfig)
102 {
103 	struct skl_sst *ctx = skl->skl_sst;
104 
105 	if (skl->resource.mem + mconfig->pipe->memory_pages >
106 				skl->resource.max_mem) {
107 		dev_err(ctx->dev,
108 				"%s: module_id %d instance %d\n", __func__,
109 				mconfig->id.module_id,
110 				mconfig->id.instance_id);
111 		dev_err(ctx->dev,
112 				"exceeds ppl memory available %d mem %d\n",
113 				skl->resource.max_mem, skl->resource.mem);
114 		return false;
115 	} else {
116 		return true;
117 	}
118 }
119 
120 /*
121  * Add the mem to the mem pool. This is freed when pipe is deleted.
122  * Note: DSP does actual memory management we only keep track for complete
123  * pool
124  */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	/* DSP does the actual allocation; driver only tracks pool usage */
	skl->resource.mem += mconfig->pipe->memory_pages;
}
130 
/*
 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
 * (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
 * pipe.
 */
138 
139 static bool skl_is_pipe_mcps_avail(struct skl *skl,
140 				struct skl_module_cfg *mconfig)
141 {
142 	struct skl_sst *ctx = skl->skl_sst;
143 
144 	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
145 		dev_err(ctx->dev,
146 			"%s: module_id %d instance %d\n", __func__,
147 			mconfig->id.module_id, mconfig->id.instance_id);
148 		dev_err(ctx->dev,
149 			"exceeds ppl mcps available %d > mem %d\n",
150 			skl->resource.max_mcps, skl->resource.mcps);
151 		return false;
152 	} else {
153 		return true;
154 	}
155 }
156 
static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	/* charge this module's compute requirement against the MCPS pool */
	skl->resource.mcps += mconfig->mcps;
}
162 
163 /*
164  * Free the mcps when tearing down
165  */
/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}
171 
172 /*
173  * Free the memory when tearing down
174  */
/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}
180 
181 
/* Dump a module's pin-0 input/output PCM formats at debug level */
static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}
197 
198 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
199 {
200 	int slot_map = 0xFFFFFFFF;
201 	int start_slot = 0;
202 	int i;
203 
204 	for (i = 0; i < chs; i++) {
205 		/*
206 		 * For 2 channels with starting slot as 0, slot map will
207 		 * look like 0xFFFFFF10.
208 		 */
209 		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
210 		start_slot++;
211 	}
212 	fmt->ch_map = slot_map;
213 }
214 
215 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
216 			struct skl_pipe_params *params, int fixup)
217 {
218 	if (fixup & SKL_RATE_FIXUP_MASK)
219 		fmt->s_freq = params->s_freq;
220 	if (fixup & SKL_CH_FIXUP_MASK) {
221 		fmt->channels = params->ch;
222 		skl_tplg_update_chmap(fmt, fmt->channels);
223 	}
224 	if (fixup & SKL_FMT_FIXUP_MASK) {
225 		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
226 
227 		/*
228 		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
229 		 * container so update bit depth accordingly
230 		 */
231 		switch (fmt->valid_bit_depth) {
232 		case SKL_DEPTH_16BIT:
233 			fmt->bit_depth = fmt->valid_bit_depth;
234 			break;
235 
236 		default:
237 			fmt->bit_depth = SKL_DEPTH_32BIT;
238 			break;
239 		}
240 	}
241 
242 }
243 
244 /*
245  * A pipeline may have modules which impact the pcm parameters, like SRC,
246  * channel converter, format converter.
247  * We need to calculate the output params by applying the 'fixup'
248  * Topology will tell driver which type of fixup is to be applied by
249  * supplying the fixup mask, so based on that we calculate the output
250  *
251  * Now In FE the pcm hw_params is source/target format. Same is applicable
252  * for BE with its hw_params invoked.
253  * here based on FE, BE pipeline and direction we calculate the input and
254  * outfix and then apply that for a module
255  */
256 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
257 		struct skl_pipe_params *params, bool is_fe)
258 {
259 	int in_fixup, out_fixup;
260 	struct skl_module_fmt *in_fmt, *out_fmt;
261 
262 	/* Fixups will be applied to pin 0 only */
263 	in_fmt = &m_cfg->in_fmt[0];
264 	out_fmt = &m_cfg->out_fmt[0];
265 
266 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
267 		if (is_fe) {
268 			in_fixup = m_cfg->params_fixup;
269 			out_fixup = (~m_cfg->converter) &
270 					m_cfg->params_fixup;
271 		} else {
272 			out_fixup = m_cfg->params_fixup;
273 			in_fixup = (~m_cfg->converter) &
274 					m_cfg->params_fixup;
275 		}
276 	} else {
277 		if (is_fe) {
278 			out_fixup = m_cfg->params_fixup;
279 			in_fixup = (~m_cfg->converter) &
280 					m_cfg->params_fixup;
281 		} else {
282 			in_fixup = m_cfg->params_fixup;
283 			out_fixup = (~m_cfg->converter) &
284 					m_cfg->params_fixup;
285 		}
286 	}
287 
288 	skl_tplg_update_params(in_fmt, params, in_fixup);
289 	skl_tplg_update_params(out_fmt, params, out_fixup);
290 }
291 
292 /*
293  * A module needs input and output buffers, which are dependent upon pcm
294  * params, so once we have calculate params, we need buffer calculation as
295  * well.
296  */
297 static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
298 				struct skl_module_cfg *mcfg)
299 {
300 	int multiplier = 1;
301 	struct skl_module_fmt *in_fmt, *out_fmt;
302 
303 	/* Since fixups is applied to pin 0 only, ibs, obs needs
304 	 * change for pin 0 only
305 	 */
306 	in_fmt = &mcfg->in_fmt[0];
307 	out_fmt = &mcfg->out_fmt[0];
308 
309 	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
310 		multiplier = 5;
311 
312 	mcfg->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
313 			in_fmt->channels * (in_fmt->bit_depth >> 3) *
314 			multiplier;
315 
316 	mcfg->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
317 			out_fmt->channels * (out_fmt->bit_depth >> 3) *
318 			multiplier;
319 }
320 
321 static u8 skl_tplg_be_dev_type(int dev_type)
322 {
323 	int ret;
324 
325 	switch (dev_type) {
326 	case SKL_DEVICE_BT:
327 		ret = NHLT_DEVICE_BT;
328 		break;
329 
330 	case SKL_DEVICE_DMIC:
331 		ret = NHLT_DEVICE_DMIC;
332 		break;
333 
334 	case SKL_DEVICE_I2S:
335 		ret = NHLT_DEVICE_I2S;
336 		break;
337 
338 	default:
339 		ret = NHLT_DEVICE_INVALID;
340 		break;
341 	}
342 
343 	return ret;
344 }
345 
346 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
347 						struct skl_sst *ctx)
348 {
349 	struct skl_module_cfg *m_cfg = w->priv;
350 	int link_type, dir;
351 	u32 ch, s_freq, s_fmt;
352 	struct nhlt_specific_cfg *cfg;
353 	struct skl *skl = get_skl_ctx(ctx->dev);
354 	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
355 
356 	/* check if we already have blob */
357 	if (m_cfg->formats_config.caps_size > 0)
358 		return 0;
359 
360 	dev_dbg(ctx->dev, "Applying default cfg blob\n");
361 	switch (m_cfg->dev_type) {
362 	case SKL_DEVICE_DMIC:
363 		link_type = NHLT_LINK_DMIC;
364 		dir = SNDRV_PCM_STREAM_CAPTURE;
365 		s_freq = m_cfg->in_fmt[0].s_freq;
366 		s_fmt = m_cfg->in_fmt[0].bit_depth;
367 		ch = m_cfg->in_fmt[0].channels;
368 		break;
369 
370 	case SKL_DEVICE_I2S:
371 		link_type = NHLT_LINK_SSP;
372 		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
373 			dir = SNDRV_PCM_STREAM_PLAYBACK;
374 			s_freq = m_cfg->out_fmt[0].s_freq;
375 			s_fmt = m_cfg->out_fmt[0].bit_depth;
376 			ch = m_cfg->out_fmt[0].channels;
377 		} else {
378 			dir = SNDRV_PCM_STREAM_CAPTURE;
379 			s_freq = m_cfg->in_fmt[0].s_freq;
380 			s_fmt = m_cfg->in_fmt[0].bit_depth;
381 			ch = m_cfg->in_fmt[0].channels;
382 		}
383 		break;
384 
385 	default:
386 		return -EINVAL;
387 	}
388 
389 	/* update the blob based on virtual bus_id and default params */
390 	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
391 					s_fmt, ch, s_freq, dir, dev_type);
392 	if (cfg) {
393 		m_cfg->formats_config.caps_size = cfg->size;
394 		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
395 	} else {
396 		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
397 					m_cfg->vbus_id, link_type, dir);
398 		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
399 					ch, s_freq, s_fmt);
400 		return -EIO;
401 	}
402 
403 	return 0;
404 }
405 
406 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
407 							struct skl_sst *ctx)
408 {
409 	struct skl_module_cfg *m_cfg = w->priv;
410 	struct skl_pipe_params *params = m_cfg->pipe->p_params;
411 	int p_conn_type = m_cfg->pipe->conn_type;
412 	bool is_fe;
413 
414 	if (!m_cfg->params_fixup)
415 		return;
416 
417 	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
418 				w->name);
419 
420 	skl_dump_mconfig(ctx, m_cfg);
421 
422 	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
423 		is_fe = true;
424 	else
425 		is_fe = false;
426 
427 	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
428 	skl_tplg_update_buffer_size(ctx, m_cfg);
429 
430 	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
431 				w->name);
432 
433 	skl_dump_mconfig(ctx, m_cfg);
434 }
435 
436 /*
437  * some modules can have multiple params set from user control and
438  * need to be set after module is initialized. If set_param flag is
439  * set module params will be done after module is initialised.
440  */
441 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
442 						struct skl_sst *ctx)
443 {
444 	int i, ret;
445 	struct skl_module_cfg *mconfig = w->priv;
446 	const struct snd_kcontrol_new *k;
447 	struct soc_bytes_ext *sb;
448 	struct skl_algo_data *bc;
449 	struct skl_specific_cfg *sp_cfg;
450 
451 	if (mconfig->formats_config.caps_size > 0 &&
452 		mconfig->formats_config.set_params == SKL_PARAM_SET) {
453 		sp_cfg = &mconfig->formats_config;
454 		ret = skl_set_module_params(ctx, sp_cfg->caps,
455 					sp_cfg->caps_size,
456 					sp_cfg->param_id, mconfig);
457 		if (ret < 0)
458 			return ret;
459 	}
460 
461 	for (i = 0; i < w->num_kcontrols; i++) {
462 		k = &w->kcontrol_news[i];
463 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
464 			sb = (void *) k->private_value;
465 			bc = (struct skl_algo_data *)sb->dobj.private;
466 
467 			if (bc->set_params == SKL_PARAM_SET) {
468 				ret = skl_set_module_params(ctx,
469 						(u32 *)bc->params, bc->size,
470 						bc->param_id, mconfig);
471 				if (ret < 0)
472 					return ret;
473 			}
474 		}
475 	}
476 
477 	return 0;
478 }
479 
/*
 * Some module params can be set from user control and are required when
 * the module is initialized. Such params are identified by the set_param
 * flag: if it is SKL_PARAM_INIT, the parameter must be applied as part
 * of module init.
 */
486 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
487 {
488 	const struct snd_kcontrol_new *k;
489 	struct soc_bytes_ext *sb;
490 	struct skl_algo_data *bc;
491 	struct skl_module_cfg *mconfig = w->priv;
492 	int i;
493 
494 	for (i = 0; i < w->num_kcontrols; i++) {
495 		k = &w->kcontrol_news[i];
496 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
497 			sb = (struct soc_bytes_ext *)k->private_value;
498 			bc = (struct skl_algo_data *)sb->dobj.private;
499 
500 			if (bc->set_params != SKL_PARAM_INIT)
501 				continue;
502 
503 			mconfig->formats_config.caps = (u32 *)bc->params;
504 			mconfig->formats_config.caps_size = bc->size;
505 
506 			break;
507 		}
508 	}
509 
510 	return 0;
511 }
512 
513 static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
514 		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
515 {
516 	switch (mcfg->dev_type) {
517 	case SKL_DEVICE_HDAHOST:
518 		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
519 
520 	case SKL_DEVICE_HDALINK:
521 		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
522 	}
523 
524 	return 0;
525 }
526 
/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
532 static int
533 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
534 {
535 	struct skl_pipe_module *w_module;
536 	struct snd_soc_dapm_widget *w;
537 	struct skl_module_cfg *mconfig;
538 	struct skl_sst *ctx = skl->skl_sst;
539 	int ret = 0;
540 
541 	list_for_each_entry(w_module, &pipe->w_list, node) {
542 		uuid_le *uuid_mod;
543 		w = w_module->w;
544 		mconfig = w->priv;
545 
546 		/* check if module ids are populated */
547 		if (mconfig->id.module_id < 0) {
548 			dev_err(skl->skl_sst->dev,
549 					"module %pUL id not populated\n",
550 					(uuid_le *)mconfig->guid);
551 			return -EIO;
552 		}
553 
554 		/* check resource available */
555 		if (!skl_is_pipe_mcps_avail(skl, mconfig))
556 			return -ENOMEM;
557 
558 		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
559 			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
560 				mconfig->id.module_id, mconfig->guid);
561 			if (ret < 0)
562 				return ret;
563 
564 			mconfig->m_state = SKL_MODULE_LOADED;
565 		}
566 
567 		/* prepare the DMA if the module is gateway cpr */
568 		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
569 		if (ret < 0)
570 			return ret;
571 
572 		/* update blob if blob is null for be with default value */
573 		skl_tplg_update_be_blob(w, ctx);
574 
575 		/*
576 		 * apply fix/conversion to module params based on
577 		 * FE/BE params
578 		 */
579 		skl_tplg_update_module_params(w, ctx);
580 		uuid_mod = (uuid_le *)mconfig->guid;
581 		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
582 						mconfig->id.instance_id);
583 		if (mconfig->id.pvt_id < 0)
584 			return ret;
585 		skl_tplg_set_module_init_data(w);
586 		ret = skl_init_module(ctx, mconfig);
587 		if (ret < 0) {
588 			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
589 			return ret;
590 		}
591 		skl_tplg_alloc_pipe_mcps(skl, mconfig);
592 		ret = skl_tplg_set_module_params(w, ctx);
593 		if (ret < 0)
594 			return ret;
595 	}
596 
597 	return 0;
598 }
599 
600 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
601 	 struct skl_pipe *pipe)
602 {
603 	int ret;
604 	struct skl_pipe_module *w_module = NULL;
605 	struct skl_module_cfg *mconfig = NULL;
606 
607 	list_for_each_entry(w_module, &pipe->w_list, node) {
608 		uuid_le *uuid_mod;
609 		mconfig  = w_module->w->priv;
610 		uuid_mod = (uuid_le *)mconfig->guid;
611 
612 		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
613 			mconfig->m_state > SKL_MODULE_UNINIT) {
614 			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
615 						mconfig->id.module_id);
616 			if (ret < 0)
617 				return -EIO;
618 		}
619 		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
620 	}
621 
622 	/* no modules to unload in this path, so return */
623 	return 0;
624 }
625 
/*
 * Mixer module represents a pipeline. So in the Pre-PMU event of the mixer
 * we need to create the pipeline. We do the following:
 *   - check the resources
 *   - create the pipeline
 *   - initialize the modules in the pipeline
 *   - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* reserve the pool resources now that the pipe exists in the DSP */
	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		/* the first module has no upstream neighbour to bind to */
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			/* bind errors are intentionally not propagated here */
			if (modules->dst == module)
				skl_bind_modules(ctx, modules->src,
							modules->dst);
		}
	}

	return 0;
}
702 
703 static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
704 				int size, struct skl_module_cfg *mcfg)
705 {
706 	int i, pvt_id;
707 
708 	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
709 		struct skl_kpb_params *kpb_params =
710 				(struct skl_kpb_params *)params;
711 		struct skl_mod_inst_map *inst = kpb_params->map;
712 
713 		for (i = 0; i < kpb_params->num_modules; i++) {
714 			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
715 								inst->inst_id);
716 			if (pvt_id < 0)
717 				return -EINVAL;
718 
719 			inst->inst_id = pvt_id;
720 			inst++;
721 		}
722 	}
723 
724 	return 0;
725 }
726 /*
727  * Some modules require params to be set after the module is bound to
728  * all pins connected.
729  *
730  * The module provider initializes set_param flag for such modules and we
731  * send params after binding
732  */
/*
 * Send SKL_PARAM_BIND stage parameters for a module once all of its
 * in/out pins have reached the bound state; bails out early (success)
 * while any pin is still unbound.
 *
 * NOTE(review): @mcfg and w->priv (mconfig) are presumably the same
 * module config — pin checks use @mcfg while params are sent with
 * mconfig; confirm against callers.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->max_out_queue; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->max_in_queue; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	/* the format blob flagged for the BIND stage is sent first */
	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	/* then every byte control flagged for the BIND stage */
	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				/* work on a copy: instance ids get rewritten */
				params = kzalloc(bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				memcpy(params, bc->params, bc->max);
				skl_fill_sink_instance_id(ctx, params, bc->max,
								mconfig);

				ret = skl_set_module_params(ctx, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
795 
796 
797 static int skl_tplg_module_add_deferred_bind(struct skl *skl,
798 	struct skl_module_cfg *src, struct skl_module_cfg *dst)
799 {
800 	struct skl_module_deferred_bind *m_list, *modules;
801 	int i;
802 
803 	/* only supported for module with static pin connection */
804 	for (i = 0; i < dst->max_in_queue; i++) {
805 		struct skl_module_pin *pin = &dst->m_in_pin[i];
806 
807 		if (pin->is_dynamic)
808 			continue;
809 
810 		if ((pin->id.module_id  == src->id.module_id) &&
811 			(pin->id.instance_id  == src->id.instance_id)) {
812 
813 			if (!list_empty(&skl->bind_list)) {
814 				list_for_each_entry(modules, &skl->bind_list, node) {
815 					if (modules->src == src && modules->dst == dst)
816 						return 0;
817 				}
818 			}
819 
820 			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
821 			if (!m_list)
822 				return -ENOMEM;
823 
824 			m_list->src = src;
825 			m_list->dst = dst;
826 
827 			list_add(&m_list->node, &skl->bind_list);
828 		}
829 	}
830 
831 	return 0;
832 }
833 
834 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
835 				struct skl *skl,
836 				struct snd_soc_dapm_widget *src_w,
837 				struct skl_module_cfg *src_mconfig)
838 {
839 	struct snd_soc_dapm_path *p;
840 	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
841 	struct skl_module_cfg *sink_mconfig;
842 	struct skl_sst *ctx = skl->skl_sst;
843 	int ret;
844 
845 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
846 		if (!p->connect)
847 			continue;
848 
849 		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
850 		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
851 
852 		next_sink = p->sink;
853 
854 		if (!is_skl_dsp_widget_type(p->sink))
855 			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
856 
857 		/*
858 		 * here we will check widgets in sink pipelines, so that
859 		 * can be any widgets type and we are only interested if
860 		 * they are ones used for SKL so check that first
861 		 */
862 		if ((p->sink->priv != NULL) &&
863 					is_skl_dsp_widget_type(p->sink)) {
864 
865 			sink = p->sink;
866 			sink_mconfig = sink->priv;
867 
868 			/*
869 			 * Modules other than PGA leaf can be connected
870 			 * directly or via switch to a module in another
871 			 * pipeline. EX: reference path
872 			 * when the path is enabled, the dst module that needs
873 			 * to be bound may not be initialized. if the module is
874 			 * not initialized, add these modules in the deferred
875 			 * bind list and when the dst module is initialised,
876 			 * bind this module to the dst_module in deferred list.
877 			 */
878 			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
879 				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
880 
881 				ret = skl_tplg_module_add_deferred_bind(skl,
882 						src_mconfig, sink_mconfig);
883 
884 				if (ret < 0)
885 					return ret;
886 
887 			}
888 
889 
890 			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
891 				sink_mconfig->m_state == SKL_MODULE_UNINIT)
892 				continue;
893 
894 			/* Bind source to sink, mixin is always source */
895 			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
896 			if (ret)
897 				return ret;
898 
899 			/* set module params after bind */
900 			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
901 			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
902 
903 			/* Start sinks pipe first */
904 			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
905 				if (sink_mconfig->pipe->conn_type !=
906 							SKL_PIPE_CONN_TYPE_FE)
907 					ret = skl_run_pipe(ctx,
908 							sink_mconfig->pipe);
909 				if (ret)
910 					return ret;
911 			}
912 		}
913 	}
914 
915 	if (!sink)
916 		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
917 
918 	return 0;
919 }
920 
921 /*
922  * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
923  * we need to do following:
924  *   - Bind to sink pipeline
925  *      Since the sink pipes can be running and we don't get mixer event on
926  *      connect for already running mixer, we need to find the sink pipes
927  *      here and bind to them. This way dynamic connect works.
928  *   - Start sink pipeline, if not running
929  *   - Then run current pipe
930  */
931 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
932 								struct skl *skl)
933 {
934 	struct skl_module_cfg *src_mconfig;
935 	struct skl_sst *ctx = skl->skl_sst;
936 	int ret = 0;
937 
938 	src_mconfig = w->priv;
939 
940 	/*
941 	 * find which sink it is connected to, bind with the sink,
942 	 * if sink is not started, start sink pipe first, then start
943 	 * this pipe
944 	 */
945 	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
946 	if (ret)
947 		return ret;
948 
949 	/* Start source pipe last after starting all sinks */
950 	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
951 		return skl_run_pipe(ctx, src_mconfig->pipe);
952 
953 	return 0;
954 }
955 
956 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
957 		struct snd_soc_dapm_widget *w, struct skl *skl)
958 {
959 	struct snd_soc_dapm_path *p;
960 	struct snd_soc_dapm_widget *src_w = NULL;
961 	struct skl_sst *ctx = skl->skl_sst;
962 
963 	snd_soc_dapm_widget_for_each_source_path(w, p) {
964 		src_w = p->source;
965 		if (!p->connect)
966 			continue;
967 
968 		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
969 		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
970 
971 		/*
972 		 * here we will check widgets in sink pipelines, so that can
973 		 * be any widgets type and we are only interested if they are
974 		 * ones used for SKL so check that first
975 		 */
976 		if ((p->source->priv != NULL) &&
977 					is_skl_dsp_widget_type(p->source)) {
978 			return p->source;
979 		}
980 	}
981 
982 	if (src_w != NULL)
983 		return skl_get_src_dsp_widget(src_w, skl);
984 
985 	return NULL;
986 }
987 
988 /*
989  * in the Post-PMU event of mixer we need to do following:
990  *   - Check if this pipe is running
991  *   - if not, then
992  *	- bind this pipeline to its source pipeline
993  *	  if source pipe is already running, this means it is a dynamic
994  *	  connection and we need to bind only to that pipe
995  *	- start this pipeline
996  */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If source pipe is already started, that means source is driving
	 * one more sink before this sink got connected, Since source is
	 * started, bind this sink to source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check pipe state, then no need to bind or start the
		 * pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		/* FE pipes are run by the PCM trigger path instead */
		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}
1043 
1044 /*
1045  * in the Pre-PMD event of mixer we need to do following:
1046  *   - Stop the pipe
1047  *   - find the source connections and remove that from dapm_path_list
1048  *   - unbind with source pipelines if still connected
1049  */
1050 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1051 							struct skl *skl)
1052 {
1053 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1054 	int ret = 0, i;
1055 	struct skl_sst *ctx = skl->skl_sst;
1056 
1057 	sink_mconfig = w->priv;
1058 
1059 	/* Stop the pipe */
1060 	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
1061 	if (ret)
1062 		return ret;
1063 
1064 	for (i = 0; i < sink_mconfig->max_in_queue; i++) {
1065 		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1066 			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1067 			if (!src_mconfig)
1068 				continue;
1069 
1070 			ret = skl_unbind_modules(ctx,
1071 						src_mconfig, sink_mconfig);
1072 		}
1073 	}
1074 
1075 	return ret;
1076 }
1077 
1078 /*
1079  * in the Post-PMD event of mixer we need to do following:
1080  *   - Free the mcps used
1081  *   - Free the mem used
1082  *   - Unbind the modules within the pipeline
1083  *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, pipeline delete is enough here)
1085  */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	/* nothing to tear down if the pipe was never created */
	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	/* return the MCPS and memory resources this pipe had claimed */
	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	/*
	 * Drop any deferred-bind entries that reference modules of this
	 * pipe before the modules themselves are unbound and deleted.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, Unbind the
			 * modules from deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(ctx, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	/*
	 * Unbind each adjacent (src, dst) pair of modules in the pipe.
	 * NOTE(review): src_module can be non-NULL here if the loop above
	 * ran, so the first iteration may call skl_unbind_modules() on an
	 * unrelated pair — presumably harmless if unbind checks pin state,
	 * but worth confirming.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	/* deleting the pipe leaves its modules uninitialized in the DSP */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}
1154 
1155 /*
1156  * in the Post-PMD event of PGA we need to do following:
1157  *   - Free the mcps used
1158  *   - Stop the pipeline
 *   - If source pipe is connected, unbind with source pipelines
1160  */
1161 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1162 								struct skl *skl)
1163 {
1164 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1165 	int ret = 0, i;
1166 	struct skl_sst *ctx = skl->skl_sst;
1167 
1168 	src_mconfig = w->priv;
1169 
1170 	/* Stop the pipe since this is a mixin module */
1171 	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1172 	if (ret)
1173 		return ret;
1174 
1175 	for (i = 0; i < src_mconfig->max_out_queue; i++) {
1176 		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1177 			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1178 			if (!sink_mconfig)
1179 				continue;
1180 			/*
1181 			 * This is a connecter and if path is found that means
1182 			 * unbind between source and sink has not happened yet
1183 			 */
1184 			ret = skl_unbind_modules(ctx, src_mconfig,
1185 							sink_mconfig);
1186 		}
1187 	}
1188 
1189 	return ret;
1190 }
1191 
1192 /*
1193  * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1194  * second one is required that is created as another pipe entity.
1195  * The mixer is responsible for pipe management and represent a pipeline
1196  * instance
1197  */
1198 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1199 				struct snd_kcontrol *k, int event)
1200 {
1201 	struct snd_soc_dapm_context *dapm = w->dapm;
1202 	struct skl *skl = get_skl_ctx(dapm->dev);
1203 
1204 	switch (event) {
1205 	case SND_SOC_DAPM_PRE_PMU:
1206 		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1207 
1208 	case SND_SOC_DAPM_POST_PMU:
1209 		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1210 
1211 	case SND_SOC_DAPM_PRE_PMD:
1212 		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1213 
1214 	case SND_SOC_DAPM_POST_PMD:
1215 		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1216 	}
1217 
1218 	return 0;
1219 }
1220 
1221 /*
1222  * In modelling, we assumed rest of the modules in pipeline are PGA. But we
1223  * are interested in last PGA (leaf PGA) in a pipeline to disconnect with
1224  * the sink when it is running (two FE to one BE or one FE to two BE)
1225  * scenarios
1226  */
1227 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1228 			struct snd_kcontrol *k, int event)
1229 
1230 {
1231 	struct snd_soc_dapm_context *dapm = w->dapm;
1232 	struct skl *skl = get_skl_ctx(dapm->dev);
1233 
1234 	switch (event) {
1235 	case SND_SOC_DAPM_PRE_PMU:
1236 		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1237 
1238 	case SND_SOC_DAPM_POST_PMD:
1239 		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1240 	}
1241 
1242 	return 0;
1243 }
1244 
1245 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1246 			unsigned int __user *data, unsigned int size)
1247 {
1248 	struct soc_bytes_ext *sb =
1249 			(struct soc_bytes_ext *)kcontrol->private_value;
1250 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1251 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1252 	struct skl_module_cfg *mconfig = w->priv;
1253 	struct skl *skl = get_skl_ctx(w->dapm->dev);
1254 
1255 	if (w->power)
1256 		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1257 				      bc->size, bc->param_id, mconfig);
1258 
1259 	/* decrement size for TLV header */
1260 	size -= 2 * sizeof(u32);
1261 
1262 	/* check size as we don't want to send kernel data */
1263 	if (size > bc->max)
1264 		size = bc->max;
1265 
1266 	if (bc->params) {
1267 		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1268 			return -EFAULT;
1269 		if (copy_to_user(data + 1, &size, sizeof(u32)))
1270 			return -EFAULT;
1271 		if (copy_to_user(data + 2, bc->params, size))
1272 			return -EFAULT;
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 #define SKL_PARAM_VENDOR_ID 0xff
1279 
/*
 * TLV byte-control "put": cache the user-supplied parameter payload and,
 * if the module is powered, push it to the DSP immediately.
 * Returns 0 on success, -EINVAL if the payload exceeds the control's
 * maximum, -EFAULT on a failed user copy.
 */
static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		/* never accept more than the buffer allocated for params */
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_is is of type Vendor, firmware expects actual
		 * parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			/* vendor params: copy the whole blob, header included */
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			/* otherwise skip the two-u32 TLV header */
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		/* push to the DSP right away if the module is powered */
		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}
1316 
1317 /*
1318  * Fill the dma id for host and link. In case of passthrough
 * pipeline, this will have both host and link in the same
1320  * pipeline, so need to copy the link and host based on dev_type
1321  */
1322 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1323 				struct skl_pipe_params *params)
1324 {
1325 	struct skl_pipe *pipe = mcfg->pipe;
1326 
1327 	if (pipe->passthru) {
1328 		switch (mcfg->dev_type) {
1329 		case SKL_DEVICE_HDALINK:
1330 			pipe->p_params->link_dma_id = params->link_dma_id;
1331 			pipe->p_params->link_index = params->link_index;
1332 			pipe->p_params->link_bps = params->link_bps;
1333 			break;
1334 
1335 		case SKL_DEVICE_HDAHOST:
1336 			pipe->p_params->host_dma_id = params->host_dma_id;
1337 			pipe->p_params->host_bps = params->host_bps;
1338 			break;
1339 
1340 		default:
1341 			break;
1342 		}
1343 		pipe->p_params->s_fmt = params->s_fmt;
1344 		pipe->p_params->ch = params->ch;
1345 		pipe->p_params->s_freq = params->s_freq;
1346 		pipe->p_params->stream = params->stream;
1347 		pipe->p_params->format = params->format;
1348 
1349 	} else {
1350 		memcpy(pipe->p_params, params, sizeof(*params));
1351 	}
1352 }
1353 
1354 /*
1355  * The FE params are passed by hw_params of the DAI.
1356  * On hw_params, the params are stored in Gateway module of the FE and we
1357  * need to calculate the format in DSP module configuration, that
1358  * conversion is done here
1359  */
1360 int skl_tplg_update_pipe_params(struct device *dev,
1361 			struct skl_module_cfg *mconfig,
1362 			struct skl_pipe_params *params)
1363 {
1364 	struct skl_module_fmt *format = NULL;
1365 
1366 	skl_tplg_fill_dma_id(mconfig, params);
1367 
1368 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1369 		format = &mconfig->in_fmt[0];
1370 	else
1371 		format = &mconfig->out_fmt[0];
1372 
1373 	/* set the hw_params */
1374 	format->s_freq = params->s_freq;
1375 	format->channels = params->ch;
1376 	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1377 
1378 	/*
1379 	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1380 	 * container so update bit depth accordingly
1381 	 */
1382 	switch (format->valid_bit_depth) {
1383 	case SKL_DEPTH_16BIT:
1384 		format->bit_depth = format->valid_bit_depth;
1385 		break;
1386 
1387 	case SKL_DEPTH_24BIT:
1388 	case SKL_DEPTH_32BIT:
1389 		format->bit_depth = SKL_DEPTH_32BIT;
1390 		break;
1391 
1392 	default:
1393 		dev_err(dev, "Invalid bit depth %x for pipe\n",
1394 				format->valid_bit_depth);
1395 		return -EINVAL;
1396 	}
1397 
1398 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1399 		mconfig->ibs = (format->s_freq / 1000) *
1400 				(format->channels) *
1401 				(format->bit_depth >> 3);
1402 	} else {
1403 		mconfig->obs = (format->s_freq / 1000) *
1404 				(format->channels) *
1405 				(format->bit_depth >> 3);
1406 	}
1407 
1408 	return 0;
1409 }
1410 
1411 /*
1412  * Query the module config for the FE DAI
1413  * This is used to find the hw_params set for that DAI and apply to FE
1414  * pipeline
1415  */
/*
 * Walk the DAPM paths next to the FE DAI widget (sinks for playback,
 * sources for capture) and return the first neighbouring widget that
 * carries module private data, i.e. the FE copier. Returns NULL if no
 * such widget is found.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			/*
			 * NOTE(review): this skips connected, powered sinks
			 * that are NOT skl DSP widgets, but then reads
			 * p->sink->priv even on paths that were not skipped
			 * for any other reason — confirm the intended
			 * filtering logic.
			 */
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			/* mirror of the playback-side filter, on sources */
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}
1452 
1453 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1454 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1455 {
1456 	struct snd_soc_dapm_path *p;
1457 	struct skl_module_cfg *mconfig = NULL;
1458 
1459 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1460 		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1461 			if (p->connect &&
1462 				    (p->sink->id == snd_soc_dapm_aif_out) &&
1463 				    p->source->priv) {
1464 				mconfig = p->source->priv;
1465 				return mconfig;
1466 			}
1467 			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1468 			if (mconfig)
1469 				return mconfig;
1470 		}
1471 	}
1472 	return mconfig;
1473 }
1474 
1475 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1476 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1477 {
1478 	struct snd_soc_dapm_path *p;
1479 	struct skl_module_cfg *mconfig = NULL;
1480 
1481 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1482 		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1483 			if (p->connect &&
1484 				    (p->source->id == snd_soc_dapm_aif_in) &&
1485 				    p->sink->priv) {
1486 				mconfig = p->sink->priv;
1487 				return mconfig;
1488 			}
1489 			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1490 			if (mconfig)
1491 				return mconfig;
1492 		}
1493 	}
1494 	return mconfig;
1495 }
1496 
1497 struct skl_module_cfg *
1498 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1499 {
1500 	struct snd_soc_dapm_widget *w;
1501 	struct skl_module_cfg *mconfig;
1502 
1503 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1504 		w = dai->playback_widget;
1505 		mconfig = skl_get_mconfig_pb_cpr(dai, w);
1506 	} else {
1507 		w = dai->capture_widget;
1508 		mconfig = skl_get_mconfig_cap_cpr(dai, w);
1509 	}
1510 	return mconfig;
1511 }
1512 
1513 static u8 skl_tplg_be_link_type(int dev_type)
1514 {
1515 	int ret;
1516 
1517 	switch (dev_type) {
1518 	case SKL_DEVICE_BT:
1519 		ret = NHLT_LINK_SSP;
1520 		break;
1521 
1522 	case SKL_DEVICE_DMIC:
1523 		ret = NHLT_LINK_DMIC;
1524 		break;
1525 
1526 	case SKL_DEVICE_I2S:
1527 		ret = NHLT_LINK_SSP;
1528 		break;
1529 
1530 	case SKL_DEVICE_HDALINK:
1531 		ret = NHLT_LINK_HDA;
1532 		break;
1533 
1534 	default:
1535 		ret = NHLT_LINK_INVALID;
1536 		break;
1537 	}
1538 
1539 	return ret;
1540 }
1541 
1542 /*
1543  * Fill the BE gateway parameters
1544  * The BE gateway expects a blob of parameters which are kept in the ACPI
1545  * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1546  * The port can have multiple settings so pick based on the PCM
1547  * parameters
1548  */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	/* HDA links need no NHLT blob; DMA id fill above is sufficient */
	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id*/
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		/*
		 * NOTE(review): caps points into the NHLT table, not a
		 * private copy — assumes the blob outlives this module
		 * config; confirm against skl_get_ep_blob's contract.
		 */
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}
1582 
1583 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1584 				struct snd_soc_dapm_widget *w,
1585 				struct skl_pipe_params *params)
1586 {
1587 	struct snd_soc_dapm_path *p;
1588 	int ret = -EIO;
1589 
1590 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1591 		if (p->connect && is_skl_dsp_widget_type(p->source) &&
1592 						p->source->priv) {
1593 
1594 			ret = skl_tplg_be_fill_pipe_params(dai,
1595 						p->source->priv, params);
1596 			if (ret < 0)
1597 				return ret;
1598 		} else {
1599 			ret = skl_tplg_be_set_src_pipe_params(dai,
1600 						p->source, params);
1601 			if (ret < 0)
1602 				return ret;
1603 		}
1604 	}
1605 
1606 	return ret;
1607 }
1608 
1609 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1610 	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1611 {
1612 	struct snd_soc_dapm_path *p = NULL;
1613 	int ret = -EIO;
1614 
1615 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1616 		if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1617 						p->sink->priv) {
1618 
1619 			ret = skl_tplg_be_fill_pipe_params(dai,
1620 						p->sink->priv, params);
1621 			if (ret < 0)
1622 				return ret;
1623 		} else {
1624 			ret = skl_tplg_be_set_sink_pipe_params(
1625 						dai, p->sink, params);
1626 			if (ret < 0)
1627 				return ret;
1628 		}
1629 	}
1630 
1631 	return ret;
1632 }
1633 
1634 /*
1635  * BE hw_params can be a source parameters (capture) or sink parameters
1636  * (playback). Based on sink and source we need to either find the source
1637  * list or the sink list and set the pipeline parameters
1638  */
1639 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1640 				struct skl_pipe_params *params)
1641 {
1642 	struct snd_soc_dapm_widget *w;
1643 
1644 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1645 		w = dai->playback_widget;
1646 
1647 		return skl_tplg_be_set_src_pipe_params(dai, w, params);
1648 
1649 	} else {
1650 		w = dai->capture_widget;
1651 
1652 		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1653 	}
1654 
1655 	return 0;
1656 }
1657 
/* DAPM widget event handlers, matched by event type id from topology */
static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};
1663 
/* get/put handlers for TLV byte controls created from topology */
static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};
1668 
1669 static int skl_tplg_fill_pipe_tkn(struct device *dev,
1670 			struct skl_pipe *pipe, u32 tkn,
1671 			u32 tkn_val)
1672 {
1673 
1674 	switch (tkn) {
1675 	case SKL_TKN_U32_PIPE_CONN_TYPE:
1676 		pipe->conn_type = tkn_val;
1677 		break;
1678 
1679 	case SKL_TKN_U32_PIPE_PRIORITY:
1680 		pipe->pipe_priority = tkn_val;
1681 		break;
1682 
1683 	case SKL_TKN_U32_PIPE_MEM_PGS:
1684 		pipe->memory_pages = tkn_val;
1685 		break;
1686 
1687 	case SKL_TKN_U32_PMODE:
1688 		pipe->lp_mode = tkn_val;
1689 		break;
1690 
1691 	default:
1692 		dev_err(dev, "Token not handled %d\n", tkn);
1693 		return -EINVAL;
1694 	}
1695 
1696 	return 0;
1697 }
1698 
1699 /*
1700  * Add pipeline by parsing the relevant tokens
1701  * Return an existing pipe if the pipe already exists.
1702  */
1703 static int skl_tplg_add_pipe(struct device *dev,
1704 		struct skl_module_cfg *mconfig, struct skl *skl,
1705 		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
1706 {
1707 	struct skl_pipeline *ppl;
1708 	struct skl_pipe *pipe;
1709 	struct skl_pipe_params *params;
1710 
1711 	list_for_each_entry(ppl, &skl->ppl_list, node) {
1712 		if (ppl->pipe->ppl_id == tkn_elem->value) {
1713 			mconfig->pipe = ppl->pipe;
1714 			return -EEXIST;
1715 		}
1716 	}
1717 
1718 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1719 	if (!ppl)
1720 		return -ENOMEM;
1721 
1722 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1723 	if (!pipe)
1724 		return -ENOMEM;
1725 
1726 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1727 	if (!params)
1728 		return -ENOMEM;
1729 
1730 	pipe->p_params = params;
1731 	pipe->ppl_id = tkn_elem->value;
1732 	INIT_LIST_HEAD(&pipe->w_list);
1733 
1734 	ppl->pipe = pipe;
1735 	list_add(&ppl->node, &skl->ppl_list);
1736 
1737 	mconfig->pipe = pipe;
1738 	mconfig->pipe->state = SKL_PIPE_INVALID;
1739 
1740 	return 0;
1741 }
1742 
1743 static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1744 			struct skl_module_pin *m_pin,
1745 			int pin_index, u32 value)
1746 {
1747 	switch (tkn) {
1748 	case SKL_TKN_U32_PIN_MOD_ID:
1749 		m_pin[pin_index].id.module_id = value;
1750 		break;
1751 
1752 	case SKL_TKN_U32_PIN_INST_ID:
1753 		m_pin[pin_index].id.instance_id = value;
1754 		break;
1755 
1756 	default:
1757 		dev_err(dev, "%d Not a pin token\n", value);
1758 		return -EINVAL;
1759 	}
1760 
1761 	return 0;
1762 }
1763 
1764 /*
1765  * Parse for pin config specific tokens to fill up the
1766  * module private data
1767  */
1768 static int skl_tplg_fill_pins_info(struct device *dev,
1769 		struct skl_module_cfg *mconfig,
1770 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1771 		int dir, int pin_count)
1772 {
1773 	int ret;
1774 	struct skl_module_pin *m_pin;
1775 
1776 	switch (dir) {
1777 	case SKL_DIR_IN:
1778 		m_pin = mconfig->m_in_pin;
1779 		break;
1780 
1781 	case SKL_DIR_OUT:
1782 		m_pin = mconfig->m_out_pin;
1783 		break;
1784 
1785 	default:
1786 		dev_err(dev, "Invalid direction value\n");
1787 		return -EINVAL;
1788 	}
1789 
1790 	ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1791 			m_pin, pin_count, tkn_elem->value);
1792 
1793 	if (ret < 0)
1794 		return ret;
1795 
1796 	m_pin[pin_count].in_use = false;
1797 	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1798 
1799 	return 0;
1800 }
1801 
1802 /*
1803  * Fill up input/output module config format based
1804  * on the direction
1805  */
1806 static int skl_tplg_fill_fmt(struct device *dev,
1807 		struct skl_module_cfg *mconfig,	u32 tkn,
1808 		u32 value, u32 dir, u32 pin_count)
1809 {
1810 	struct skl_module_fmt *dst_fmt;
1811 
1812 	switch (dir) {
1813 	case SKL_DIR_IN:
1814 		dst_fmt = mconfig->in_fmt;
1815 		dst_fmt += pin_count;
1816 		break;
1817 
1818 	case SKL_DIR_OUT:
1819 		dst_fmt = mconfig->out_fmt;
1820 		dst_fmt += pin_count;
1821 		break;
1822 
1823 	default:
1824 		dev_err(dev, "Invalid direction value\n");
1825 		return -EINVAL;
1826 	}
1827 
1828 	switch (tkn) {
1829 	case SKL_TKN_U32_FMT_CH:
1830 		dst_fmt->channels  = value;
1831 		break;
1832 
1833 	case SKL_TKN_U32_FMT_FREQ:
1834 		dst_fmt->s_freq = value;
1835 		break;
1836 
1837 	case SKL_TKN_U32_FMT_BIT_DEPTH:
1838 		dst_fmt->bit_depth = value;
1839 		break;
1840 
1841 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1842 		dst_fmt->valid_bit_depth = value;
1843 		break;
1844 
1845 	case SKL_TKN_U32_FMT_CH_CONFIG:
1846 		dst_fmt->ch_cfg = value;
1847 		break;
1848 
1849 	case SKL_TKN_U32_FMT_INTERLEAVE:
1850 		dst_fmt->interleaving_style = value;
1851 		break;
1852 
1853 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1854 		dst_fmt->sample_type = value;
1855 		break;
1856 
1857 	case SKL_TKN_U32_FMT_CH_MAP:
1858 		dst_fmt->ch_map = value;
1859 		break;
1860 
1861 	default:
1862 		dev_err(dev, "Invalid token %d\n", tkn);
1863 		return -EINVAL;
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1870 	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1871 {
1872 	if (uuid_tkn->token == SKL_TKN_UUID)
1873 		memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1874 	else {
1875 		dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
1876 		return -EINVAL;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 static void skl_tplg_fill_pin_dynamic_val(
1883 		struct skl_module_pin *mpin, u32 pin_count, u32 value)
1884 {
1885 	int i;
1886 
1887 	for (i = 0; i < pin_count; i++)
1888 		mpin[i].is_dynamic = value;
1889 }
1890 
1891 /*
1892  * Parse tokens to fill up the module private data
1893  */
1894 static int skl_tplg_get_token(struct device *dev,
1895 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1896 		struct skl *skl, struct skl_module_cfg *mconfig)
1897 {
1898 	int tkn_count = 0;
1899 	int ret;
1900 	static int is_pipe_exists;
1901 	static int pin_index, dir;
1902 
1903 	if (tkn_elem->token > SKL_TKN_MAX)
1904 		return -EINVAL;
1905 
1906 	switch (tkn_elem->token) {
1907 	case SKL_TKN_U8_IN_QUEUE_COUNT:
1908 		mconfig->max_in_queue = tkn_elem->value;
1909 		mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1910 					sizeof(*mconfig->m_in_pin),
1911 					GFP_KERNEL);
1912 		if (!mconfig->m_in_pin)
1913 			return -ENOMEM;
1914 
1915 		break;
1916 
1917 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
1918 		mconfig->max_out_queue = tkn_elem->value;
1919 		mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1920 					sizeof(*mconfig->m_out_pin),
1921 					GFP_KERNEL);
1922 
1923 		if (!mconfig->m_out_pin)
1924 			return -ENOMEM;
1925 
1926 		break;
1927 
1928 	case SKL_TKN_U8_DYN_IN_PIN:
1929 		if (!mconfig->m_in_pin)
1930 			return -ENOMEM;
1931 
1932 		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1933 			mconfig->max_in_queue, tkn_elem->value);
1934 
1935 		break;
1936 
1937 	case SKL_TKN_U8_DYN_OUT_PIN:
1938 		if (!mconfig->m_out_pin)
1939 			return -ENOMEM;
1940 
1941 		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1942 			mconfig->max_out_queue, tkn_elem->value);
1943 
1944 		break;
1945 
1946 	case SKL_TKN_U8_TIME_SLOT:
1947 		mconfig->time_slot = tkn_elem->value;
1948 		break;
1949 
1950 	case SKL_TKN_U8_CORE_ID:
1951 		mconfig->core_id = tkn_elem->value;
1952 
1953 	case SKL_TKN_U8_MOD_TYPE:
1954 		mconfig->m_type = tkn_elem->value;
1955 		break;
1956 
1957 	case SKL_TKN_U8_DEV_TYPE:
1958 		mconfig->dev_type = tkn_elem->value;
1959 		break;
1960 
1961 	case SKL_TKN_U8_HW_CONN_TYPE:
1962 		mconfig->hw_conn_type = tkn_elem->value;
1963 		break;
1964 
1965 	case SKL_TKN_U16_MOD_INST_ID:
1966 		mconfig->id.instance_id =
1967 		tkn_elem->value;
1968 		break;
1969 
1970 	case SKL_TKN_U32_MEM_PAGES:
1971 		mconfig->mem_pages = tkn_elem->value;
1972 		break;
1973 
1974 	case SKL_TKN_U32_MAX_MCPS:
1975 		mconfig->mcps = tkn_elem->value;
1976 		break;
1977 
1978 	case SKL_TKN_U32_OBS:
1979 		mconfig->obs = tkn_elem->value;
1980 		break;
1981 
1982 	case SKL_TKN_U32_IBS:
1983 		mconfig->ibs = tkn_elem->value;
1984 		break;
1985 
1986 	case SKL_TKN_U32_VBUS_ID:
1987 		mconfig->vbus_id = tkn_elem->value;
1988 		break;
1989 
1990 	case SKL_TKN_U32_PARAMS_FIXUP:
1991 		mconfig->params_fixup = tkn_elem->value;
1992 		break;
1993 
1994 	case SKL_TKN_U32_CONVERTER:
1995 		mconfig->converter = tkn_elem->value;
1996 		break;
1997 
1998 	case SKL_TKL_U32_D0I3_CAPS:
1999 		mconfig->d0i3_caps = tkn_elem->value;
2000 		break;
2001 
2002 	case SKL_TKN_U32_PIPE_ID:
2003 		ret = skl_tplg_add_pipe(dev,
2004 				mconfig, skl, tkn_elem);
2005 
2006 		if (ret < 0) {
2007 			if (ret == -EEXIST) {
2008 				is_pipe_exists = 1;
2009 				break;
2010 			}
2011 			return is_pipe_exists;
2012 		}
2013 
2014 		break;
2015 
2016 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2017 	case SKL_TKN_U32_PIPE_PRIORITY:
2018 	case SKL_TKN_U32_PIPE_MEM_PGS:
2019 	case SKL_TKN_U32_PMODE:
2020 		if (is_pipe_exists) {
2021 			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2022 					tkn_elem->token, tkn_elem->value);
2023 			if (ret < 0)
2024 				return ret;
2025 		}
2026 
2027 		break;
2028 
2029 	/*
2030 	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
2031 	 * direction and the pin count. The first four bits represent
2032 	 * direction and next four the pin count.
2033 	 */
2034 	case SKL_TKN_U32_DIR_PIN_COUNT:
2035 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2036 		pin_index = (tkn_elem->value &
2037 			SKL_PIN_COUNT_MASK) >> 4;
2038 
2039 		break;
2040 
2041 	case SKL_TKN_U32_FMT_CH:
2042 	case SKL_TKN_U32_FMT_FREQ:
2043 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2044 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2045 	case SKL_TKN_U32_FMT_CH_CONFIG:
2046 	case SKL_TKN_U32_FMT_INTERLEAVE:
2047 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2048 	case SKL_TKN_U32_FMT_CH_MAP:
2049 		ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
2050 				tkn_elem->value, dir, pin_index);
2051 
2052 		if (ret < 0)
2053 			return ret;
2054 
2055 		break;
2056 
2057 	case SKL_TKN_U32_PIN_MOD_ID:
2058 	case SKL_TKN_U32_PIN_INST_ID:
2059 		ret = skl_tplg_fill_pins_info(dev,
2060 				mconfig, tkn_elem, dir,
2061 				pin_index);
2062 		if (ret < 0)
2063 			return ret;
2064 
2065 		break;
2066 
2067 	case SKL_TKN_U32_CAPS_SIZE:
2068 		mconfig->formats_config.caps_size =
2069 			tkn_elem->value;
2070 
2071 		break;
2072 
2073 	case SKL_TKN_U32_PROC_DOMAIN:
2074 		mconfig->domain =
2075 			tkn_elem->value;
2076 
2077 		break;
2078 
2079 	case SKL_TKN_U8_IN_PIN_TYPE:
2080 	case SKL_TKN_U8_OUT_PIN_TYPE:
2081 	case SKL_TKN_U8_CONN_TYPE:
2082 		break;
2083 
2084 	default:
2085 		dev_err(dev, "Token %d not handled\n",
2086 				tkn_elem->token);
2087 		return -EINVAL;
2088 	}
2089 
2090 	tkn_count++;
2091 
2092 	return tkn_count;
2093 }
2094 
2095 /*
2096  * Parse the vendor array for specific tokens to construct
2097  * module private data
2098  */
/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data,	struct skl *skl,
		struct skl_module_cfg *mconfig, int block_size)
{
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	/* off: byte offset into pvt_data; tuple_size: bytes accounted */
	int off = 0, tuple_size = 0;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			/*
			 * NOTE(review): string arrays advance off but not
			 * tuple_size, so a block consisting only of string
			 * tuples would loop until off walks past the data —
			 * presumed unreachable with skl topologies; verify.
			 */
			dev_warn(dev, "no string tokens expected for skl tplg\n");
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);

			continue;

		default:
			/* value tuples: fall through to per-token parsing */
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		/* consume every value element in this array */
		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
					skl, mconfig);

			if (ret < 0)
				return ret;

			/* ret is the number of tokens consumed */
			tkn_count = tkn_count + ret;
			tkn_elem++;
		}

		tuple_size += tkn_count * sizeof(*tkn_elem);
	}

	return 0;
}
2152 
2153 /*
2154  * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
2156  */
2157 static int skl_tplg_get_desc_blocks(struct device *dev,
2158 		struct snd_soc_tplg_vendor_array *array)
2159 {
2160 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2161 
2162 	tkn_elem = array->value;
2163 
2164 	switch (tkn_elem->token) {
2165 	case SKL_TKN_U8_NUM_BLOCKS:
2166 	case SKL_TKN_U8_BLOCK_TYPE:
2167 	case SKL_TKN_U16_BLOCK_SIZE:
2168 		return tkn_elem->value;
2169 
2170 	default:
2171 		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2172 		break;
2173 	}
2174 
2175 	return -EINVAL;
2176 }
2177 
2178 /*
2179  * Parse the private data for the token and corresponding value.
2180  * The private data can have multiple data blocks. So, a data block
2181  * is preceded by a descriptor for number of blocks and a descriptor
 * for the type and size of the succeeding data block.
2183  */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				struct skl *skl, struct device *dev,
				struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	/* off tracks the byte offset into the widget's private data */
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);

	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		/* array/data now point at the block's payload */
		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			/*
			 * Non-tuple block: raw capability blob. Assumes
			 * caps was sized from an earlier CAPS_SIZE token.
			 * NOTE(review): off is not advanced past this
			 * blob, so only a single binary block as the last
			 * block parses correctly — confirm against the
			 * topology layout.
			 */
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
		}
	}

	return 0;
}
2245 
2246 static void skl_clear_pin_config(struct snd_soc_platform *platform,
2247 				struct snd_soc_dapm_widget *w)
2248 {
2249 	int i;
2250 	struct skl_module_cfg *mconfig;
2251 	struct skl_pipe *pipe;
2252 
2253 	if (!strncmp(w->dapm->component->name, platform->component.name,
2254 					strlen(platform->component.name))) {
2255 		mconfig = w->priv;
2256 		pipe = mconfig->pipe;
2257 		for (i = 0; i < mconfig->max_in_queue; i++) {
2258 			mconfig->m_in_pin[i].in_use = false;
2259 			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2260 		}
2261 		for (i = 0; i < mconfig->max_out_queue; i++) {
2262 			mconfig->m_out_pin[i].in_use = false;
2263 			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2264 		}
2265 		pipe->state = SKL_PIPE_INVALID;
2266 		mconfig->m_state = SKL_MODULE_UNINIT;
2267 	}
2268 }
2269 
2270 void skl_cleanup_resources(struct skl *skl)
2271 {
2272 	struct skl_sst *ctx = skl->skl_sst;
2273 	struct snd_soc_platform *soc_platform = skl->platform;
2274 	struct snd_soc_dapm_widget *w;
2275 	struct snd_soc_card *card;
2276 
2277 	if (soc_platform == NULL)
2278 		return;
2279 
2280 	card = soc_platform->component.card;
2281 	if (!card || !card->instantiated)
2282 		return;
2283 
2284 	skl->resource.mem = 0;
2285 	skl->resource.mcps = 0;
2286 
2287 	list_for_each_entry(w, &card->widgets, list) {
2288 		if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2289 			skl_clear_pin_config(soc_platform, w);
2290 	}
2291 
2292 	skl_clear_module_cnt(ctx->dsp);
2293 }
2294 
2295 /*
2296  * Topology core widget load callback
2297  *
2298  * This is used to save the private data for each widget which gives
2299  * information to the driver about module and pipeline parameters which DSP
2300  * FW expects like ids, resource values, formats etc
2301  */
2302 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2303 				struct snd_soc_dapm_widget *w,
2304 				struct snd_soc_tplg_dapm_widget *tplg_w)
2305 {
2306 	int ret;
2307 	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2308 	struct skl *skl = ebus_to_skl(ebus);
2309 	struct hdac_bus *bus = ebus_to_hbus(ebus);
2310 	struct skl_module_cfg *mconfig;
2311 
2312 	if (!tplg_w->priv.size)
2313 		goto bind_event;
2314 
2315 	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2316 
2317 	if (!mconfig)
2318 		return -ENOMEM;
2319 
2320 	w->priv = mconfig;
2321 
2322 	/*
2323 	 * module binary can be loaded later, so set it to query when
2324 	 * module is load for a use case
2325 	 */
2326 	mconfig->id.module_id = -1;
2327 
2328 	/* Parse private data for tuples */
2329 	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2330 	if (ret < 0)
2331 		return ret;
2332 bind_event:
2333 	if (tplg_w->event_type == 0) {
2334 		dev_dbg(bus->dev, "ASoC: No event handler required\n");
2335 		return 0;
2336 	}
2337 
2338 	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2339 					ARRAY_SIZE(skl_tplg_widget_ops),
2340 					tplg_w->event_type);
2341 
2342 	if (ret) {
2343 		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2344 					__func__, tplg_w->event_type);
2345 		return -EINVAL;
2346 	}
2347 
2348 	return 0;
2349 }
2350 
2351 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2352 					struct snd_soc_tplg_bytes_control *bc)
2353 {
2354 	struct skl_algo_data *ac;
2355 	struct skl_dfw_algo_data *dfw_ac =
2356 				(struct skl_dfw_algo_data *)bc->priv.data;
2357 
2358 	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2359 	if (!ac)
2360 		return -ENOMEM;
2361 
2362 	/* Fill private data */
2363 	ac->max = dfw_ac->max;
2364 	ac->param_id = dfw_ac->param_id;
2365 	ac->set_params = dfw_ac->set_params;
2366 	ac->size = dfw_ac->max;
2367 
2368 	if (ac->max) {
2369 		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2370 		if (!ac->params)
2371 			return -ENOMEM;
2372 
2373 		memcpy(ac->params, dfw_ac->params, ac->max);
2374 	}
2375 
2376 	be->dobj.private  = ac;
2377 	return 0;
2378 }
2379 
2380 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2381 				struct snd_kcontrol_new *kctl,
2382 				struct snd_soc_tplg_ctl_hdr *hdr)
2383 {
2384 	struct soc_bytes_ext *sb;
2385 	struct snd_soc_tplg_bytes_control *tplg_bc;
2386 	struct hdac_ext_bus *ebus  = snd_soc_component_get_drvdata(cmpnt);
2387 	struct hdac_bus *bus = ebus_to_hbus(ebus);
2388 
2389 	switch (hdr->ops.info) {
2390 	case SND_SOC_TPLG_CTL_BYTES:
2391 		tplg_bc = container_of(hdr,
2392 				struct snd_soc_tplg_bytes_control, hdr);
2393 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2394 			sb = (struct soc_bytes_ext *)kctl->private_value;
2395 			if (tplg_bc->priv.size)
2396 				return skl_init_algo_data(
2397 						bus->dev, sb, tplg_bc);
2398 		}
2399 		break;
2400 
2401 	default:
2402 		dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2403 			hdr->ops.get, hdr->ops.put, hdr->ops.info);
2404 		break;
2405 	}
2406 
2407 	return 0;
2408 }
2409 
2410 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2411 		struct snd_soc_tplg_vendor_string_elem *str_elem,
2412 		struct skl *skl)
2413 {
2414 	int tkn_count = 0;
2415 	static int ref_count;
2416 
2417 	switch (str_elem->token) {
2418 	case SKL_TKN_STR_LIB_NAME:
2419 		if (ref_count > skl->skl_sst->lib_count - 1) {
2420 			ref_count = 0;
2421 			return -EINVAL;
2422 		}
2423 
2424 		strncpy(skl->skl_sst->lib_info[ref_count].name,
2425 			str_elem->string,
2426 			ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
2427 		ref_count++;
2428 		tkn_count++;
2429 		break;
2430 
2431 	default:
2432 		dev_err(dev, "Not a string token %d\n", str_elem->token);
2433 		break;
2434 	}
2435 
2436 	return tkn_count;
2437 }
2438 
2439 static int skl_tplg_get_str_tkn(struct device *dev,
2440 		struct snd_soc_tplg_vendor_array *array,
2441 		struct skl *skl)
2442 {
2443 	int tkn_count = 0, ret;
2444 	struct snd_soc_tplg_vendor_string_elem *str_elem;
2445 
2446 	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2447 	while (tkn_count < array->num_elems) {
2448 		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
2449 		str_elem++;
2450 
2451 		if (ret < 0)
2452 			return ret;
2453 
2454 		tkn_count = tkn_count + ret;
2455 	}
2456 
2457 	return tkn_count;
2458 }
2459 
2460 static int skl_tplg_get_int_tkn(struct device *dev,
2461 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2462 		struct skl *skl)
2463 {
2464 	int tkn_count = 0;
2465 
2466 	switch (tkn_elem->token) {
2467 	case SKL_TKN_U32_LIB_COUNT:
2468 		skl->skl_sst->lib_count = tkn_elem->value;
2469 		tkn_count++;
2470 		break;
2471 
2472 	default:
2473 		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
2474 		return -EINVAL;
2475 	}
2476 
2477 	return tkn_count;
2478 }
2479 
2480 /*
2481  * Fill the manifest structure by parsing the tokens based on the
2482  * type.
2483  */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	/* walk the tuple arrays until the accounted size covers the block */
	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		/* off tracks raw bytes; tuple_size tracks parsed elements */
		off += array->size;
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);

			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			/* UUID tuples are not used in the SKL manifest */
			dev_warn(dev, "no uuid tokens for skl tplf manifest\n");
			continue;

		default:
			/* integer value tuples: parsed element-wise below */
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		/*
		 * NOTE(review): the unconditional 'break' below exits after
		 * the FIRST value element, so only one integer token per
		 * array is consumed even though 'off' already advanced past
		 * the whole array, and tuple_size may under-count — confirm
		 * this matches the manifest layout the FW tooling emits.
		 */
		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, skl);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_value_elem);
			break;
		}
		tkn_count = 0;
	}

	return 0;
}
2538 
2539 /*
2540  * Parse manifest private data for tokens. The private data block is
2541  * preceded by descriptors for type and size of data block.
2542  */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
			struct device *dev, struct skl *skl)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

	/*
	 * Read the BLOCK_TYPE and BLOCK_SIZE descriptor pair preceding
	 * each data block, then hand the block to the tuple parser.
	 * NOTE(review): 'off' is never validated against
	 * manifest->priv.size, so a malformed manifest can drive the
	 * descriptor reads out of bounds — consider adding range checks.
	 */
	while (num_blocks > 0) {
		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		data = (manifest->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
					block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			/* only tuple blocks are valid in a manifest */
			return -EINVAL;
		}
	}

	return 0;
}
2601 
2602 static int skl_manifest_load(struct snd_soc_component *cmpnt,
2603 				struct snd_soc_tplg_manifest *manifest)
2604 {
2605 	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2606 	struct hdac_bus *bus = ebus_to_hbus(ebus);
2607 	struct skl *skl = ebus_to_skl(ebus);
2608 
2609 	/* proceed only if we have private data defined */
2610 	if (manifest->priv.size == 0)
2611 		return 0;
2612 
2613 	skl_tplg_get_manifest_data(manifest, bus->dev, skl);
2614 
2615 	if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
2616 		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2617 					skl->skl_sst->lib_count);
2618 		return  -EINVAL;
2619 	}
2620 
2621 	return 0;
2622 }
2623 
/* Callbacks handed to the ASoC topology core for this platform driver */
static struct snd_soc_tplg_ops skl_tplg_ops  = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.manifest = skl_manifest_load,
};
2631 
2632 /*
2633  * A pipe can have multiple modules, each of them will be a DAPM widget as
2634  * well. While managing a pipeline we need to get the list of all the
2635  * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list()
2636  * helps to get the SKL type widgets in that pipeline
2637  */
2638 static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2639 {
2640 	struct snd_soc_dapm_widget *w;
2641 	struct skl_module_cfg *mcfg = NULL;
2642 	struct skl_pipe_module *p_module = NULL;
2643 	struct skl_pipe *pipe;
2644 
2645 	list_for_each_entry(w, &platform->component.card->widgets, list) {
2646 		if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2647 			mcfg = w->priv;
2648 			pipe = mcfg->pipe;
2649 
2650 			p_module = devm_kzalloc(platform->dev,
2651 						sizeof(*p_module), GFP_KERNEL);
2652 			if (!p_module)
2653 				return -ENOMEM;
2654 
2655 			p_module->w = w;
2656 			list_add_tail(&p_module->node, &pipe->w_list);
2657 		}
2658 	}
2659 
2660 	return 0;
2661 }
2662 
2663 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2664 {
2665 	struct skl_pipe_module *w_module;
2666 	struct snd_soc_dapm_widget *w;
2667 	struct skl_module_cfg *mconfig;
2668 	bool host_found = false, link_found = false;
2669 
2670 	list_for_each_entry(w_module, &pipe->w_list, node) {
2671 		w = w_module->w;
2672 		mconfig = w->priv;
2673 
2674 		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2675 			host_found = true;
2676 		else if (mconfig->dev_type != SKL_DEVICE_NONE)
2677 			link_found = true;
2678 	}
2679 
2680 	if (host_found && link_found)
2681 		pipe->passthru = true;
2682 	else
2683 		pipe->passthru = false;
2684 }
2685 
2686 /* This will be read from topology manifest, currently defined here */
2687 #define SKL_MAX_MCPS 30000000
2688 #define SKL_FW_MAX_MEM 1000000
2689 
2690 /*
2691  * SKL topology init routine
2692  */
2693 int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2694 {
2695 	int ret;
2696 	const struct firmware *fw;
2697 	struct hdac_bus *bus = ebus_to_hbus(ebus);
2698 	struct skl *skl = ebus_to_skl(ebus);
2699 	struct skl_pipeline *ppl;
2700 
2701 	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
2702 	if (ret < 0) {
2703 		dev_err(bus->dev, "tplg fw %s load failed with %d\n",
2704 				skl->tplg_name, ret);
2705 		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2706 		if (ret < 0) {
2707 			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2708 					"dfw_sst.bin", ret);
2709 			return ret;
2710 		}
2711 	}
2712 
2713 	/*
2714 	 * The complete tplg for SKL is loaded as index 0, we don't use
2715 	 * any other index
2716 	 */
2717 	ret = snd_soc_tplg_component_load(&platform->component,
2718 					&skl_tplg_ops, fw, 0);
2719 	if (ret < 0) {
2720 		dev_err(bus->dev, "tplg component load failed%d\n", ret);
2721 		release_firmware(fw);
2722 		return -EINVAL;
2723 	}
2724 
2725 	skl->resource.max_mcps = SKL_MAX_MCPS;
2726 	skl->resource.max_mem = SKL_FW_MAX_MEM;
2727 
2728 	skl->tplg = fw;
2729 	ret = skl_tplg_create_pipe_widget_list(platform);
2730 	if (ret < 0)
2731 		return ret;
2732 
2733 	list_for_each_entry(ppl, &skl->ppl_list, node)
2734 		skl_tplg_set_pipe_type(skl, ppl->pipe);
2735 
2736 	return 0;
2737 }
2738