1 /* 2 * skl-topology.c - Implements Platform component ALSA controls/widget 3 * handlers. 4 * 5 * Copyright (C) 2014-2015 Intel Corp 6 * Author: Jeeja KP <jeeja.kp@intel.com> 7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 */ 18 19 #include <linux/slab.h> 20 #include <linux/types.h> 21 #include <linux/firmware.h> 22 #include <sound/soc.h> 23 #include <sound/soc-topology.h> 24 #include <uapi/sound/snd_sst_tokens.h> 25 #include "skl-sst-dsp.h" 26 #include "skl-sst-ipc.h" 27 #include "skl-topology.h" 28 #include "skl.h" 29 #include "skl-tplg-interface.h" 30 #include "../common/sst-dsp.h" 31 #include "../common/sst-dsp-priv.h" 32 33 #define SKL_CH_FIXUP_MASK (1 << 0) 34 #define SKL_RATE_FIXUP_MASK (1 << 1) 35 #define SKL_FMT_FIXUP_MASK (1 << 2) 36 #define SKL_IN_DIR_BIT_MASK BIT(0) 37 #define SKL_PIN_COUNT_MASK GENMASK(7, 4) 38 39 static const int mic_mono_list[] = { 40 0, 1, 2, 3, 41 }; 42 static const int mic_stereo_list[][SKL_CH_STEREO] = { 43 {0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}, 44 }; 45 static const int mic_trio_list[][SKL_CH_TRIO] = { 46 {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3}, 47 }; 48 static const int mic_quatro_list[][SKL_CH_QUATRO] = { 49 {0, 1, 2, 3}, 50 }; 51 52 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \ 53 ((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq)) 54 55 void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps) 56 { 57 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3; 58 59 switch (caps) { 60 case SKL_D0I3_NONE: 61 d0i3->non_d0i3++; 62 break; 63 64 case SKL_D0I3_STREAMING: 65 d0i3->streaming++; 66 break; 67 68 case SKL_D0I3_NON_STREAMING: 69 d0i3->non_streaming++; 70 break; 71 } 72 } 73 74 void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps) 75 { 76 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3; 77 78 switch (caps) { 79 case SKL_D0I3_NONE: 80 d0i3->non_d0i3--; 81 break; 82 83 case SKL_D0I3_STREAMING: 84 d0i3->streaming--; 85 break; 86 87 case SKL_D0I3_NON_STREAMING: 88 d0i3->non_streaming--; 89 break; 90 } 91 } 92 93 /* 94 * SKL DSP driver modelling uses only few DAPM widgets so for rest we will 95 * ignore. This helpers checks if the SKL driver handles this widget type 96 */ 97 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w) 98 { 99 switch (w->id) { 100 case snd_soc_dapm_dai_link: 101 case snd_soc_dapm_dai_in: 102 case snd_soc_dapm_aif_in: 103 case snd_soc_dapm_aif_out: 104 case snd_soc_dapm_dai_out: 105 case snd_soc_dapm_switch: 106 return false; 107 default: 108 return true; 109 } 110 } 111 112 /* 113 * Each pipelines needs memory to be allocated. Check if we have free memory 114 * from available pool. 
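 *
 * For example (illustrative numbers only): with resource.max_mem at 100
 * pages, resource.mem already at 80 pages and a pipe that needs 24
 * memory_pages, 80 + 24 > 100, so the availability check below fails and
 * the pipe is not created.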
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the mem to the mem pool. This is freed when pipe is deleted.
 * Note: DSP does actual memory management, we only keep track of the
 * complete pool
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}

/*
 * A pipeline needs DSP CPU resources for computation, this is
 * quantified in MCPS (Million Clocks Per Second) required for module/pipe
 *
 * Each pipeline needs mcps to be allocated. Check if we have mcps for this
 * pipe.
 */

static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps += res->cps;
}

/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps -= res->cps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}


static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[0];

	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

static void skl_tplg_update_chmap(struct
skl_module_fmt *fmt, int chs) 228 { 229 int slot_map = 0xFFFFFFFF; 230 int start_slot = 0; 231 int i; 232 233 for (i = 0; i < chs; i++) { 234 /* 235 * For 2 channels with starting slot as 0, slot map will 236 * look like 0xFFFFFF10. 237 */ 238 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i))); 239 start_slot++; 240 } 241 fmt->ch_map = slot_map; 242 } 243 244 static void skl_tplg_update_params(struct skl_module_fmt *fmt, 245 struct skl_pipe_params *params, int fixup) 246 { 247 if (fixup & SKL_RATE_FIXUP_MASK) 248 fmt->s_freq = params->s_freq; 249 if (fixup & SKL_CH_FIXUP_MASK) { 250 fmt->channels = params->ch; 251 skl_tplg_update_chmap(fmt, fmt->channels); 252 } 253 if (fixup & SKL_FMT_FIXUP_MASK) { 254 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 255 256 /* 257 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 258 * container so update bit depth accordingly 259 */ 260 switch (fmt->valid_bit_depth) { 261 case SKL_DEPTH_16BIT: 262 fmt->bit_depth = fmt->valid_bit_depth; 263 break; 264 265 default: 266 fmt->bit_depth = SKL_DEPTH_32BIT; 267 break; 268 } 269 } 270 271 } 272 273 /* 274 * A pipeline may have modules which impact the pcm parameters, like SRC, 275 * channel converter, format converter. 276 * We need to calculate the output params by applying the 'fixup' 277 * Topology will tell driver which type of fixup is to be applied by 278 * supplying the fixup mask, so based on that we calculate the output 279 * 280 * Now In FE the pcm hw_params is source/target format. Same is applicable 281 * for BE with its hw_params invoked. 282 * here based on FE, BE pipeline and direction we calculate the input and 283 * outfix and then apply that for a module 284 */ 285 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg, 286 struct skl_pipe_params *params, bool is_fe) 287 { 288 int in_fixup, out_fixup; 289 struct skl_module_fmt *in_fmt, *out_fmt; 290 291 /* Fixups will be applied to pin 0 only */ 292 in_fmt = &m_cfg->module->formats[0].inputs[0].fmt; 293 out_fmt = &m_cfg->module->formats[0].outputs[0].fmt; 294 295 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 296 if (is_fe) { 297 in_fixup = m_cfg->params_fixup; 298 out_fixup = (~m_cfg->converter) & 299 m_cfg->params_fixup; 300 } else { 301 out_fixup = m_cfg->params_fixup; 302 in_fixup = (~m_cfg->converter) & 303 m_cfg->params_fixup; 304 } 305 } else { 306 if (is_fe) { 307 out_fixup = m_cfg->params_fixup; 308 in_fixup = (~m_cfg->converter) & 309 m_cfg->params_fixup; 310 } else { 311 in_fixup = m_cfg->params_fixup; 312 out_fixup = (~m_cfg->converter) & 313 m_cfg->params_fixup; 314 } 315 } 316 317 skl_tplg_update_params(in_fmt, params, in_fixup); 318 skl_tplg_update_params(out_fmt, params, out_fixup); 319 } 320 321 /* 322 * A module needs input and output buffers, which are dependent upon pcm 323 * params, so once we have calculate params, we need buffer calculation as 324 * well. 
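 *
 * For example (illustrative values): a 48000 Hz, 2 channel stream in a
 * 32 bit container gives DIV_ROUND_UP(48000, 1000) * 2 * 4 = 384 bytes,
 * i.e. one millisecond worth of audio; for an SRC module the result is
 * further multiplied by 5.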
325 */ 326 static void skl_tplg_update_buffer_size(struct skl_sst *ctx, 327 struct skl_module_cfg *mcfg) 328 { 329 int multiplier = 1; 330 struct skl_module_fmt *in_fmt, *out_fmt; 331 struct skl_module_res *res; 332 333 /* Since fixups is applied to pin 0 only, ibs, obs needs 334 * change for pin 0 only 335 */ 336 res = &mcfg->module->resources[0]; 337 in_fmt = &mcfg->module->formats[0].inputs[0].fmt; 338 out_fmt = &mcfg->module->formats[0].outputs[0].fmt; 339 340 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 341 multiplier = 5; 342 343 res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) * 344 in_fmt->channels * (in_fmt->bit_depth >> 3) * 345 multiplier; 346 347 res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) * 348 out_fmt->channels * (out_fmt->bit_depth >> 3) * 349 multiplier; 350 } 351 352 static u8 skl_tplg_be_dev_type(int dev_type) 353 { 354 int ret; 355 356 switch (dev_type) { 357 case SKL_DEVICE_BT: 358 ret = NHLT_DEVICE_BT; 359 break; 360 361 case SKL_DEVICE_DMIC: 362 ret = NHLT_DEVICE_DMIC; 363 break; 364 365 case SKL_DEVICE_I2S: 366 ret = NHLT_DEVICE_I2S; 367 break; 368 369 default: 370 ret = NHLT_DEVICE_INVALID; 371 break; 372 } 373 374 return ret; 375 } 376 377 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 378 struct skl_sst *ctx) 379 { 380 struct skl_module_cfg *m_cfg = w->priv; 381 int link_type, dir; 382 u32 ch, s_freq, s_fmt; 383 struct nhlt_specific_cfg *cfg; 384 struct skl *skl = get_skl_ctx(ctx->dev); 385 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type); 386 int fmt_idx = m_cfg->fmt_idx; 387 struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx]; 388 389 /* check if we already have blob */ 390 if (m_cfg->formats_config.caps_size > 0) 391 return 0; 392 393 dev_dbg(ctx->dev, "Applying default cfg blob\n"); 394 switch (m_cfg->dev_type) { 395 case SKL_DEVICE_DMIC: 396 link_type = NHLT_LINK_DMIC; 397 dir = SNDRV_PCM_STREAM_CAPTURE; 398 s_freq = m_iface->inputs[0].fmt.s_freq; 399 s_fmt = m_iface->inputs[0].fmt.bit_depth; 400 ch = m_iface->inputs[0].fmt.channels; 401 break; 402 403 case SKL_DEVICE_I2S: 404 link_type = NHLT_LINK_SSP; 405 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) { 406 dir = SNDRV_PCM_STREAM_PLAYBACK; 407 s_freq = m_iface->outputs[0].fmt.s_freq; 408 s_fmt = m_iface->outputs[0].fmt.bit_depth; 409 ch = m_iface->outputs[0].fmt.channels; 410 } else { 411 dir = SNDRV_PCM_STREAM_CAPTURE; 412 s_freq = m_iface->inputs[0].fmt.s_freq; 413 s_fmt = m_iface->inputs[0].fmt.bit_depth; 414 ch = m_iface->inputs[0].fmt.channels; 415 } 416 break; 417 418 default: 419 return -EINVAL; 420 } 421 422 /* update the blob based on virtual bus_id and default params */ 423 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type, 424 s_fmt, ch, s_freq, dir, dev_type); 425 if (cfg) { 426 m_cfg->formats_config.caps_size = cfg->size; 427 m_cfg->formats_config.caps = (u32 *) &cfg->caps; 428 } else { 429 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n", 430 m_cfg->vbus_id, link_type, dir); 431 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n", 432 ch, s_freq, s_fmt); 433 return -EIO; 434 } 435 436 return 0; 437 } 438 439 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w, 440 struct skl_sst *ctx) 441 { 442 struct skl_module_cfg *m_cfg = w->priv; 443 struct skl_pipe_params *params = m_cfg->pipe->p_params; 444 int p_conn_type = m_cfg->pipe->conn_type; 445 bool is_fe; 446 447 if (!m_cfg->params_fixup) 448 return; 449 450 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n", 451 w->name); 452 453 skl_dump_mconfig(ctx, m_cfg); 454 455 if 
(p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
			w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls and these
 * need to be set after the module is initialized. If the set_params flag
 * is SKL_PARAM_SET, the module params are sent after the module is
 * initialised.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from a user control and are required when
 * the module is initialized. Such a parameter is identified by the
 * SKL_PARAM_INIT set_params flag; if the flag is set, the parameter is
 * sent as part of the module init data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules.
These modules need 562 * to instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by 563 * skl_init_module() routine, so invoke that for all modules in a pipeline 564 */ 565 static int 566 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) 567 { 568 struct skl_pipe_module *w_module; 569 struct snd_soc_dapm_widget *w; 570 struct skl_module_cfg *mconfig; 571 struct skl_sst *ctx = skl->skl_sst; 572 u8 cfg_idx; 573 int ret = 0; 574 575 list_for_each_entry(w_module, &pipe->w_list, node) { 576 uuid_le *uuid_mod; 577 w = w_module->w; 578 mconfig = w->priv; 579 580 /* check if module ids are populated */ 581 if (mconfig->id.module_id < 0) { 582 dev_err(skl->skl_sst->dev, 583 "module %pUL id not populated\n", 584 (uuid_le *)mconfig->guid); 585 return -EIO; 586 } 587 588 cfg_idx = mconfig->pipe->cur_config_idx; 589 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 590 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 591 592 /* check resource available */ 593 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 594 return -ENOMEM; 595 596 if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) { 597 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 598 mconfig->id.module_id, mconfig->guid); 599 if (ret < 0) 600 return ret; 601 602 mconfig->m_state = SKL_MODULE_LOADED; 603 } 604 605 /* prepare the DMA if the module is gateway cpr */ 606 ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig); 607 if (ret < 0) 608 return ret; 609 610 /* update blob if blob is null for be with default value */ 611 skl_tplg_update_be_blob(w, ctx); 612 613 /* 614 * apply fix/conversion to module params based on 615 * FE/BE params 616 */ 617 skl_tplg_update_module_params(w, ctx); 618 uuid_mod = (uuid_le *)mconfig->guid; 619 mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod, 620 mconfig->id.instance_id); 621 if (mconfig->id.pvt_id < 0) 622 return ret; 623 skl_tplg_set_module_init_data(w); 624 625 ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id); 626 if (ret < 0) { 627 dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n", 628 mconfig->core_id, ret); 629 return ret; 630 } 631 632 ret = skl_init_module(ctx, mconfig); 633 if (ret < 0) { 634 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id); 635 goto err; 636 } 637 skl_tplg_alloc_pipe_mcps(skl, mconfig); 638 ret = skl_tplg_set_module_params(w, ctx); 639 if (ret < 0) 640 goto err; 641 } 642 643 return 0; 644 err: 645 skl_dsp_put_core(ctx->dsp, mconfig->core_id); 646 return ret; 647 } 648 649 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx, 650 struct skl_pipe *pipe) 651 { 652 int ret = 0; 653 struct skl_pipe_module *w_module = NULL; 654 struct skl_module_cfg *mconfig = NULL; 655 656 list_for_each_entry(w_module, &pipe->w_list, node) { 657 uuid_le *uuid_mod; 658 mconfig = w_module->w->priv; 659 uuid_mod = (uuid_le *)mconfig->guid; 660 661 if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod && 662 mconfig->m_state > SKL_MODULE_UNINIT) { 663 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp, 664 mconfig->id.module_id); 665 if (ret < 0) 666 return -EIO; 667 } 668 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id); 669 670 ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id); 671 if (ret < 0) { 672 /* don't return; continue with other modules */ 673 dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n", 674 mconfig->core_id, ret); 675 } 676 } 677 678 /* no modules to unload in this path, so return */ 679 return ret; 680 } 681 682 /* 683 * Here, we select pipe format based on the pipe type and pipe 684 * direction to determine the current config 
index for the pipeline.
 * The config index is then used to select proper module resources.
 * Intermediate pipes currently have a fixed format hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		pipe->cur_config_idx = 0;
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
		dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
		pipe->cur_config_idx = 0;
		pipe->memory_pages = pconfig->mem_pages;

		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
		pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
		(pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
		pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			pipe->cur_config_idx = i;
			pipe->memory_pages = pconfig->mem_pages;
			dev_dbg(ctx->dev, "Using pipe config: %d\n", i);

			return 0;
		}
	}

	dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
 * need to create the pipeline. So we do the following:
 * - check the resources
 * - Create the pipeline
 * - Initialize the modules in pipeline
 * - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
773 * This list contains modules from source to sink 774 */ 775 ret = skl_create_pipeline(ctx, mconfig->pipe); 776 if (ret < 0) 777 return ret; 778 779 skl_tplg_alloc_pipe_mem(skl, mconfig); 780 skl_tplg_alloc_pipe_mcps(skl, mconfig); 781 782 /* Init all pipe modules from source to sink */ 783 ret = skl_tplg_init_pipe_modules(skl, s_pipe); 784 if (ret < 0) 785 return ret; 786 787 /* Bind modules from source to sink */ 788 list_for_each_entry(w_module, &s_pipe->w_list, node) { 789 dst_module = w_module->w->priv; 790 791 if (src_module == NULL) { 792 src_module = dst_module; 793 continue; 794 } 795 796 ret = skl_bind_modules(ctx, src_module, dst_module); 797 if (ret < 0) 798 return ret; 799 800 src_module = dst_module; 801 } 802 803 /* 804 * When the destination module is initialized, check for these modules 805 * in deferred bind list. If found, bind them. 806 */ 807 list_for_each_entry(w_module, &s_pipe->w_list, node) { 808 if (list_empty(&skl->bind_list)) 809 break; 810 811 list_for_each_entry(modules, &skl->bind_list, node) { 812 module = w_module->w->priv; 813 if (modules->dst == module) 814 skl_bind_modules(ctx, modules->src, 815 modules->dst); 816 } 817 } 818 819 return 0; 820 } 821 822 static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params, 823 int size, struct skl_module_cfg *mcfg) 824 { 825 int i, pvt_id; 826 827 if (mcfg->m_type == SKL_MODULE_TYPE_KPB) { 828 struct skl_kpb_params *kpb_params = 829 (struct skl_kpb_params *)params; 830 struct skl_mod_inst_map *inst = kpb_params->map; 831 832 for (i = 0; i < kpb_params->num_modules; i++) { 833 pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id, 834 inst->inst_id); 835 if (pvt_id < 0) 836 return -EINVAL; 837 838 inst->inst_id = pvt_id; 839 inst++; 840 } 841 } 842 843 return 0; 844 } 845 /* 846 * Some modules require params to be set after the module is bound to 847 * all pins connected. 848 * 849 * The module provider initializes set_param flag for such modules and we 850 * send params after binding 851 */ 852 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w, 853 struct skl_module_cfg *mcfg, struct skl_sst *ctx) 854 { 855 int i, ret; 856 struct skl_module_cfg *mconfig = w->priv; 857 const struct snd_kcontrol_new *k; 858 struct soc_bytes_ext *sb; 859 struct skl_algo_data *bc; 860 struct skl_specific_cfg *sp_cfg; 861 u32 *params; 862 863 /* 864 * check all out/in pins are in bind state. 
865 * if so set the module param 866 */ 867 for (i = 0; i < mcfg->module->max_output_pins; i++) { 868 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE) 869 return 0; 870 } 871 872 for (i = 0; i < mcfg->module->max_input_pins; i++) { 873 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE) 874 return 0; 875 } 876 877 if (mconfig->formats_config.caps_size > 0 && 878 mconfig->formats_config.set_params == SKL_PARAM_BIND) { 879 sp_cfg = &mconfig->formats_config; 880 ret = skl_set_module_params(ctx, sp_cfg->caps, 881 sp_cfg->caps_size, 882 sp_cfg->param_id, mconfig); 883 if (ret < 0) 884 return ret; 885 } 886 887 for (i = 0; i < w->num_kcontrols; i++) { 888 k = &w->kcontrol_news[i]; 889 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 890 sb = (void *) k->private_value; 891 bc = (struct skl_algo_data *)sb->dobj.private; 892 893 if (bc->set_params == SKL_PARAM_BIND) { 894 params = kzalloc(bc->max, GFP_KERNEL); 895 if (!params) 896 return -ENOMEM; 897 898 memcpy(params, bc->params, bc->max); 899 skl_fill_sink_instance_id(ctx, params, bc->max, 900 mconfig); 901 902 ret = skl_set_module_params(ctx, params, 903 bc->max, bc->param_id, mconfig); 904 kfree(params); 905 906 if (ret < 0) 907 return ret; 908 } 909 } 910 } 911 912 return 0; 913 } 914 915 916 static int skl_tplg_module_add_deferred_bind(struct skl *skl, 917 struct skl_module_cfg *src, struct skl_module_cfg *dst) 918 { 919 struct skl_module_deferred_bind *m_list, *modules; 920 int i; 921 922 /* only supported for module with static pin connection */ 923 for (i = 0; i < dst->module->max_input_pins; i++) { 924 struct skl_module_pin *pin = &dst->m_in_pin[i]; 925 926 if (pin->is_dynamic) 927 continue; 928 929 if ((pin->id.module_id == src->id.module_id) && 930 (pin->id.instance_id == src->id.instance_id)) { 931 932 if (!list_empty(&skl->bind_list)) { 933 list_for_each_entry(modules, &skl->bind_list, node) { 934 if (modules->src == src && modules->dst == dst) 935 return 0; 936 } 937 } 938 939 m_list = kzalloc(sizeof(*m_list), GFP_KERNEL); 940 if (!m_list) 941 return -ENOMEM; 942 943 m_list->src = src; 944 m_list->dst = dst; 945 946 list_add(&m_list->node, &skl->bind_list); 947 } 948 } 949 950 return 0; 951 } 952 953 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, 954 struct skl *skl, 955 struct snd_soc_dapm_widget *src_w, 956 struct skl_module_cfg *src_mconfig) 957 { 958 struct snd_soc_dapm_path *p; 959 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL; 960 struct skl_module_cfg *sink_mconfig; 961 struct skl_sst *ctx = skl->skl_sst; 962 int ret; 963 964 snd_soc_dapm_widget_for_each_sink_path(w, p) { 965 if (!p->connect) 966 continue; 967 968 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name); 969 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name); 970 971 next_sink = p->sink; 972 973 if (!is_skl_dsp_widget_type(p->sink)) 974 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig); 975 976 /* 977 * here we will check widgets in sink pipelines, so that 978 * can be any widgets type and we are only interested if 979 * they are ones used for SKL so check that first 980 */ 981 if ((p->sink->priv != NULL) && 982 is_skl_dsp_widget_type(p->sink)) { 983 984 sink = p->sink; 985 sink_mconfig = sink->priv; 986 987 /* 988 * Modules other than PGA leaf can be connected 989 * directly or via switch to a module in another 990 * pipeline. EX: reference path 991 * when the path is enabled, the dst module that needs 992 * to be bound may not be initialized. 
if the module is 993 * not initialized, add these modules in the deferred 994 * bind list and when the dst module is initialised, 995 * bind this module to the dst_module in deferred list. 996 */ 997 if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE) 998 && (sink_mconfig->m_state == SKL_MODULE_UNINIT))) { 999 1000 ret = skl_tplg_module_add_deferred_bind(skl, 1001 src_mconfig, sink_mconfig); 1002 1003 if (ret < 0) 1004 return ret; 1005 1006 } 1007 1008 1009 if (src_mconfig->m_state == SKL_MODULE_UNINIT || 1010 sink_mconfig->m_state == SKL_MODULE_UNINIT) 1011 continue; 1012 1013 /* Bind source to sink, mixin is always source */ 1014 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig); 1015 if (ret) 1016 return ret; 1017 1018 /* set module params after bind */ 1019 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx); 1020 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx); 1021 1022 /* Start sinks pipe first */ 1023 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) { 1024 if (sink_mconfig->pipe->conn_type != 1025 SKL_PIPE_CONN_TYPE_FE) 1026 ret = skl_run_pipe(ctx, 1027 sink_mconfig->pipe); 1028 if (ret) 1029 return ret; 1030 } 1031 } 1032 } 1033 1034 if (!sink && next_sink) 1035 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig); 1036 1037 return 0; 1038 } 1039 1040 /* 1041 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA 1042 * we need to do following: 1043 * - Bind to sink pipeline 1044 * Since the sink pipes can be running and we don't get mixer event on 1045 * connect for already running mixer, we need to find the sink pipes 1046 * here and bind to them. This way dynamic connect works. 1047 * - Start sink pipeline, if not running 1048 * - Then run current pipe 1049 */ 1050 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, 1051 struct skl *skl) 1052 { 1053 struct skl_module_cfg *src_mconfig; 1054 struct skl_sst *ctx = skl->skl_sst; 1055 int ret = 0; 1056 1057 src_mconfig = w->priv; 1058 1059 /* 1060 * find which sink it is connected to, bind with the sink, 1061 * if sink is not started, start sink pipe first, then start 1062 * this pipe 1063 */ 1064 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig); 1065 if (ret) 1066 return ret; 1067 1068 /* Start source pipe last after starting all sinks */ 1069 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 1070 return skl_run_pipe(ctx, src_mconfig->pipe); 1071 1072 return 0; 1073 } 1074 1075 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget( 1076 struct snd_soc_dapm_widget *w, struct skl *skl) 1077 { 1078 struct snd_soc_dapm_path *p; 1079 struct snd_soc_dapm_widget *src_w = NULL; 1080 struct skl_sst *ctx = skl->skl_sst; 1081 1082 snd_soc_dapm_widget_for_each_source_path(w, p) { 1083 src_w = p->source; 1084 if (!p->connect) 1085 continue; 1086 1087 dev_dbg(ctx->dev, "sink widget=%s\n", w->name); 1088 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name); 1089 1090 /* 1091 * here we will check widgets in sink pipelines, so that can 1092 * be any widgets type and we are only interested if they are 1093 * ones used for SKL so check that first 1094 */ 1095 if ((p->source->priv != NULL) && 1096 is_skl_dsp_widget_type(p->source)) { 1097 return p->source; 1098 } 1099 } 1100 1101 if (src_w != NULL) 1102 return skl_get_src_dsp_widget(src_w, skl); 1103 1104 return NULL; 1105 } 1106 1107 /* 1108 * in the Post-PMU event of mixer we need to do following: 1109 * - Check if this pipe is running 1110 * - if not, then 1111 * - bind this pipeline to its source pipeline 1112 * 
if source pipe is already running, this means it is a dynamic 1113 * connection and we need to bind only to that pipe 1114 * - start this pipeline 1115 */ 1116 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w, 1117 struct skl *skl) 1118 { 1119 int ret = 0; 1120 struct snd_soc_dapm_widget *source, *sink; 1121 struct skl_module_cfg *src_mconfig, *sink_mconfig; 1122 struct skl_sst *ctx = skl->skl_sst; 1123 int src_pipe_started = 0; 1124 1125 sink = w; 1126 sink_mconfig = sink->priv; 1127 1128 /* 1129 * If source pipe is already started, that means source is driving 1130 * one more sink before this sink got connected, Since source is 1131 * started, bind this sink to source and start this pipe. 1132 */ 1133 source = skl_get_src_dsp_widget(w, skl); 1134 if (source != NULL) { 1135 src_mconfig = source->priv; 1136 sink_mconfig = sink->priv; 1137 src_pipe_started = 1; 1138 1139 /* 1140 * check pipe state, then no need to bind or start the 1141 * pipe 1142 */ 1143 if (src_mconfig->pipe->state != SKL_PIPE_STARTED) 1144 src_pipe_started = 0; 1145 } 1146 1147 if (src_pipe_started) { 1148 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig); 1149 if (ret) 1150 return ret; 1151 1152 /* set module params after bind */ 1153 skl_tplg_set_module_bind_params(source, src_mconfig, ctx); 1154 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx); 1155 1156 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 1157 ret = skl_run_pipe(ctx, sink_mconfig->pipe); 1158 } 1159 1160 return ret; 1161 } 1162 1163 /* 1164 * in the Pre-PMD event of mixer we need to do following: 1165 * - Stop the pipe 1166 * - find the source connections and remove that from dapm_path_list 1167 * - unbind with source pipelines if still connected 1168 */ 1169 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w, 1170 struct skl *skl) 1171 { 1172 struct skl_module_cfg *src_mconfig, *sink_mconfig; 1173 int ret = 0, i; 1174 struct skl_sst *ctx = skl->skl_sst; 1175 1176 sink_mconfig = w->priv; 1177 1178 /* Stop the pipe */ 1179 ret = skl_stop_pipe(ctx, sink_mconfig->pipe); 1180 if (ret) 1181 return ret; 1182 1183 for (i = 0; i < sink_mconfig->module->max_input_pins; i++) { 1184 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) { 1185 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg; 1186 if (!src_mconfig) 1187 continue; 1188 1189 ret = skl_unbind_modules(ctx, 1190 src_mconfig, sink_mconfig); 1191 } 1192 } 1193 1194 return ret; 1195 } 1196 1197 /* 1198 * in the Post-PMD event of mixer we need to do following: 1199 * - Free the mcps used 1200 * - Free the mem used 1201 * - Unbind the modules within the pipeline 1202 * - Delete the pipeline (modules are not required to be explicitly 1203 * deleted, pipeline delete is enough here 1204 */ 1205 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, 1206 struct skl *skl) 1207 { 1208 struct skl_module_cfg *mconfig = w->priv; 1209 struct skl_pipe_module *w_module; 1210 struct skl_module_cfg *src_module = NULL, *dst_module; 1211 struct skl_sst *ctx = skl->skl_sst; 1212 struct skl_pipe *s_pipe = mconfig->pipe; 1213 struct skl_module_deferred_bind *modules, *tmp; 1214 1215 if (s_pipe->state == SKL_PIPE_INVALID) 1216 return -EINVAL; 1217 1218 skl_tplg_free_pipe_mcps(skl, mconfig); 1219 skl_tplg_free_pipe_mem(skl, mconfig); 1220 1221 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1222 if (list_empty(&skl->bind_list)) 1223 break; 1224 1225 src_module = w_module->w->priv; 1226 1227 
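		/*
		 * Walk the deferred bind list with the _safe iterator:
		 * entries whose source matches this module are removed
		 * from bind_list and freed inside the loop below.
		 */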
list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) { 1228 /* 1229 * When the destination module is deleted, Unbind the 1230 * modules from deferred bind list. 1231 */ 1232 if (modules->dst == src_module) { 1233 skl_unbind_modules(ctx, modules->src, 1234 modules->dst); 1235 } 1236 1237 /* 1238 * When the source module is deleted, remove this entry 1239 * from the deferred bind list. 1240 */ 1241 if (modules->src == src_module) { 1242 list_del(&modules->node); 1243 modules->src = NULL; 1244 modules->dst = NULL; 1245 kfree(modules); 1246 } 1247 } 1248 } 1249 1250 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1251 dst_module = w_module->w->priv; 1252 1253 if (mconfig->m_state >= SKL_MODULE_INIT_DONE) 1254 skl_tplg_free_pipe_mcps(skl, dst_module); 1255 if (src_module == NULL) { 1256 src_module = dst_module; 1257 continue; 1258 } 1259 1260 skl_unbind_modules(ctx, src_module, dst_module); 1261 src_module = dst_module; 1262 } 1263 1264 skl_delete_pipe(ctx, mconfig->pipe); 1265 1266 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1267 src_module = w_module->w->priv; 1268 src_module->m_state = SKL_MODULE_UNINIT; 1269 } 1270 1271 return skl_tplg_unload_pipe_modules(ctx, s_pipe); 1272 } 1273 1274 /* 1275 * in the Post-PMD event of PGA we need to do following: 1276 * - Free the mcps used 1277 * - Stop the pipeline 1278 * - In source pipe is connected, unbind with source pipelines 1279 */ 1280 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, 1281 struct skl *skl) 1282 { 1283 struct skl_module_cfg *src_mconfig, *sink_mconfig; 1284 int ret = 0, i; 1285 struct skl_sst *ctx = skl->skl_sst; 1286 1287 src_mconfig = w->priv; 1288 1289 /* Stop the pipe since this is a mixin module */ 1290 ret = skl_stop_pipe(ctx, src_mconfig->pipe); 1291 if (ret) 1292 return ret; 1293 1294 for (i = 0; i < src_mconfig->module->max_output_pins; i++) { 1295 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) { 1296 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg; 1297 if (!sink_mconfig) 1298 continue; 1299 /* 1300 * This is a connecter and if path is found that means 1301 * unbind between source and sink has not happened yet 1302 */ 1303 ret = skl_unbind_modules(ctx, src_mconfig, 1304 sink_mconfig); 1305 } 1306 } 1307 1308 return ret; 1309 } 1310 1311 /* 1312 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a 1313 * second one is required that is created as another pipe entity. 1314 * The mixer is responsible for pipe management and represent a pipeline 1315 * instance 1316 */ 1317 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w, 1318 struct snd_kcontrol *k, int event) 1319 { 1320 struct snd_soc_dapm_context *dapm = w->dapm; 1321 struct skl *skl = get_skl_ctx(dapm->dev); 1322 1323 switch (event) { 1324 case SND_SOC_DAPM_PRE_PMU: 1325 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl); 1326 1327 case SND_SOC_DAPM_POST_PMU: 1328 return skl_tplg_mixer_dapm_post_pmu_event(w, skl); 1329 1330 case SND_SOC_DAPM_PRE_PMD: 1331 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl); 1332 1333 case SND_SOC_DAPM_POST_PMD: 1334 return skl_tplg_mixer_dapm_post_pmd_event(w, skl); 1335 } 1336 1337 return 0; 1338 } 1339 1340 /* 1341 * In modelling, we assumed rest of the modules in pipeline are PGA. 
But we 1342 * are interested in last PGA (leaf PGA) in a pipeline to disconnect with 1343 * the sink when it is running (two FE to one BE or one FE to two BE) 1344 * scenarios 1345 */ 1346 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w, 1347 struct snd_kcontrol *k, int event) 1348 1349 { 1350 struct snd_soc_dapm_context *dapm = w->dapm; 1351 struct skl *skl = get_skl_ctx(dapm->dev); 1352 1353 switch (event) { 1354 case SND_SOC_DAPM_PRE_PMU: 1355 return skl_tplg_pga_dapm_pre_pmu_event(w, skl); 1356 1357 case SND_SOC_DAPM_POST_PMD: 1358 return skl_tplg_pga_dapm_post_pmd_event(w, skl); 1359 } 1360 1361 return 0; 1362 } 1363 1364 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol, 1365 unsigned int __user *data, unsigned int size) 1366 { 1367 struct soc_bytes_ext *sb = 1368 (struct soc_bytes_ext *)kcontrol->private_value; 1369 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private; 1370 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1371 struct skl_module_cfg *mconfig = w->priv; 1372 struct skl *skl = get_skl_ctx(w->dapm->dev); 1373 1374 if (w->power) 1375 skl_get_module_params(skl->skl_sst, (u32 *)bc->params, 1376 bc->size, bc->param_id, mconfig); 1377 1378 /* decrement size for TLV header */ 1379 size -= 2 * sizeof(u32); 1380 1381 /* check size as we don't want to send kernel data */ 1382 if (size > bc->max) 1383 size = bc->max; 1384 1385 if (bc->params) { 1386 if (copy_to_user(data, &bc->param_id, sizeof(u32))) 1387 return -EFAULT; 1388 if (copy_to_user(data + 1, &size, sizeof(u32))) 1389 return -EFAULT; 1390 if (copy_to_user(data + 2, bc->params, size)) 1391 return -EFAULT; 1392 } 1393 1394 return 0; 1395 } 1396 1397 #define SKL_PARAM_VENDOR_ID 0xff 1398 1399 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol, 1400 const unsigned int __user *data, unsigned int size) 1401 { 1402 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1403 struct skl_module_cfg *mconfig = w->priv; 1404 struct soc_bytes_ext *sb = 1405 (struct soc_bytes_ext *)kcontrol->private_value; 1406 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private; 1407 struct skl *skl = get_skl_ctx(w->dapm->dev); 1408 1409 if (ac->params) { 1410 if (size > ac->max) 1411 return -EINVAL; 1412 1413 ac->size = size; 1414 /* 1415 * if the param_is is of type Vendor, firmware expects actual 1416 * parameter id and size from the control. 
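		 *
		 * For reference, the payload handed back on a read is laid
		 * out as { param_id, size, data... } in u32 words (see
		 * skl_tplg_tlv_control_get() above); on a write a vendor
		 * parameter is copied from the start of the user buffer,
		 * any other parameter from offset 2 * sizeof(u32), i.e.
		 * past that header.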
1417 */ 1418 if (ac->param_id == SKL_PARAM_VENDOR_ID) { 1419 if (copy_from_user(ac->params, data, size)) 1420 return -EFAULT; 1421 } else { 1422 if (copy_from_user(ac->params, 1423 data + 2, size)) 1424 return -EFAULT; 1425 } 1426 1427 if (w->power) 1428 return skl_set_module_params(skl->skl_sst, 1429 (u32 *)ac->params, ac->size, 1430 ac->param_id, mconfig); 1431 } 1432 1433 return 0; 1434 } 1435 1436 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol, 1437 struct snd_ctl_elem_value *ucontrol) 1438 { 1439 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1440 struct skl_module_cfg *mconfig = w->priv; 1441 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; 1442 u32 ch_type = *((u32 *)ec->dobj.private); 1443 1444 if (mconfig->dmic_ch_type == ch_type) 1445 ucontrol->value.enumerated.item[0] = 1446 mconfig->dmic_ch_combo_index; 1447 else 1448 ucontrol->value.enumerated.item[0] = 0; 1449 1450 return 0; 1451 } 1452 1453 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig, 1454 struct skl_mic_sel_config *mic_cfg, struct device *dev) 1455 { 1456 struct skl_specific_cfg *sp_cfg = &mconfig->formats_config; 1457 1458 sp_cfg->caps_size = sizeof(struct skl_mic_sel_config); 1459 sp_cfg->set_params = SKL_PARAM_SET; 1460 sp_cfg->param_id = 0x00; 1461 if (!sp_cfg->caps) { 1462 sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL); 1463 if (!sp_cfg->caps) 1464 return -ENOMEM; 1465 } 1466 1467 mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH; 1468 mic_cfg->flags = 0; 1469 memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size); 1470 1471 return 0; 1472 } 1473 1474 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol, 1475 struct snd_ctl_elem_value *ucontrol) 1476 { 1477 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1478 struct skl_module_cfg *mconfig = w->priv; 1479 struct skl_mic_sel_config mic_cfg = {0}; 1480 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; 1481 u32 ch_type = *((u32 *)ec->dobj.private); 1482 const int *list; 1483 u8 in_ch, out_ch, index; 1484 1485 mconfig->dmic_ch_type = ch_type; 1486 mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0]; 1487 1488 /* enum control index 0 is INVALID, so no channels to be set */ 1489 if (mconfig->dmic_ch_combo_index == 0) 1490 return 0; 1491 1492 /* No valid channel selection map for index 0, so offset by 1 */ 1493 index = mconfig->dmic_ch_combo_index - 1; 1494 1495 switch (ch_type) { 1496 case SKL_CH_MONO: 1497 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list)) 1498 return -EINVAL; 1499 1500 list = &mic_mono_list[index]; 1501 break; 1502 1503 case SKL_CH_STEREO: 1504 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list)) 1505 return -EINVAL; 1506 1507 list = mic_stereo_list[index]; 1508 break; 1509 1510 case SKL_CH_TRIO: 1511 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list)) 1512 return -EINVAL; 1513 1514 list = mic_trio_list[index]; 1515 break; 1516 1517 case SKL_CH_QUATRO: 1518 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list)) 1519 return -EINVAL; 1520 1521 list = mic_quatro_list[index]; 1522 break; 1523 1524 default: 1525 dev_err(w->dapm->dev, 1526 "Invalid channel %d for mic_select module\n", 1527 ch_type); 1528 return -EINVAL; 1529 1530 } 1531 1532 /* channel type enum map to number of chanels for that type */ 1533 for (out_ch = 0; out_ch < ch_type; out_ch++) { 1534 in_ch = list[out_ch]; 1535 mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN; 1536 } 1537 1538 return 
skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev); 1539 } 1540 1541 /* 1542 * Fill the dma id for host and link. In case of passthrough 1543 * pipeline, this will both host and link in the same 1544 * pipeline, so need to copy the link and host based on dev_type 1545 */ 1546 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg, 1547 struct skl_pipe_params *params) 1548 { 1549 struct skl_pipe *pipe = mcfg->pipe; 1550 1551 if (pipe->passthru) { 1552 switch (mcfg->dev_type) { 1553 case SKL_DEVICE_HDALINK: 1554 pipe->p_params->link_dma_id = params->link_dma_id; 1555 pipe->p_params->link_index = params->link_index; 1556 pipe->p_params->link_bps = params->link_bps; 1557 break; 1558 1559 case SKL_DEVICE_HDAHOST: 1560 pipe->p_params->host_dma_id = params->host_dma_id; 1561 pipe->p_params->host_bps = params->host_bps; 1562 break; 1563 1564 default: 1565 break; 1566 } 1567 pipe->p_params->s_fmt = params->s_fmt; 1568 pipe->p_params->ch = params->ch; 1569 pipe->p_params->s_freq = params->s_freq; 1570 pipe->p_params->stream = params->stream; 1571 pipe->p_params->format = params->format; 1572 1573 } else { 1574 memcpy(pipe->p_params, params, sizeof(*params)); 1575 } 1576 } 1577 1578 /* 1579 * The FE params are passed by hw_params of the DAI. 1580 * On hw_params, the params are stored in Gateway module of the FE and we 1581 * need to calculate the format in DSP module configuration, that 1582 * conversion is done here 1583 */ 1584 int skl_tplg_update_pipe_params(struct device *dev, 1585 struct skl_module_cfg *mconfig, 1586 struct skl_pipe_params *params) 1587 { 1588 struct skl_module_res *res = &mconfig->module->resources[0]; 1589 struct skl *skl = get_skl_ctx(dev); 1590 struct skl_module_fmt *format = NULL; 1591 u8 cfg_idx = mconfig->pipe->cur_config_idx; 1592 1593 skl_tplg_fill_dma_id(mconfig, params); 1594 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 1595 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 1596 1597 if (skl->nr_modules) 1598 return 0; 1599 1600 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) 1601 format = &mconfig->module->formats[0].inputs[0].fmt; 1602 else 1603 format = &mconfig->module->formats[0].outputs[0].fmt; 1604 1605 /* set the hw_params */ 1606 format->s_freq = params->s_freq; 1607 format->channels = params->ch; 1608 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 1609 1610 /* 1611 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 1612 * container so update bit depth accordingly 1613 */ 1614 switch (format->valid_bit_depth) { 1615 case SKL_DEPTH_16BIT: 1616 format->bit_depth = format->valid_bit_depth; 1617 break; 1618 1619 case SKL_DEPTH_24BIT: 1620 case SKL_DEPTH_32BIT: 1621 format->bit_depth = SKL_DEPTH_32BIT; 1622 break; 1623 1624 default: 1625 dev_err(dev, "Invalid bit depth %x for pipe\n", 1626 format->valid_bit_depth); 1627 return -EINVAL; 1628 } 1629 1630 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1631 res->ibs = (format->s_freq / 1000) * 1632 (format->channels) * 1633 (format->bit_depth >> 3); 1634 } else { 1635 res->obs = (format->s_freq / 1000) * 1636 (format->channels) * 1637 (format->bit_depth >> 3); 1638 } 1639 1640 return 0; 1641 } 1642 1643 /* 1644 * Query the module config for the FE DAI 1645 * This is used to find the hw_params set for that DAI and apply to FE 1646 * pipeline 1647 */ 1648 struct skl_module_cfg * 1649 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream) 1650 { 1651 struct snd_soc_dapm_widget *w; 1652 struct snd_soc_dapm_path *p = NULL; 1653 1654 if (stream == 
SNDRV_PCM_STREAM_PLAYBACK) { 1655 w = dai->playback_widget; 1656 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1657 if (p->connect && p->sink->power && 1658 !is_skl_dsp_widget_type(p->sink)) 1659 continue; 1660 1661 if (p->sink->priv) { 1662 dev_dbg(dai->dev, "set params for %s\n", 1663 p->sink->name); 1664 return p->sink->priv; 1665 } 1666 } 1667 } else { 1668 w = dai->capture_widget; 1669 snd_soc_dapm_widget_for_each_source_path(w, p) { 1670 if (p->connect && p->source->power && 1671 !is_skl_dsp_widget_type(p->source)) 1672 continue; 1673 1674 if (p->source->priv) { 1675 dev_dbg(dai->dev, "set params for %s\n", 1676 p->source->name); 1677 return p->source->priv; 1678 } 1679 } 1680 } 1681 1682 return NULL; 1683 } 1684 1685 static struct skl_module_cfg *skl_get_mconfig_pb_cpr( 1686 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1687 { 1688 struct snd_soc_dapm_path *p; 1689 struct skl_module_cfg *mconfig = NULL; 1690 1691 snd_soc_dapm_widget_for_each_source_path(w, p) { 1692 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) { 1693 if (p->connect && 1694 (p->sink->id == snd_soc_dapm_aif_out) && 1695 p->source->priv) { 1696 mconfig = p->source->priv; 1697 return mconfig; 1698 } 1699 mconfig = skl_get_mconfig_pb_cpr(dai, p->source); 1700 if (mconfig) 1701 return mconfig; 1702 } 1703 } 1704 return mconfig; 1705 } 1706 1707 static struct skl_module_cfg *skl_get_mconfig_cap_cpr( 1708 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1709 { 1710 struct snd_soc_dapm_path *p; 1711 struct skl_module_cfg *mconfig = NULL; 1712 1713 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1714 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) { 1715 if (p->connect && 1716 (p->source->id == snd_soc_dapm_aif_in) && 1717 p->sink->priv) { 1718 mconfig = p->sink->priv; 1719 return mconfig; 1720 } 1721 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink); 1722 if (mconfig) 1723 return mconfig; 1724 } 1725 } 1726 return mconfig; 1727 } 1728 1729 struct skl_module_cfg * 1730 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream) 1731 { 1732 struct snd_soc_dapm_widget *w; 1733 struct skl_module_cfg *mconfig; 1734 1735 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1736 w = dai->playback_widget; 1737 mconfig = skl_get_mconfig_pb_cpr(dai, w); 1738 } else { 1739 w = dai->capture_widget; 1740 mconfig = skl_get_mconfig_cap_cpr(dai, w); 1741 } 1742 return mconfig; 1743 } 1744 1745 static u8 skl_tplg_be_link_type(int dev_type) 1746 { 1747 int ret; 1748 1749 switch (dev_type) { 1750 case SKL_DEVICE_BT: 1751 ret = NHLT_LINK_SSP; 1752 break; 1753 1754 case SKL_DEVICE_DMIC: 1755 ret = NHLT_LINK_DMIC; 1756 break; 1757 1758 case SKL_DEVICE_I2S: 1759 ret = NHLT_LINK_SSP; 1760 break; 1761 1762 case SKL_DEVICE_HDALINK: 1763 ret = NHLT_LINK_HDA; 1764 break; 1765 1766 default: 1767 ret = NHLT_LINK_INVALID; 1768 break; 1769 } 1770 1771 return ret; 1772 } 1773 1774 /* 1775 * Fill the BE gateway parameters 1776 * The BE gateway expects a blob of parameters which are kept in the ACPI 1777 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance. 
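 * As a rough sketch of the lookup done below, skl_get_ep_blob() is queried
 * with the virtual bus_id as the instance and the PCM channels, rate and
 * format as the match keys.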
1778 * The port can have multiple settings so pick based on the PCM 1779 * parameters 1780 */ 1781 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, 1782 struct skl_module_cfg *mconfig, 1783 struct skl_pipe_params *params) 1784 { 1785 struct nhlt_specific_cfg *cfg; 1786 struct skl *skl = get_skl_ctx(dai->dev); 1787 int link_type = skl_tplg_be_link_type(mconfig->dev_type); 1788 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type); 1789 1790 skl_tplg_fill_dma_id(mconfig, params); 1791 1792 if (link_type == NHLT_LINK_HDA) 1793 return 0; 1794 1795 /* update the blob based on virtual bus_id*/ 1796 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type, 1797 params->s_fmt, params->ch, 1798 params->s_freq, params->stream, 1799 dev_type); 1800 if (cfg) { 1801 mconfig->formats_config.caps_size = cfg->size; 1802 mconfig->formats_config.caps = (u32 *) &cfg->caps; 1803 } else { 1804 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n", 1805 mconfig->vbus_id, link_type, 1806 params->stream); 1807 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n", 1808 params->ch, params->s_freq, params->s_fmt); 1809 return -EINVAL; 1810 } 1811 1812 return 0; 1813 } 1814 1815 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai, 1816 struct snd_soc_dapm_widget *w, 1817 struct skl_pipe_params *params) 1818 { 1819 struct snd_soc_dapm_path *p; 1820 int ret = -EIO; 1821 1822 snd_soc_dapm_widget_for_each_source_path(w, p) { 1823 if (p->connect && is_skl_dsp_widget_type(p->source) && 1824 p->source->priv) { 1825 1826 ret = skl_tplg_be_fill_pipe_params(dai, 1827 p->source->priv, params); 1828 if (ret < 0) 1829 return ret; 1830 } else { 1831 ret = skl_tplg_be_set_src_pipe_params(dai, 1832 p->source, params); 1833 if (ret < 0) 1834 return ret; 1835 } 1836 } 1837 1838 return ret; 1839 } 1840 1841 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai, 1842 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params) 1843 { 1844 struct snd_soc_dapm_path *p = NULL; 1845 int ret = -EIO; 1846 1847 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1848 if (p->connect && is_skl_dsp_widget_type(p->sink) && 1849 p->sink->priv) { 1850 1851 ret = skl_tplg_be_fill_pipe_params(dai, 1852 p->sink->priv, params); 1853 if (ret < 0) 1854 return ret; 1855 } else { 1856 ret = skl_tplg_be_set_sink_pipe_params( 1857 dai, p->sink, params); 1858 if (ret < 0) 1859 return ret; 1860 } 1861 } 1862 1863 return ret; 1864 } 1865 1866 /* 1867 * BE hw_params can be a source parameters (capture) or sink parameters 1868 * (playback). 
Based on sink and source we need to either find the source 1869 * list or the sink list and set the pipeline parameters 1870 */ 1871 int skl_tplg_be_update_params(struct snd_soc_dai *dai, 1872 struct skl_pipe_params *params) 1873 { 1874 struct snd_soc_dapm_widget *w; 1875 1876 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1877 w = dai->playback_widget; 1878 1879 return skl_tplg_be_set_src_pipe_params(dai, w, params); 1880 1881 } else { 1882 w = dai->capture_widget; 1883 1884 return skl_tplg_be_set_sink_pipe_params(dai, w, params); 1885 } 1886 1887 return 0; 1888 } 1889 1890 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = { 1891 {SKL_MIXER_EVENT, skl_tplg_mixer_event}, 1892 {SKL_VMIXER_EVENT, skl_tplg_mixer_event}, 1893 {SKL_PGA_EVENT, skl_tplg_pga_event}, 1894 }; 1895 1896 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = { 1897 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get, 1898 skl_tplg_tlv_control_set}, 1899 }; 1900 1901 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = { 1902 { 1903 .id = SKL_CONTROL_TYPE_MIC_SELECT, 1904 .get = skl_tplg_mic_control_get, 1905 .put = skl_tplg_mic_control_set, 1906 }, 1907 }; 1908 1909 static int skl_tplg_fill_pipe_cfg(struct device *dev, 1910 struct skl_pipe *pipe, u32 tkn, 1911 u32 tkn_val, int conf_idx, int dir) 1912 { 1913 struct skl_pipe_fmt *fmt; 1914 struct skl_path_config *config; 1915 1916 switch (dir) { 1917 case SKL_DIR_IN: 1918 fmt = &pipe->configs[conf_idx].in_fmt; 1919 break; 1920 1921 case SKL_DIR_OUT: 1922 fmt = &pipe->configs[conf_idx].out_fmt; 1923 break; 1924 1925 default: 1926 dev_err(dev, "Invalid direction: %d\n", dir); 1927 return -EINVAL; 1928 } 1929 1930 config = &pipe->configs[conf_idx]; 1931 1932 switch (tkn) { 1933 case SKL_TKN_U32_CFG_FREQ: 1934 fmt->freq = tkn_val; 1935 break; 1936 1937 case SKL_TKN_U8_CFG_CHAN: 1938 fmt->channels = tkn_val; 1939 break; 1940 1941 case SKL_TKN_U8_CFG_BPS: 1942 fmt->bps = tkn_val; 1943 break; 1944 1945 case SKL_TKN_U32_PATH_MEM_PGS: 1946 config->mem_pages = tkn_val; 1947 break; 1948 1949 default: 1950 dev_err(dev, "Invalid token config: %d\n", tkn); 1951 return -EINVAL; 1952 } 1953 1954 return 0; 1955 } 1956 1957 static int skl_tplg_fill_pipe_tkn(struct device *dev, 1958 struct skl_pipe *pipe, u32 tkn, 1959 u32 tkn_val) 1960 { 1961 1962 switch (tkn) { 1963 case SKL_TKN_U32_PIPE_CONN_TYPE: 1964 pipe->conn_type = tkn_val; 1965 break; 1966 1967 case SKL_TKN_U32_PIPE_PRIORITY: 1968 pipe->pipe_priority = tkn_val; 1969 break; 1970 1971 case SKL_TKN_U32_PIPE_MEM_PGS: 1972 pipe->memory_pages = tkn_val; 1973 break; 1974 1975 case SKL_TKN_U32_PMODE: 1976 pipe->lp_mode = tkn_val; 1977 break; 1978 1979 case SKL_TKN_U32_PIPE_DIRECTION: 1980 pipe->direction = tkn_val; 1981 break; 1982 1983 case SKL_TKN_U32_NUM_CONFIGS: 1984 pipe->nr_cfgs = tkn_val; 1985 break; 1986 1987 default: 1988 dev_err(dev, "Token not handled %d\n", tkn); 1989 return -EINVAL; 1990 } 1991 1992 return 0; 1993 } 1994 1995 /* 1996 * Add pipeline by parsing the relevant tokens 1997 * Return an existing pipe if the pipe already exists. 
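 *
 * If the ppl_id from the topology token matches a pipe already on
 * skl->ppl_list, mconfig->pipe is pointed at that pipe and -EEXIST is
 * returned, so the caller can treat the widget as joining an existing
 * pipeline rather than creating a new one.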
1998 */ 1999 static int skl_tplg_add_pipe(struct device *dev, 2000 struct skl_module_cfg *mconfig, struct skl *skl, 2001 struct snd_soc_tplg_vendor_value_elem *tkn_elem) 2002 { 2003 struct skl_pipeline *ppl; 2004 struct skl_pipe *pipe; 2005 struct skl_pipe_params *params; 2006 2007 list_for_each_entry(ppl, &skl->ppl_list, node) { 2008 if (ppl->pipe->ppl_id == tkn_elem->value) { 2009 mconfig->pipe = ppl->pipe; 2010 return -EEXIST; 2011 } 2012 } 2013 2014 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2015 if (!ppl) 2016 return -ENOMEM; 2017 2018 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2019 if (!pipe) 2020 return -ENOMEM; 2021 2022 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2023 if (!params) 2024 return -ENOMEM; 2025 2026 pipe->p_params = params; 2027 pipe->ppl_id = tkn_elem->value; 2028 INIT_LIST_HEAD(&pipe->w_list); 2029 2030 ppl->pipe = pipe; 2031 list_add(&ppl->node, &skl->ppl_list); 2032 2033 mconfig->pipe = pipe; 2034 mconfig->pipe->state = SKL_PIPE_INVALID; 2035 2036 return 0; 2037 } 2038 2039 static int skl_tplg_get_uuid(struct device *dev, u8 *guid, 2040 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 2041 { 2042 if (uuid_tkn->token == SKL_TKN_UUID) { 2043 memcpy(guid, &uuid_tkn->uuid, 16); 2044 return 0; 2045 } 2046 2047 dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token); 2048 2049 return -EINVAL; 2050 } 2051 2052 static int skl_tplg_fill_pin(struct device *dev, 2053 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2054 struct skl_module_pin *m_pin, 2055 int pin_index) 2056 { 2057 int ret; 2058 2059 switch (tkn_elem->token) { 2060 case SKL_TKN_U32_PIN_MOD_ID: 2061 m_pin[pin_index].id.module_id = tkn_elem->value; 2062 break; 2063 2064 case SKL_TKN_U32_PIN_INST_ID: 2065 m_pin[pin_index].id.instance_id = tkn_elem->value; 2066 break; 2067 2068 case SKL_TKN_UUID: 2069 ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b, 2070 (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem); 2071 if (ret < 0) 2072 return ret; 2073 2074 break; 2075 2076 default: 2077 dev_err(dev, "%d Not a pin token\n", tkn_elem->token); 2078 return -EINVAL; 2079 } 2080 2081 return 0; 2082 } 2083 2084 /* 2085 * Parse for pin config specific tokens to fill up the 2086 * module private data 2087 */ 2088 static int skl_tplg_fill_pins_info(struct device *dev, 2089 struct skl_module_cfg *mconfig, 2090 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2091 int dir, int pin_count) 2092 { 2093 int ret; 2094 struct skl_module_pin *m_pin; 2095 2096 switch (dir) { 2097 case SKL_DIR_IN: 2098 m_pin = mconfig->m_in_pin; 2099 break; 2100 2101 case SKL_DIR_OUT: 2102 m_pin = mconfig->m_out_pin; 2103 break; 2104 2105 default: 2106 dev_err(dev, "Invalid direction value\n"); 2107 return -EINVAL; 2108 } 2109 2110 ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count); 2111 if (ret < 0) 2112 return ret; 2113 2114 m_pin[pin_count].in_use = false; 2115 m_pin[pin_count].pin_state = SKL_PIN_UNBIND; 2116 2117 return 0; 2118 } 2119 2120 /* 2121 * Fill up input/output module config format based 2122 * on the direction 2123 */ 2124 static int skl_tplg_fill_fmt(struct device *dev, 2125 struct skl_module_fmt *dst_fmt, 2126 u32 tkn, u32 value) 2127 { 2128 switch (tkn) { 2129 case SKL_TKN_U32_FMT_CH: 2130 dst_fmt->channels = value; 2131 break; 2132 2133 case SKL_TKN_U32_FMT_FREQ: 2134 dst_fmt->s_freq = value; 2135 break; 2136 2137 case SKL_TKN_U32_FMT_BIT_DEPTH: 2138 dst_fmt->bit_depth = value; 2139 break; 2140 2141 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2142 dst_fmt->valid_bit_depth = value; 2143 break; 2144 2145 
case SKL_TKN_U32_FMT_CH_CONFIG: 2146 dst_fmt->ch_cfg = value; 2147 break; 2148 2149 case SKL_TKN_U32_FMT_INTERLEAVE: 2150 dst_fmt->interleaving_style = value; 2151 break; 2152 2153 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2154 dst_fmt->sample_type = value; 2155 break; 2156 2157 case SKL_TKN_U32_FMT_CH_MAP: 2158 dst_fmt->ch_map = value; 2159 break; 2160 2161 default: 2162 dev_err(dev, "Invalid token %d\n", tkn); 2163 return -EINVAL; 2164 } 2165 2166 return 0; 2167 } 2168 2169 static int skl_tplg_widget_fill_fmt(struct device *dev, 2170 struct skl_module_iface *fmt, 2171 u32 tkn, u32 val, u32 dir, int fmt_idx) 2172 { 2173 struct skl_module_fmt *dst_fmt; 2174 2175 if (!fmt) 2176 return -EINVAL; 2177 2178 switch (dir) { 2179 case SKL_DIR_IN: 2180 dst_fmt = &fmt->inputs[fmt_idx].fmt; 2181 break; 2182 2183 case SKL_DIR_OUT: 2184 dst_fmt = &fmt->outputs[fmt_idx].fmt; 2185 break; 2186 2187 default: 2188 dev_err(dev, "Invalid direction: %d\n", dir); 2189 return -EINVAL; 2190 } 2191 2192 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val); 2193 } 2194 2195 static void skl_tplg_fill_pin_dynamic_val( 2196 struct skl_module_pin *mpin, u32 pin_count, u32 value) 2197 { 2198 int i; 2199 2200 for (i = 0; i < pin_count; i++) 2201 mpin[i].is_dynamic = value; 2202 } 2203 2204 /* 2205 * Resource table in the manifest has pin specific resources 2206 * like pin and pin buffer size 2207 */ 2208 static int skl_tplg_manifest_pin_res_tkn(struct device *dev, 2209 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2210 struct skl_module_res *res, int pin_idx, int dir) 2211 { 2212 struct skl_module_pin_resources *m_pin; 2213 2214 switch (dir) { 2215 case SKL_DIR_IN: 2216 m_pin = &res->input[pin_idx]; 2217 break; 2218 2219 case SKL_DIR_OUT: 2220 m_pin = &res->output[pin_idx]; 2221 break; 2222 2223 default: 2224 dev_err(dev, "Invalid pin direction: %d\n", dir); 2225 return -EINVAL; 2226 } 2227 2228 switch (tkn_elem->token) { 2229 case SKL_TKN_MM_U32_RES_PIN_ID: 2230 m_pin->pin_index = tkn_elem->value; 2231 break; 2232 2233 case SKL_TKN_MM_U32_PIN_BUF: 2234 m_pin->buf_size = tkn_elem->value; 2235 break; 2236 2237 default: 2238 dev_err(dev, "Invalid token: %d\n", tkn_elem->token); 2239 return -EINVAL; 2240 } 2241 2242 return 0; 2243 } 2244 2245 /* 2246 * Fill module specific resources from the manifest's resource 2247 * table like CPS, DMA size, mem_pages. 
2248 */ 2249 static int skl_tplg_fill_res_tkn(struct device *dev, 2250 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2251 struct skl_module_res *res, 2252 int pin_idx, int dir) 2253 { 2254 int ret, tkn_count = 0; 2255 2256 if (!res) 2257 return -EINVAL; 2258 2259 switch (tkn_elem->token) { 2260 case SKL_TKN_MM_U32_CPS: 2261 res->cps = tkn_elem->value; 2262 break; 2263 2264 case SKL_TKN_MM_U32_DMA_SIZE: 2265 res->dma_buffer_size = tkn_elem->value; 2266 break; 2267 2268 case SKL_TKN_MM_U32_CPC: 2269 res->cpc = tkn_elem->value; 2270 break; 2271 2272 case SKL_TKN_U32_MEM_PAGES: 2273 res->is_pages = tkn_elem->value; 2274 break; 2275 2276 case SKL_TKN_U32_OBS: 2277 res->obs = tkn_elem->value; 2278 break; 2279 2280 case SKL_TKN_U32_IBS: 2281 res->ibs = tkn_elem->value; 2282 break; 2283 2284 case SKL_TKN_U32_MAX_MCPS: 2285 res->cps = tkn_elem->value; 2286 break; 2287 2288 case SKL_TKN_MM_U32_RES_PIN_ID: 2289 case SKL_TKN_MM_U32_PIN_BUF: 2290 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res, 2291 pin_idx, dir); 2292 if (ret < 0) 2293 return ret; 2294 break; 2295 2296 default: 2297 dev_err(dev, "Not a res type token: %d", tkn_elem->token); 2298 return -EINVAL; 2299 2300 } 2301 tkn_count++; 2302 2303 return tkn_count; 2304 } 2305 2306 /* 2307 * Parse tokens to fill up the module private data 2308 */ 2309 static int skl_tplg_get_token(struct device *dev, 2310 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2311 struct skl *skl, struct skl_module_cfg *mconfig) 2312 { 2313 int tkn_count = 0; 2314 int ret; 2315 static int is_pipe_exists; 2316 static int pin_index, dir, conf_idx; 2317 struct skl_module_iface *iface = NULL; 2318 struct skl_module_res *res = NULL; 2319 int res_idx = mconfig->res_idx; 2320 int fmt_idx = mconfig->fmt_idx; 2321 2322 /* 2323 * If the manifest structure contains no modules, fill all 2324 * the module data to 0th index. 2325 * res_idx and fmt_idx are default set to 0. 
 */
	if (skl->nr_modules == 0) {
		res = &mconfig->module->resources[res_idx];
		iface = &mconfig->module->formats[fmt_idx];
	}

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->module->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->module->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			mconfig->m_in_pin = devm_kzalloc(dev, MAX_IN_QUEUE *
					sizeof(*mconfig->m_in_pin), GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin = devm_kzalloc(dev, MAX_OUT_QUEUE *
					sizeof(*mconfig->m_out_pin), GFP_KERNEL);
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_MAX_MCPS:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0) {
			if (ret == -EEXIST) {
				is_pipe_exists = 1;
				break;
			}
			return ret;
		}

		break;

	case SKL_TKN_U32_PIPE_CONFIG_ID:
		conf_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
	case SKL_TKN_U32_PIPE_DIRECTION:
	case SKL_TKN_U32_NUM_CONFIGS:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
	case SKL_TKN_U32_CFG_FREQ:
	case SKL_TKN_U8_CFG_CHAN:
	case SKL_TKN_U8_CFG_BPS:
		if (mconfig->pipe->nr_cfgs) {
			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value,
					conf_idx, dir);
			if (ret < 0)
				return ret;
		}
		break;

	case SKL_TKN_CFG_MOD_RES_ID:
		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
		break;

	case SKL_TKN_CFG_MOD_FMT_ID:
mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2468 break; 2469 2470 /* 2471 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2472 * direction and the pin count. The first four bits represent 2473 * direction and next four the pin count. 2474 */ 2475 case SKL_TKN_U32_DIR_PIN_COUNT: 2476 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 2477 pin_index = (tkn_elem->value & 2478 SKL_PIN_COUNT_MASK) >> 4; 2479 2480 break; 2481 2482 case SKL_TKN_U32_FMT_CH: 2483 case SKL_TKN_U32_FMT_FREQ: 2484 case SKL_TKN_U32_FMT_BIT_DEPTH: 2485 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2486 case SKL_TKN_U32_FMT_CH_CONFIG: 2487 case SKL_TKN_U32_FMT_INTERLEAVE: 2488 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2489 case SKL_TKN_U32_FMT_CH_MAP: 2490 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token, 2491 tkn_elem->value, dir, pin_index); 2492 2493 if (ret < 0) 2494 return ret; 2495 2496 break; 2497 2498 case SKL_TKN_U32_PIN_MOD_ID: 2499 case SKL_TKN_U32_PIN_INST_ID: 2500 case SKL_TKN_UUID: 2501 ret = skl_tplg_fill_pins_info(dev, 2502 mconfig, tkn_elem, dir, 2503 pin_index); 2504 if (ret < 0) 2505 return ret; 2506 2507 break; 2508 2509 case SKL_TKN_U32_CAPS_SIZE: 2510 mconfig->formats_config.caps_size = 2511 tkn_elem->value; 2512 2513 break; 2514 2515 case SKL_TKN_U32_CAPS_SET_PARAMS: 2516 mconfig->formats_config.set_params = 2517 tkn_elem->value; 2518 break; 2519 2520 case SKL_TKN_U32_CAPS_PARAMS_ID: 2521 mconfig->formats_config.param_id = 2522 tkn_elem->value; 2523 break; 2524 2525 case SKL_TKN_U32_PROC_DOMAIN: 2526 mconfig->domain = 2527 tkn_elem->value; 2528 2529 break; 2530 2531 case SKL_TKN_U32_DMA_BUF_SIZE: 2532 mconfig->dma_buffer_size = tkn_elem->value; 2533 break; 2534 2535 case SKL_TKN_U8_IN_PIN_TYPE: 2536 case SKL_TKN_U8_OUT_PIN_TYPE: 2537 case SKL_TKN_U8_CONN_TYPE: 2538 break; 2539 2540 default: 2541 dev_err(dev, "Token %d not handled\n", 2542 tkn_elem->token); 2543 return -EINVAL; 2544 } 2545 2546 tkn_count++; 2547 2548 return tkn_count; 2549 } 2550 2551 /* 2552 * Parse the vendor array for specific tokens to construct 2553 * module private data 2554 */ 2555 static int skl_tplg_get_tokens(struct device *dev, 2556 char *pvt_data, struct skl *skl, 2557 struct skl_module_cfg *mconfig, int block_size) 2558 { 2559 struct snd_soc_tplg_vendor_array *array; 2560 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2561 int tkn_count = 0, ret; 2562 int off = 0, tuple_size = 0; 2563 bool is_module_guid = true; 2564 2565 if (block_size <= 0) 2566 return -EINVAL; 2567 2568 while (tuple_size < block_size) { 2569 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 2570 2571 off += array->size; 2572 2573 switch (array->type) { 2574 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 2575 dev_warn(dev, "no string tokens expected for skl tplg\n"); 2576 continue; 2577 2578 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 2579 if (is_module_guid) { 2580 ret = skl_tplg_get_uuid(dev, mconfig->guid, 2581 array->uuid); 2582 is_module_guid = false; 2583 } else { 2584 ret = skl_tplg_get_token(dev, array->value, skl, 2585 mconfig); 2586 } 2587 2588 if (ret < 0) 2589 return ret; 2590 2591 tuple_size += sizeof(*array->uuid); 2592 2593 continue; 2594 2595 default: 2596 tkn_elem = array->value; 2597 tkn_count = 0; 2598 break; 2599 } 2600 2601 while (tkn_count <= (array->num_elems - 1)) { 2602 ret = skl_tplg_get_token(dev, tkn_elem, 2603 skl, mconfig); 2604 2605 if (ret < 0) 2606 return ret; 2607 2608 tkn_count = tkn_count + ret; 2609 tkn_elem++; 2610 } 2611 2612 tuple_size += tkn_count * sizeof(*tkn_elem); 2613 } 2614 2615 return off; 2616 } 2617 2618 
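/*
 * Illustration only (the value below is hypothetical): a
 * SKL_TKN_U32_DIR_PIN_COUNT tuple carrying 0x21 is decoded by
 * skl_tplg_get_token() as
 *
 *	dir       = 0x21 & SKL_IN_DIR_BIT_MASK;		// bit 0     -> 1
 *	pin_index = (0x21 & SKL_PIN_COUNT_MASK) >> 4;	// bits 7..4 -> 2
 *
 * so the pin/format tokens that follow in the same vendor array apply to
 * pin 2 of that direction. Note also that skl_tplg_get_tokens() treats the
 * first UUID array in a widget's tuple block as the module GUID; any later
 * UUID arrays are routed through skl_tplg_get_token() and end up as pin
 * UUIDs via skl_tplg_fill_pins_info().
 */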
/*
 * Every data block is preceded by a descriptor giving the number
 * of data blocks, the type of the block and its size
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
		struct snd_soc_tplg_vendor_array *array)
{
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	tkn_elem = array->value;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_NUM_BLOCKS:
	case SKL_TKN_U8_BLOCK_TYPE:
	case SKL_TKN_U16_BLOCK_SIZE:
		return tkn_elem->value;

	default:
		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
		break;
	}

	return -EINVAL;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for the number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				struct skl *skl, struct device *dev,
				struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
			ret = mconfig->formats_config.caps_size;
		}
		off += ret;
	}

	return 0;
}
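/*
 * For illustration only, with hypothetical sizes: the private data walked
 * above is a sequence of vendor arrays laid out roughly as
 *
 *	[NUM_DATA_BLOCKS = 2]				<- descriptor array
 *	[BLOCK_TYPE = SKL_TYPE_TUPLE] [BLOCK_SIZE = N]
 *	[N bytes of token/value tuples]			<- skl_tplg_get_tokens()
 *	[BLOCK_TYPE = SKL_TYPE_DATA]  [BLOCK_SIZE = M]
 *	[M bytes of raw caps blob]			<- copied to formats_config.caps
 *
 * Each descriptor is itself a snd_soc_tplg_vendor_array whose first value
 * element carries the count/type/size, which is why skl_tplg_get_desc_blocks()
 * simply returns tkn_elem->value.
 */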
static void skl_clear_pin_config(struct snd_soc_platform *platform,
				struct snd_soc_dapm_widget *w)
{
	int i;
	struct skl_module_cfg *mconfig;
	struct skl_pipe *pipe;

	if (!strncmp(w->dapm->component->name, platform->component.name,
					strlen(platform->component.name))) {
		mconfig = w->priv;
		pipe = mconfig->pipe;
		for (i = 0; i < mconfig->module->max_input_pins; i++) {
			mconfig->m_in_pin[i].in_use = false;
			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		for (i = 0; i < mconfig->module->max_output_pins; i++) {
			mconfig->m_out_pin[i].in_use = false;
			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		pipe->state = SKL_PIPE_INVALID;
		mconfig->m_state = SKL_MODULE_UNINIT;
	}
}

void skl_cleanup_resources(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct snd_soc_platform *soc_platform = skl->platform;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_platform == NULL)
		return;

	card = soc_platform->component.card;
	if (!card || !card->instantiated)
		return;

	skl->resource.mem = 0;
	skl->resource.mcps = 0;

	list_for_each_entry(w, &card->widgets, list) {
		if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
			skl_clear_pin_config(soc_platform, w);
	}

	skl_clear_module_cnt(ctx->dsp);
}

/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget which gives
 * information to the driver about module and pipeline parameters which DSP
 * FW expects like ids, resource values, formats etc
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct skl *skl = ebus_to_skl(ebus);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_module_cfg *mconfig;

	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set it to query when
	 * module is loaded for a use case
	 */
	mconfig->id.module_id = -1;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);

	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
					__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}

static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	if (ac->max) {
		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}

static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
				struct snd_soc_tplg_enum_control *ec)
{
	void *data;

	if (ec->priv.size) {
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		memcpy(data, ec->priv.data, ec->priv.size);
		se->dobj.private = data;
	}

	return 0;
2874 } 2875 2876 static int skl_tplg_control_load(struct snd_soc_component *cmpnt, 2877 struct snd_kcontrol_new *kctl, 2878 struct snd_soc_tplg_ctl_hdr *hdr) 2879 { 2880 struct soc_bytes_ext *sb; 2881 struct snd_soc_tplg_bytes_control *tplg_bc; 2882 struct snd_soc_tplg_enum_control *tplg_ec; 2883 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt); 2884 struct hdac_bus *bus = ebus_to_hbus(ebus); 2885 struct soc_enum *se; 2886 2887 switch (hdr->ops.info) { 2888 case SND_SOC_TPLG_CTL_BYTES: 2889 tplg_bc = container_of(hdr, 2890 struct snd_soc_tplg_bytes_control, hdr); 2891 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 2892 sb = (struct soc_bytes_ext *)kctl->private_value; 2893 if (tplg_bc->priv.size) 2894 return skl_init_algo_data( 2895 bus->dev, sb, tplg_bc); 2896 } 2897 break; 2898 2899 case SND_SOC_TPLG_CTL_ENUM: 2900 tplg_ec = container_of(hdr, 2901 struct snd_soc_tplg_enum_control, hdr); 2902 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) { 2903 se = (struct soc_enum *)kctl->private_value; 2904 if (tplg_ec->priv.size) 2905 return skl_init_enum_data(bus->dev, se, 2906 tplg_ec); 2907 } 2908 break; 2909 2910 default: 2911 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n", 2912 hdr->ops.get, hdr->ops.put, hdr->ops.info); 2913 break; 2914 } 2915 2916 return 0; 2917 } 2918 2919 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 2920 struct snd_soc_tplg_vendor_string_elem *str_elem, 2921 struct skl *skl) 2922 { 2923 int tkn_count = 0; 2924 static int ref_count; 2925 2926 switch (str_elem->token) { 2927 case SKL_TKN_STR_LIB_NAME: 2928 if (ref_count > skl->skl_sst->lib_count - 1) { 2929 ref_count = 0; 2930 return -EINVAL; 2931 } 2932 2933 strncpy(skl->skl_sst->lib_info[ref_count].name, 2934 str_elem->string, 2935 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name)); 2936 ref_count++; 2937 break; 2938 2939 default: 2940 dev_err(dev, "Not a string token %d\n", str_elem->token); 2941 break; 2942 } 2943 tkn_count++; 2944 2945 return tkn_count; 2946 } 2947 2948 static int skl_tplg_get_str_tkn(struct device *dev, 2949 struct snd_soc_tplg_vendor_array *array, 2950 struct skl *skl) 2951 { 2952 int tkn_count = 0, ret; 2953 struct snd_soc_tplg_vendor_string_elem *str_elem; 2954 2955 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value; 2956 while (tkn_count < array->num_elems) { 2957 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl); 2958 str_elem++; 2959 2960 if (ret < 0) 2961 return ret; 2962 2963 tkn_count = tkn_count + ret; 2964 } 2965 2966 return tkn_count; 2967 } 2968 2969 static int skl_tplg_manifest_fill_fmt(struct device *dev, 2970 struct skl_module_iface *fmt, 2971 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2972 u32 dir, int fmt_idx) 2973 { 2974 struct skl_module_pin_fmt *dst_fmt; 2975 struct skl_module_fmt *mod_fmt; 2976 int ret; 2977 2978 if (!fmt) 2979 return -EINVAL; 2980 2981 switch (dir) { 2982 case SKL_DIR_IN: 2983 dst_fmt = &fmt->inputs[fmt_idx]; 2984 break; 2985 2986 case SKL_DIR_OUT: 2987 dst_fmt = &fmt->outputs[fmt_idx]; 2988 break; 2989 2990 default: 2991 dev_err(dev, "Invalid direction: %d\n", dir); 2992 return -EINVAL; 2993 } 2994 2995 mod_fmt = &dst_fmt->fmt; 2996 2997 switch (tkn_elem->token) { 2998 case SKL_TKN_MM_U32_INTF_PIN_ID: 2999 dst_fmt->id = tkn_elem->value; 3000 break; 3001 3002 default: 3003 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token, 3004 tkn_elem->value); 3005 if (ret < 0) 3006 return ret; 3007 break; 3008 } 3009 3010 return 0; 3011 } 3012 3013 static int skl_tplg_fill_mod_info(struct 
device *dev, 3014 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3015 struct skl_module *mod) 3016 { 3017 3018 if (!mod) 3019 return -EINVAL; 3020 3021 switch (tkn_elem->token) { 3022 case SKL_TKN_U8_IN_PIN_TYPE: 3023 mod->input_pin_type = tkn_elem->value; 3024 break; 3025 3026 case SKL_TKN_U8_OUT_PIN_TYPE: 3027 mod->output_pin_type = tkn_elem->value; 3028 break; 3029 3030 case SKL_TKN_U8_IN_QUEUE_COUNT: 3031 mod->max_input_pins = tkn_elem->value; 3032 break; 3033 3034 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3035 mod->max_output_pins = tkn_elem->value; 3036 break; 3037 3038 case SKL_TKN_MM_U8_NUM_RES: 3039 mod->nr_resources = tkn_elem->value; 3040 break; 3041 3042 case SKL_TKN_MM_U8_NUM_INTF: 3043 mod->nr_interfaces = tkn_elem->value; 3044 break; 3045 3046 default: 3047 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3048 return -EINVAL; 3049 } 3050 3051 return 0; 3052 } 3053 3054 3055 static int skl_tplg_get_int_tkn(struct device *dev, 3056 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3057 struct skl *skl) 3058 { 3059 int tkn_count = 0, ret; 3060 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3061 struct skl_module_res *res = NULL; 3062 struct skl_module_iface *fmt = NULL; 3063 struct skl_module *mod = NULL; 3064 int i; 3065 3066 if (skl->modules) { 3067 mod = skl->modules[mod_idx]; 3068 res = &mod->resources[res_val_idx]; 3069 fmt = &mod->formats[intf_val_idx]; 3070 } 3071 3072 switch (tkn_elem->token) { 3073 case SKL_TKN_U32_LIB_COUNT: 3074 skl->skl_sst->lib_count = tkn_elem->value; 3075 break; 3076 3077 case SKL_TKN_U8_NUM_MOD: 3078 skl->nr_modules = tkn_elem->value; 3079 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3080 sizeof(*skl->modules), GFP_KERNEL); 3081 if (!skl->modules) 3082 return -ENOMEM; 3083 3084 for (i = 0; i < skl->nr_modules; i++) { 3085 skl->modules[i] = devm_kzalloc(dev, 3086 sizeof(struct skl_module), GFP_KERNEL); 3087 if (!skl->modules[i]) 3088 return -ENOMEM; 3089 } 3090 break; 3091 3092 case SKL_TKN_MM_U8_MOD_IDX: 3093 mod_idx = tkn_elem->value; 3094 break; 3095 3096 case SKL_TKN_U8_IN_PIN_TYPE: 3097 case SKL_TKN_U8_OUT_PIN_TYPE: 3098 case SKL_TKN_U8_IN_QUEUE_COUNT: 3099 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3100 case SKL_TKN_MM_U8_NUM_RES: 3101 case SKL_TKN_MM_U8_NUM_INTF: 3102 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3103 if (ret < 0) 3104 return ret; 3105 break; 3106 3107 case SKL_TKN_U32_DIR_PIN_COUNT: 3108 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3109 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3110 break; 3111 3112 case SKL_TKN_MM_U32_RES_ID: 3113 if (!res) 3114 return -EINVAL; 3115 3116 res->id = tkn_elem->value; 3117 res_val_idx = tkn_elem->value; 3118 break; 3119 3120 case SKL_TKN_MM_U32_FMT_ID: 3121 if (!fmt) 3122 return -EINVAL; 3123 3124 fmt->fmt_idx = tkn_elem->value; 3125 intf_val_idx = tkn_elem->value; 3126 break; 3127 3128 case SKL_TKN_MM_U32_CPS: 3129 case SKL_TKN_MM_U32_DMA_SIZE: 3130 case SKL_TKN_MM_U32_CPC: 3131 case SKL_TKN_U32_MEM_PAGES: 3132 case SKL_TKN_U32_OBS: 3133 case SKL_TKN_U32_IBS: 3134 case SKL_TKN_MM_U32_RES_PIN_ID: 3135 case SKL_TKN_MM_U32_PIN_BUF: 3136 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3137 if (ret < 0) 3138 return ret; 3139 3140 break; 3141 3142 case SKL_TKN_MM_U32_NUM_IN_FMT: 3143 if (!fmt) 3144 return -EINVAL; 3145 3146 res->nr_input_pins = tkn_elem->value; 3147 break; 3148 3149 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3150 if (!fmt) 3151 return -EINVAL; 3152 3153 res->nr_output_pins = tkn_elem->value; 3154 break; 3155 3156 case SKL_TKN_U32_FMT_CH: 3157 
case SKL_TKN_U32_FMT_FREQ: 3158 case SKL_TKN_U32_FMT_BIT_DEPTH: 3159 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3160 case SKL_TKN_U32_FMT_CH_CONFIG: 3161 case SKL_TKN_U32_FMT_INTERLEAVE: 3162 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3163 case SKL_TKN_U32_FMT_CH_MAP: 3164 case SKL_TKN_MM_U32_INTF_PIN_ID: 3165 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3166 dir, pin_idx); 3167 if (ret < 0) 3168 return ret; 3169 break; 3170 3171 default: 3172 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3173 return -EINVAL; 3174 } 3175 tkn_count++; 3176 3177 return tkn_count; 3178 } 3179 3180 static int skl_tplg_get_manifest_uuid(struct device *dev, 3181 struct skl *skl, 3182 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 3183 { 3184 static int ref_count; 3185 struct skl_module *mod; 3186 3187 if (uuid_tkn->token == SKL_TKN_UUID) { 3188 mod = skl->modules[ref_count]; 3189 memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid)); 3190 ref_count++; 3191 } else { 3192 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token); 3193 return -EINVAL; 3194 } 3195 3196 return 0; 3197 } 3198 3199 /* 3200 * Fill the manifest structure by parsing the tokens based on the 3201 * type. 3202 */ 3203 static int skl_tplg_get_manifest_tkn(struct device *dev, 3204 char *pvt_data, struct skl *skl, 3205 int block_size) 3206 { 3207 int tkn_count = 0, ret; 3208 int off = 0, tuple_size = 0; 3209 struct snd_soc_tplg_vendor_array *array; 3210 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 3211 3212 if (block_size <= 0) 3213 return -EINVAL; 3214 3215 while (tuple_size < block_size) { 3216 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 3217 off += array->size; 3218 switch (array->type) { 3219 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 3220 ret = skl_tplg_get_str_tkn(dev, array, skl); 3221 3222 if (ret < 0) 3223 return ret; 3224 tkn_count = ret; 3225 3226 tuple_size += tkn_count * 3227 sizeof(struct snd_soc_tplg_vendor_string_elem); 3228 continue; 3229 3230 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 3231 ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid); 3232 if (ret < 0) 3233 return ret; 3234 3235 tuple_size += sizeof(*array->uuid); 3236 continue; 3237 3238 default: 3239 tkn_elem = array->value; 3240 tkn_count = 0; 3241 break; 3242 } 3243 3244 while (tkn_count <= array->num_elems - 1) { 3245 ret = skl_tplg_get_int_tkn(dev, 3246 tkn_elem, skl); 3247 if (ret < 0) 3248 return ret; 3249 3250 tkn_count = tkn_count + ret; 3251 tkn_elem++; 3252 } 3253 tuple_size += (tkn_count * sizeof(*tkn_elem)); 3254 tkn_count = 0; 3255 } 3256 3257 return off; 3258 } 3259 3260 /* 3261 * Parse manifest private data for tokens. The private data block is 3262 * preceded by descriptors for type and size of data block. 
3263 */ 3264 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3265 struct device *dev, struct skl *skl) 3266 { 3267 struct snd_soc_tplg_vendor_array *array; 3268 int num_blocks, block_size = 0, block_type, off = 0; 3269 char *data; 3270 int ret; 3271 3272 /* Read the NUM_DATA_BLOCKS descriptor */ 3273 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3274 ret = skl_tplg_get_desc_blocks(dev, array); 3275 if (ret < 0) 3276 return ret; 3277 num_blocks = ret; 3278 3279 off += array->size; 3280 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3281 while (num_blocks > 0) { 3282 array = (struct snd_soc_tplg_vendor_array *) 3283 (manifest->priv.data + off); 3284 ret = skl_tplg_get_desc_blocks(dev, array); 3285 3286 if (ret < 0) 3287 return ret; 3288 block_type = ret; 3289 off += array->size; 3290 3291 array = (struct snd_soc_tplg_vendor_array *) 3292 (manifest->priv.data + off); 3293 3294 ret = skl_tplg_get_desc_blocks(dev, array); 3295 3296 if (ret < 0) 3297 return ret; 3298 block_size = ret; 3299 off += array->size; 3300 3301 array = (struct snd_soc_tplg_vendor_array *) 3302 (manifest->priv.data + off); 3303 3304 data = (manifest->priv.data + off); 3305 3306 if (block_type == SKL_TYPE_TUPLE) { 3307 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3308 block_size); 3309 3310 if (ret < 0) 3311 return ret; 3312 3313 --num_blocks; 3314 } else { 3315 return -EINVAL; 3316 } 3317 off += ret; 3318 } 3319 3320 return 0; 3321 } 3322 3323 static int skl_manifest_load(struct snd_soc_component *cmpnt, 3324 struct snd_soc_tplg_manifest *manifest) 3325 { 3326 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt); 3327 struct hdac_bus *bus = ebus_to_hbus(ebus); 3328 struct skl *skl = ebus_to_skl(ebus); 3329 3330 /* proceed only if we have private data defined */ 3331 if (manifest->priv.size == 0) 3332 return 0; 3333 3334 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3335 3336 if (skl->skl_sst->lib_count > SKL_MAX_LIB) { 3337 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n", 3338 skl->skl_sst->lib_count); 3339 return -EINVAL; 3340 } 3341 3342 return 0; 3343 } 3344 3345 static struct snd_soc_tplg_ops skl_tplg_ops = { 3346 .widget_load = skl_tplg_widget_load, 3347 .control_load = skl_tplg_control_load, 3348 .bytes_ext_ops = skl_tlv_ops, 3349 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3350 .io_ops = skl_tplg_kcontrol_ops, 3351 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3352 .manifest = skl_manifest_load, 3353 .dai_load = skl_dai_load, 3354 }; 3355 3356 /* 3357 * A pipe can have multiple modules, each of them will be a DAPM widget as 3358 * well. 
While managing a pipeline we need to get the list of all the 3359 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3360 * helps to get the SKL type widgets in that pipeline 3361 */ 3362 static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform) 3363 { 3364 struct snd_soc_dapm_widget *w; 3365 struct skl_module_cfg *mcfg = NULL; 3366 struct skl_pipe_module *p_module = NULL; 3367 struct skl_pipe *pipe; 3368 3369 list_for_each_entry(w, &platform->component.card->widgets, list) { 3370 if (is_skl_dsp_widget_type(w) && w->priv != NULL) { 3371 mcfg = w->priv; 3372 pipe = mcfg->pipe; 3373 3374 p_module = devm_kzalloc(platform->dev, 3375 sizeof(*p_module), GFP_KERNEL); 3376 if (!p_module) 3377 return -ENOMEM; 3378 3379 p_module->w = w; 3380 list_add_tail(&p_module->node, &pipe->w_list); 3381 } 3382 } 3383 3384 return 0; 3385 } 3386 3387 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe) 3388 { 3389 struct skl_pipe_module *w_module; 3390 struct snd_soc_dapm_widget *w; 3391 struct skl_module_cfg *mconfig; 3392 bool host_found = false, link_found = false; 3393 3394 list_for_each_entry(w_module, &pipe->w_list, node) { 3395 w = w_module->w; 3396 mconfig = w->priv; 3397 3398 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3399 host_found = true; 3400 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3401 link_found = true; 3402 } 3403 3404 if (host_found && link_found) 3405 pipe->passthru = true; 3406 else 3407 pipe->passthru = false; 3408 } 3409 3410 /* This will be read from topology manifest, currently defined here */ 3411 #define SKL_MAX_MCPS 30000000 3412 #define SKL_FW_MAX_MEM 1000000 3413 3414 /* 3415 * SKL topology init routine 3416 */ 3417 int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus) 3418 { 3419 int ret; 3420 const struct firmware *fw; 3421 struct hdac_bus *bus = ebus_to_hbus(ebus); 3422 struct skl *skl = ebus_to_skl(ebus); 3423 struct skl_pipeline *ppl; 3424 3425 ret = request_firmware(&fw, skl->tplg_name, bus->dev); 3426 if (ret < 0) { 3427 dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", 3428 skl->tplg_name, ret); 3429 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3430 if (ret < 0) { 3431 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3432 "dfw_sst.bin", ret); 3433 return ret; 3434 } 3435 } 3436 3437 /* 3438 * The complete tplg for SKL is loaded as index 0, we don't use 3439 * any other index 3440 */ 3441 ret = snd_soc_tplg_component_load(&platform->component, 3442 &skl_tplg_ops, fw, 0); 3443 if (ret < 0) { 3444 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3445 release_firmware(fw); 3446 return -EINVAL; 3447 } 3448 3449 skl->resource.max_mcps = SKL_MAX_MCPS; 3450 skl->resource.max_mem = SKL_FW_MAX_MEM; 3451 3452 skl->tplg = fw; 3453 ret = skl_tplg_create_pipe_widget_list(platform); 3454 if (ret < 0) 3455 return ret; 3456 3457 list_for_each_entry(ppl, &skl->ppl_list, node) 3458 skl_tplg_set_pipe_type(skl, ppl->pipe); 3459 3460 return 0; 3461 } 3462
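/*
 * A minimal usage sketch only: skl_tplg_init() is expected to be called once
 * from the platform component probe after the bus and DSP are up, along these
 * lines (the name of the probe callback here is illustrative; the real caller
 * and its error handling live in skl-pcm.c):
 *
 *	static int skl_platform_soc_probe(struct snd_soc_platform *platform)
 *	{
 *		struct hdac_ext_bus *ebus = dev_get_drvdata(platform->dev);
 *
 *		return skl_tplg_init(platform, ebus);
 *	}
 *
 * The MCPS and memory pool limits (SKL_MAX_MCPS, SKL_FW_MAX_MEM) are
 * hardcoded above until they are read from the topology manifest, as the
 * comment preceding them notes.
 */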