/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles this widget type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * available in the pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the complete pool.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}

/*
 * A pipeline needs DSP CPU resources for computation; this is quantified
 * in MCPS (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
 * pipe.
 */

static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mem %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps += res->cps;
}

/*
 * Free the MCPS when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps -= res->cps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}


static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[0];

	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}
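
/*
 * Illustrative note (layout inferred from the loop below, not stated in the
 * original comments): each nibble of ch_map selects the slot for one
 * channel, lowest nibble first, and unused channels remain 0xF. A 4-channel
 * stream starting at slot 0 would therefore produce 0xFFFF3210.
 */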
static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16-bit samples use a 16-bit container whereas 24-bit
		 * samples are in a 32-bit container, so update the bit
		 * depth accordingly.
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}

/*
 * A pipeline may have modules which impact the PCM parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup is to be applied by
 * supplying the fixup mask, so based on that we calculate the output.
 *
 * For an FE the PCM hw_params are the source/target format; the same
 * applies to a BE when its hw_params are invoked.
 * Here, based on the FE/BE pipeline type and direction, we calculate the
 * input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
	out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which are dependent upon PCM
 * params, so once we have calculated the params, we need the buffer
 * calculation as well.
 */
static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/* Since fixups are applied to pin 0 only, ibs/obs need to
	 * change for pin 0 only
	 */
	res = &mcfg->module->resources[0];
	in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[0].outputs[0].fmt;

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}
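
/*
 * Worked example for the ibs/obs formula above (hypothetical values, for
 * illustration only): a 48 kHz stereo stream in a 32-bit container with
 * multiplier 1 gives DIV_ROUND_UP(48000, 1000) * 2 * 4 = 384 bytes per
 * millisecond of buffering.
 */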

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
			w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
			w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls, and those
 * need to be set after the module is initialized. If the set_param flag is
 * set, the module params are sent after the module is initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required at
 * module initialization. Such params are identified by a set_param flag of
 * SKL_PARAM_INIT and are passed as part of the module init data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	u8 cfg_idx;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		cfg_idx = mconfig->pipe->cur_config_idx;
		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		uuid_mod = (uuid_le *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);

		ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n",
						mconfig->core_id, ret);
			return ret;
		}

		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
			goto err;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	skl_dsp_put_core(ctx->dsp, mconfig->core_id);
	return ret;
}

static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
						struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (uuid_le *)mconfig->guid;

		if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);

		ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}

/*
 * Here, we select the pipe format based on the pipe type and pipe
 * direction to determine the current config index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		pipe->cur_config_idx = 0;
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
		dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
		pipe->cur_config_idx = 0;
		pipe->memory_pages = pconfig->mem_pages;

		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
		pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
		(pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
		pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			pipe->cur_config_idx = i;
			pipe->memory_pages = pconfig->mem_pages;
			dev_dbg(ctx->dev, "Using pipe config: %d\n", i);

			return 0;
		}
	}

	dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 *   - check the resources
 *   - create the pipeline
 *   - initialize the modules in the pipeline
 *   - finally, bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these
	 * modules in the deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(ctx, modules->src,
							modules->dst);
		}
	}

	return 0;
}

static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
/*
 * Some modules require params to be set after the module is bound to all
 * its connected pins.
 *
 * The module provider initializes the set_param flag for such modules and
 * we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * Check if all out/in pins are in the bind state;
	 * if so, set the module params.
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kzalloc(bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				memcpy(params, bc->params, bc->max);
				skl_fill_sink_instance_id(ctx, params, bc->max,
								mconfig);

				ret = skl_set_module_params(ctx, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_get_module_id(struct skl_sst *ctx, uuid_le *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &ctx->uuid_list, list) {
		if (uuid_le_cmp(*uuid, module->uuid) == 0)
			return module->id;
	}

	return -EINVAL;
}

static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = ebus_to_hbus(skl_to_ebus(skl));
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = uuid_params->num_modules *
			sizeof(struct skl_mod_inst_map) +
			sizeof(uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl->skl_sst,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from the UUID mentioned in the
 * post bind params
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post bind params are used only for KPB
	 * to set the copier instances to drain the data
	 * in fast mode
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->skl_sst->dev,
					"%s: invalid kpb post bind params\n",
					__func__);
}

static int skl_tplg_module_add_deferred_bind(struct skl *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for modules with static pin connections */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink, ctx->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check widgets in the sink pipelines; those can be
		 * of any widget type and we are only interested in the ones
		 * handled by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink, ctx->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. the reference path. When the path
			 * is enabled, the dst module that needs to be bound
			 * may not be initialized yet. If it is not
			 * initialized, add the modules to the deferred bind
			 * list and, once the dst module is initialized, bind
			 * this module to the dst module from the deferred
			 * list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}


			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of the
 * PGA we need to do the following:
 *   - Bind to the sink pipeline
 *     Since the sink pipes can be running, and we don't get a mixer event
 *     on connect for an already running mixer, we need to find the sink
 *     pipes here and bind to them. This way dynamic connect works.
 *   - Start the sink pipeline, if not running
 *   - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * Find which sink it is connected to and bind with that sink. If
	 * the sink is not started, start the sink pipe first, then start
	 * this pipe.
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(ctx, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;
	struct skl_sst *ctx = skl->skl_sst;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check the connected widgets; those can be of any
		 * widget type and we are only interested in the ones
		 * handled by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source, ctx->dev)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 *   - Check if this pipe is running
 *   - If not, then:
 *     - bind this pipeline to its source pipeline;
 *       if the source pipe is already running, this means it is a dynamic
 *       connection and we need to bind only to that pipe
 *     - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, that means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this
	 * pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * Check the pipe state: if the source is not started,
		 * there is no need to bind or start the pipe.
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 *   - Stop the pipe
 *   - Find the source connections and remove them from the dapm_path_list
 *   - Unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 *   - Free the MCPS used
 *   - Free the memory used
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, pipeline delete is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, unbind
			 * the modules from the deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(ctx, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this
			 * entry from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 *   - Free the MCPS used
 *   - Stop the pipeline
 *   - If still connected, unbind from the sink pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if a path is found, that
			 * means the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
							sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}
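
/*
 * Rough illustration of how the handlers above combine (assumed typical
 * FE-to-BE playback path; the exact DAPM ordering is not spelled out in
 * this file): the mixer PRE_PMU handler creates a pipe and initializes and
 * binds the modules inside it, the PGA PRE_PMU handler binds that pipe to
 * its sink pipe(s) and starts them, and on teardown the PRE_PMD/POST_PMD
 * handlers stop the pipes, undo the bindings and release the memory/MCPS
 * bookkeeping.
 */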

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * But we are only interested in the last PGA (leaf PGA) in a pipeline, to
 * disconnect from the sink while it is running (two FEs to one BE, or one
 * FE to two BEs scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}
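
/*
 * For reference (inferred from the copy_to_user()/copy_from_user() offsets
 * used in the two TLV handlers around this point): the byte control
 * payload exchanged with user space is laid out as a u32 parameter id,
 * a u32 size and then 'size' bytes of parameter data.
 */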

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * If the param_id is of type Vendor, the firmware expects
		 * the actual parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* The channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the DMA id for host and link. In case of a passthrough pipeline,
 * both host and link are in the same pipeline, so we need to copy the
 * link or host params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed via hw_params of the DAI.
 * On hw_params, the params are stored in the gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res = &mconfig->module->resources[0];
	struct skl *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	if (skl->nr_modules)
		return 0;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[0].inputs[0].fmt;
	else
		format = &mconfig->module->formats[0].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16-bit samples use a 16-bit container whereas 24-bit samples are
	 * in a 32-bit container, so update the bit depth accordingly.
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}
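
/*
 * Worked example for the conversion above (hypothetical hw_params): an
 * S24_LE 48 kHz stereo FE stream yields valid_bit_depth = 24 in a 32-bit
 * container, so ibs/obs become 48 * 2 * 4 = 384 bytes per millisecond.
 */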

/*
 * Query the module config for the FE DAI
 * This is used to find the hw_params set for that DAI and apply to FE
 * pipeline
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
				!is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
				!is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings so pick based on the PCM
 * parameters
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id*/
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on sink and source we need to find either the source
 * list or the sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
};

static int skl_tplg_fill_pipe_cfg(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val, int conf_idx, int dir)
{
	struct skl_pipe_fmt *fmt;
	struct skl_path_config *config;

	switch (dir) {
	case SKL_DIR_IN:
		fmt = &pipe->configs[conf_idx].in_fmt;
		break;

	case SKL_DIR_OUT:
		fmt = &pipe->configs[conf_idx].out_fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	config = &pipe->configs[conf_idx];

	switch (tkn) {
	case SKL_TKN_U32_CFG_FREQ:
		fmt->freq = tkn_val;
		break;

	case SKL_TKN_U8_CFG_CHAN:
		fmt->channels = tkn_val;
		break;

	case SKL_TKN_U8_CFG_BPS:
		fmt->bps = tkn_val;
		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
		config->mem_pages = tkn_val;
		break;

	default:
		dev_err(dev, "Invalid token config: %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{

	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_DIRECTION:
		pipe->direction = tkn_val;
		break;

	case SKL_TKN_U32_NUM_CONFIGS:
		pipe->nr_cfgs = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add pipeline by parsing the relevant tokens
 * Return an existing pipe if the pipe already exists.
 */
2083 */ 2084 static int skl_tplg_add_pipe(struct device *dev, 2085 struct skl_module_cfg *mconfig, struct skl *skl, 2086 struct snd_soc_tplg_vendor_value_elem *tkn_elem) 2087 { 2088 struct skl_pipeline *ppl; 2089 struct skl_pipe *pipe; 2090 struct skl_pipe_params *params; 2091 2092 list_for_each_entry(ppl, &skl->ppl_list, node) { 2093 if (ppl->pipe->ppl_id == tkn_elem->value) { 2094 mconfig->pipe = ppl->pipe; 2095 return -EEXIST; 2096 } 2097 } 2098 2099 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2100 if (!ppl) 2101 return -ENOMEM; 2102 2103 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2104 if (!pipe) 2105 return -ENOMEM; 2106 2107 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2108 if (!params) 2109 return -ENOMEM; 2110 2111 pipe->p_params = params; 2112 pipe->ppl_id = tkn_elem->value; 2113 INIT_LIST_HEAD(&pipe->w_list); 2114 2115 ppl->pipe = pipe; 2116 list_add(&ppl->node, &skl->ppl_list); 2117 2118 mconfig->pipe = pipe; 2119 mconfig->pipe->state = SKL_PIPE_INVALID; 2120 2121 return 0; 2122 } 2123 2124 static int skl_tplg_get_uuid(struct device *dev, u8 *guid, 2125 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 2126 { 2127 if (uuid_tkn->token == SKL_TKN_UUID) { 2128 memcpy(guid, &uuid_tkn->uuid, 16); 2129 return 0; 2130 } 2131 2132 dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token); 2133 2134 return -EINVAL; 2135 } 2136 2137 static int skl_tplg_fill_pin(struct device *dev, 2138 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2139 struct skl_module_pin *m_pin, 2140 int pin_index) 2141 { 2142 int ret; 2143 2144 switch (tkn_elem->token) { 2145 case SKL_TKN_U32_PIN_MOD_ID: 2146 m_pin[pin_index].id.module_id = tkn_elem->value; 2147 break; 2148 2149 case SKL_TKN_U32_PIN_INST_ID: 2150 m_pin[pin_index].id.instance_id = tkn_elem->value; 2151 break; 2152 2153 case SKL_TKN_UUID: 2154 ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b, 2155 (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem); 2156 if (ret < 0) 2157 return ret; 2158 2159 break; 2160 2161 default: 2162 dev_err(dev, "%d Not a pin token\n", tkn_elem->token); 2163 return -EINVAL; 2164 } 2165 2166 return 0; 2167 } 2168 2169 /* 2170 * Parse for pin config specific tokens to fill up the 2171 * module private data 2172 */ 2173 static int skl_tplg_fill_pins_info(struct device *dev, 2174 struct skl_module_cfg *mconfig, 2175 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2176 int dir, int pin_count) 2177 { 2178 int ret; 2179 struct skl_module_pin *m_pin; 2180 2181 switch (dir) { 2182 case SKL_DIR_IN: 2183 m_pin = mconfig->m_in_pin; 2184 break; 2185 2186 case SKL_DIR_OUT: 2187 m_pin = mconfig->m_out_pin; 2188 break; 2189 2190 default: 2191 dev_err(dev, "Invalid direction value\n"); 2192 return -EINVAL; 2193 } 2194 2195 ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count); 2196 if (ret < 0) 2197 return ret; 2198 2199 m_pin[pin_count].in_use = false; 2200 m_pin[pin_count].pin_state = SKL_PIN_UNBIND; 2201 2202 return 0; 2203 } 2204 2205 /* 2206 * Fill up input/output module config format based 2207 * on the direction 2208 */ 2209 static int skl_tplg_fill_fmt(struct device *dev, 2210 struct skl_module_fmt *dst_fmt, 2211 u32 tkn, u32 value) 2212 { 2213 switch (tkn) { 2214 case SKL_TKN_U32_FMT_CH: 2215 dst_fmt->channels = value; 2216 break; 2217 2218 case SKL_TKN_U32_FMT_FREQ: 2219 dst_fmt->s_freq = value; 2220 break; 2221 2222 case SKL_TKN_U32_FMT_BIT_DEPTH: 2223 dst_fmt->bit_depth = value; 2224 break; 2225 2226 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2227 dst_fmt->valid_bit_depth = value; 2228 break; 2229 2230 
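	/*
	 * Note: SKL_TKN_U32_FMT_BIT_DEPTH above carries the container size
	 * while SKL_TKN_U32_FMT_SAMPLE_SIZE carries the valid bits within
	 * it, hence the valid_bit_depth assignment.
	 */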
case SKL_TKN_U32_FMT_CH_CONFIG: 2231 dst_fmt->ch_cfg = value; 2232 break; 2233 2234 case SKL_TKN_U32_FMT_INTERLEAVE: 2235 dst_fmt->interleaving_style = value; 2236 break; 2237 2238 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2239 dst_fmt->sample_type = value; 2240 break; 2241 2242 case SKL_TKN_U32_FMT_CH_MAP: 2243 dst_fmt->ch_map = value; 2244 break; 2245 2246 default: 2247 dev_err(dev, "Invalid token %d\n", tkn); 2248 return -EINVAL; 2249 } 2250 2251 return 0; 2252 } 2253 2254 static int skl_tplg_widget_fill_fmt(struct device *dev, 2255 struct skl_module_iface *fmt, 2256 u32 tkn, u32 val, u32 dir, int fmt_idx) 2257 { 2258 struct skl_module_fmt *dst_fmt; 2259 2260 if (!fmt) 2261 return -EINVAL; 2262 2263 switch (dir) { 2264 case SKL_DIR_IN: 2265 dst_fmt = &fmt->inputs[fmt_idx].fmt; 2266 break; 2267 2268 case SKL_DIR_OUT: 2269 dst_fmt = &fmt->outputs[fmt_idx].fmt; 2270 break; 2271 2272 default: 2273 dev_err(dev, "Invalid direction: %d\n", dir); 2274 return -EINVAL; 2275 } 2276 2277 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val); 2278 } 2279 2280 static void skl_tplg_fill_pin_dynamic_val( 2281 struct skl_module_pin *mpin, u32 pin_count, u32 value) 2282 { 2283 int i; 2284 2285 for (i = 0; i < pin_count; i++) 2286 mpin[i].is_dynamic = value; 2287 } 2288 2289 /* 2290 * Resource table in the manifest has pin specific resources 2291 * like pin and pin buffer size 2292 */ 2293 static int skl_tplg_manifest_pin_res_tkn(struct device *dev, 2294 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2295 struct skl_module_res *res, int pin_idx, int dir) 2296 { 2297 struct skl_module_pin_resources *m_pin; 2298 2299 switch (dir) { 2300 case SKL_DIR_IN: 2301 m_pin = &res->input[pin_idx]; 2302 break; 2303 2304 case SKL_DIR_OUT: 2305 m_pin = &res->output[pin_idx]; 2306 break; 2307 2308 default: 2309 dev_err(dev, "Invalid pin direction: %d\n", dir); 2310 return -EINVAL; 2311 } 2312 2313 switch (tkn_elem->token) { 2314 case SKL_TKN_MM_U32_RES_PIN_ID: 2315 m_pin->pin_index = tkn_elem->value; 2316 break; 2317 2318 case SKL_TKN_MM_U32_PIN_BUF: 2319 m_pin->buf_size = tkn_elem->value; 2320 break; 2321 2322 default: 2323 dev_err(dev, "Invalid token: %d\n", tkn_elem->token); 2324 return -EINVAL; 2325 } 2326 2327 return 0; 2328 } 2329 2330 /* 2331 * Fill module specific resources from the manifest's resource 2332 * table like CPS, DMA size, mem_pages. 
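 * A positive return value is the number of token/value pairs consumed
 * (currently always one) so the caller can step through the vendor array.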
2333 */ 2334 static int skl_tplg_fill_res_tkn(struct device *dev, 2335 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2336 struct skl_module_res *res, 2337 int pin_idx, int dir) 2338 { 2339 int ret, tkn_count = 0; 2340 2341 if (!res) 2342 return -EINVAL; 2343 2344 switch (tkn_elem->token) { 2345 case SKL_TKN_MM_U32_CPS: 2346 res->cps = tkn_elem->value; 2347 break; 2348 2349 case SKL_TKN_MM_U32_DMA_SIZE: 2350 res->dma_buffer_size = tkn_elem->value; 2351 break; 2352 2353 case SKL_TKN_MM_U32_CPC: 2354 res->cpc = tkn_elem->value; 2355 break; 2356 2357 case SKL_TKN_U32_MEM_PAGES: 2358 res->is_pages = tkn_elem->value; 2359 break; 2360 2361 case SKL_TKN_U32_OBS: 2362 res->obs = tkn_elem->value; 2363 break; 2364 2365 case SKL_TKN_U32_IBS: 2366 res->ibs = tkn_elem->value; 2367 break; 2368 2369 case SKL_TKN_U32_MAX_MCPS: 2370 res->cps = tkn_elem->value; 2371 break; 2372 2373 case SKL_TKN_MM_U32_RES_PIN_ID: 2374 case SKL_TKN_MM_U32_PIN_BUF: 2375 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res, 2376 pin_idx, dir); 2377 if (ret < 0) 2378 return ret; 2379 break; 2380 2381 default: 2382 dev_err(dev, "Not a res type token: %d", tkn_elem->token); 2383 return -EINVAL; 2384 2385 } 2386 tkn_count++; 2387 2388 return tkn_count; 2389 } 2390 2391 /* 2392 * Parse tokens to fill up the module private data 2393 */ 2394 static int skl_tplg_get_token(struct device *dev, 2395 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2396 struct skl *skl, struct skl_module_cfg *mconfig) 2397 { 2398 int tkn_count = 0; 2399 int ret; 2400 static int is_pipe_exists; 2401 static int pin_index, dir, conf_idx; 2402 struct skl_module_iface *iface = NULL; 2403 struct skl_module_res *res = NULL; 2404 int res_idx = mconfig->res_idx; 2405 int fmt_idx = mconfig->fmt_idx; 2406 2407 /* 2408 * If the manifest structure contains no modules, fill all 2409 * the module data to 0th index. 2410 * res_idx and fmt_idx are default set to 0. 
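 * dir, pin_index and conf_idx are kept static because they are set by one
 * token (e.g. SKL_TKN_U32_DIR_PIN_COUNT) and consumed by the format, pin
 * and config tokens that follow it in the vendor array.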
 */
	if (skl->nr_modules == 0) {
		res = &mconfig->module->resources[res_idx];
		iface = &mconfig->module->formats[fmt_idx];
	}

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->module->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->module->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			mconfig->m_in_pin =
				devm_kcalloc(dev, MAX_IN_QUEUE,
					     sizeof(*mconfig->m_in_pin),
					     GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_OUT_QUEUE,
					     sizeof(*mconfig->m_out_pin),
					     GFP_KERNEL);
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_MAX_MCPS:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0) {
			if (ret == -EEXIST) {
				is_pipe_exists = 1;
				break;
			}
			return ret;
		}

		break;

	case SKL_TKN_U32_PIPE_CONFIG_ID:
		conf_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
	case SKL_TKN_U32_PIPE_DIRECTION:
	case SKL_TKN_U32_NUM_CONFIGS:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
	case SKL_TKN_U32_CFG_FREQ:
	case SKL_TKN_U8_CFG_CHAN:
	case SKL_TKN_U8_CFG_BPS:
		if (mconfig->pipe->nr_cfgs) {
			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value,
					conf_idx, dir);
			if (ret < 0)
				return ret;
		}
		break;

	case SKL_TKN_CFG_MOD_RES_ID:
		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
		break;

	case SKL_TKN_CFG_MOD_FMT_ID:
mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2557 break; 2558 2559 /* 2560 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2561 * direction and the pin count. The first four bits represent 2562 * direction and next four the pin count. 2563 */ 2564 case SKL_TKN_U32_DIR_PIN_COUNT: 2565 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 2566 pin_index = (tkn_elem->value & 2567 SKL_PIN_COUNT_MASK) >> 4; 2568 2569 break; 2570 2571 case SKL_TKN_U32_FMT_CH: 2572 case SKL_TKN_U32_FMT_FREQ: 2573 case SKL_TKN_U32_FMT_BIT_DEPTH: 2574 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2575 case SKL_TKN_U32_FMT_CH_CONFIG: 2576 case SKL_TKN_U32_FMT_INTERLEAVE: 2577 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2578 case SKL_TKN_U32_FMT_CH_MAP: 2579 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token, 2580 tkn_elem->value, dir, pin_index); 2581 2582 if (ret < 0) 2583 return ret; 2584 2585 break; 2586 2587 case SKL_TKN_U32_PIN_MOD_ID: 2588 case SKL_TKN_U32_PIN_INST_ID: 2589 case SKL_TKN_UUID: 2590 ret = skl_tplg_fill_pins_info(dev, 2591 mconfig, tkn_elem, dir, 2592 pin_index); 2593 if (ret < 0) 2594 return ret; 2595 2596 break; 2597 2598 case SKL_TKN_U32_CAPS_SIZE: 2599 mconfig->formats_config.caps_size = 2600 tkn_elem->value; 2601 2602 break; 2603 2604 case SKL_TKN_U32_CAPS_SET_PARAMS: 2605 mconfig->formats_config.set_params = 2606 tkn_elem->value; 2607 break; 2608 2609 case SKL_TKN_U32_CAPS_PARAMS_ID: 2610 mconfig->formats_config.param_id = 2611 tkn_elem->value; 2612 break; 2613 2614 case SKL_TKN_U32_PROC_DOMAIN: 2615 mconfig->domain = 2616 tkn_elem->value; 2617 2618 break; 2619 2620 case SKL_TKN_U32_DMA_BUF_SIZE: 2621 mconfig->dma_buffer_size = tkn_elem->value; 2622 break; 2623 2624 case SKL_TKN_U8_IN_PIN_TYPE: 2625 case SKL_TKN_U8_OUT_PIN_TYPE: 2626 case SKL_TKN_U8_CONN_TYPE: 2627 break; 2628 2629 default: 2630 dev_err(dev, "Token %d not handled\n", 2631 tkn_elem->token); 2632 return -EINVAL; 2633 } 2634 2635 tkn_count++; 2636 2637 return tkn_count; 2638 } 2639 2640 /* 2641 * Parse the vendor array for specific tokens to construct 2642 * module private data 2643 */ 2644 static int skl_tplg_get_tokens(struct device *dev, 2645 char *pvt_data, struct skl *skl, 2646 struct skl_module_cfg *mconfig, int block_size) 2647 { 2648 struct snd_soc_tplg_vendor_array *array; 2649 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2650 int tkn_count = 0, ret; 2651 int off = 0, tuple_size = 0; 2652 bool is_module_guid = true; 2653 2654 if (block_size <= 0) 2655 return -EINVAL; 2656 2657 while (tuple_size < block_size) { 2658 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 2659 2660 off += array->size; 2661 2662 switch (array->type) { 2663 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 2664 dev_warn(dev, "no string tokens expected for skl tplg\n"); 2665 continue; 2666 2667 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 2668 if (is_module_guid) { 2669 ret = skl_tplg_get_uuid(dev, mconfig->guid, 2670 array->uuid); 2671 is_module_guid = false; 2672 } else { 2673 ret = skl_tplg_get_token(dev, array->value, skl, 2674 mconfig); 2675 } 2676 2677 if (ret < 0) 2678 return ret; 2679 2680 tuple_size += sizeof(*array->uuid); 2681 2682 continue; 2683 2684 default: 2685 tkn_elem = array->value; 2686 tkn_count = 0; 2687 break; 2688 } 2689 2690 while (tkn_count <= (array->num_elems - 1)) { 2691 ret = skl_tplg_get_token(dev, tkn_elem, 2692 skl, mconfig); 2693 2694 if (ret < 0) 2695 return ret; 2696 2697 tkn_count = tkn_count + ret; 2698 tkn_elem++; 2699 } 2700 2701 tuple_size += tkn_count * sizeof(*tkn_elem); 2702 } 2703 2704 return off; 2705 } 2706 2707 
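/*
 * Illustrative layout of tuple-based private data as it is consumed by
 * skl_tplg_get_pvt_data() and skl_tplg_get_manifest_data() below (a sketch
 * of the descriptor scheme, not a dump of a real topology binary):
 *
 *   vendor array: SKL_TKN_U8_NUM_BLOCKS  = N
 *   then, for each of the N blocks:
 *     vendor array: SKL_TKN_U8_BLOCK_TYPE  = tuple or data
 *     vendor array: SKL_TKN_U16_BLOCK_SIZE = S
 *     S bytes of token/value tuples, or raw caps data for a non-tuple block
 */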
/* 2708 * Every data block is preceded by a descriptor to read the number 2709 * of data blocks, they type of the block and it's size 2710 */ 2711 static int skl_tplg_get_desc_blocks(struct device *dev, 2712 struct snd_soc_tplg_vendor_array *array) 2713 { 2714 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2715 2716 tkn_elem = array->value; 2717 2718 switch (tkn_elem->token) { 2719 case SKL_TKN_U8_NUM_BLOCKS: 2720 case SKL_TKN_U8_BLOCK_TYPE: 2721 case SKL_TKN_U16_BLOCK_SIZE: 2722 return tkn_elem->value; 2723 2724 default: 2725 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token); 2726 break; 2727 } 2728 2729 return -EINVAL; 2730 } 2731 2732 /* Functions to parse private data from configuration file format v4 */ 2733 2734 /* 2735 * Add pipeline from topology binary into driver pipeline list 2736 * 2737 * If already added we return that instance 2738 * Otherwise we create a new instance and add into driver list 2739 */ 2740 static int skl_tplg_add_pipe_v4(struct device *dev, 2741 struct skl_module_cfg *mconfig, struct skl *skl, 2742 struct skl_dfw_v4_pipe *dfw_pipe) 2743 { 2744 struct skl_pipeline *ppl; 2745 struct skl_pipe *pipe; 2746 struct skl_pipe_params *params; 2747 2748 list_for_each_entry(ppl, &skl->ppl_list, node) { 2749 if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) { 2750 mconfig->pipe = ppl->pipe; 2751 return 0; 2752 } 2753 } 2754 2755 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2756 if (!ppl) 2757 return -ENOMEM; 2758 2759 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2760 if (!pipe) 2761 return -ENOMEM; 2762 2763 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2764 if (!params) 2765 return -ENOMEM; 2766 2767 pipe->ppl_id = dfw_pipe->pipe_id; 2768 pipe->memory_pages = dfw_pipe->memory_pages; 2769 pipe->pipe_priority = dfw_pipe->pipe_priority; 2770 pipe->conn_type = dfw_pipe->conn_type; 2771 pipe->state = SKL_PIPE_INVALID; 2772 pipe->p_params = params; 2773 INIT_LIST_HEAD(&pipe->w_list); 2774 2775 ppl->pipe = pipe; 2776 list_add(&ppl->node, &skl->ppl_list); 2777 2778 mconfig->pipe = pipe; 2779 2780 return 0; 2781 } 2782 2783 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin, 2784 struct skl_module_pin *m_pin, 2785 bool is_dynamic, int max_pin) 2786 { 2787 int i; 2788 2789 for (i = 0; i < max_pin; i++) { 2790 m_pin[i].id.module_id = dfw_pin[i].module_id; 2791 m_pin[i].id.instance_id = dfw_pin[i].instance_id; 2792 m_pin[i].in_use = false; 2793 m_pin[i].is_dynamic = is_dynamic; 2794 m_pin[i].pin_state = SKL_PIN_UNBIND; 2795 } 2796 } 2797 2798 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt, 2799 struct skl_dfw_v4_module_fmt *src_fmt, 2800 int pins) 2801 { 2802 int i; 2803 2804 for (i = 0; i < pins; i++) { 2805 dst_fmt[i].fmt.channels = src_fmt[i].channels; 2806 dst_fmt[i].fmt.s_freq = src_fmt[i].freq; 2807 dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth; 2808 dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth; 2809 dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg; 2810 dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map; 2811 dst_fmt[i].fmt.interleaving_style = 2812 src_fmt[i].interleaving_style; 2813 dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type; 2814 } 2815 } 2816 2817 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w, 2818 struct skl *skl, struct device *dev, 2819 struct skl_module_cfg *mconfig) 2820 { 2821 struct skl_dfw_v4_module *dfw = 2822 (struct skl_dfw_v4_module *)tplg_w->priv.data; 2823 int ret; 2824 2825 dev_dbg(dev, "Parsing Skylake v4 widget topology data\n"); 2826 
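	/*
	 * A v4 blob is one struct skl_dfw_v4_module, so its fields can be
	 * copied straight into the runtime module config.
	 */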
2827 ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid); 2828 if (ret) 2829 return ret; 2830 mconfig->id.module_id = -1; 2831 mconfig->id.instance_id = dfw->instance_id; 2832 mconfig->module->resources[0].cps = dfw->max_mcps; 2833 mconfig->module->resources[0].ibs = dfw->ibs; 2834 mconfig->module->resources[0].obs = dfw->obs; 2835 mconfig->core_id = dfw->core_id; 2836 mconfig->module->max_input_pins = dfw->max_in_queue; 2837 mconfig->module->max_output_pins = dfw->max_out_queue; 2838 mconfig->module->loadable = dfw->is_loadable; 2839 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt, 2840 MAX_IN_QUEUE); 2841 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt, 2842 MAX_OUT_QUEUE); 2843 2844 mconfig->params_fixup = dfw->params_fixup; 2845 mconfig->converter = dfw->converter; 2846 mconfig->m_type = dfw->module_type; 2847 mconfig->vbus_id = dfw->vbus_id; 2848 mconfig->module->resources[0].is_pages = dfw->mem_pages; 2849 2850 ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe); 2851 if (ret) 2852 return ret; 2853 2854 mconfig->dev_type = dfw->dev_type; 2855 mconfig->hw_conn_type = dfw->hw_conn_type; 2856 mconfig->time_slot = dfw->time_slot; 2857 mconfig->formats_config.caps_size = dfw->caps.caps_size; 2858 2859 mconfig->m_in_pin = devm_kcalloc(dev, 2860 MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin), 2861 GFP_KERNEL); 2862 if (!mconfig->m_in_pin) 2863 return -ENOMEM; 2864 2865 mconfig->m_out_pin = devm_kcalloc(dev, 2866 MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin), 2867 GFP_KERNEL); 2868 if (!mconfig->m_out_pin) 2869 return -ENOMEM; 2870 2871 skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin, 2872 dfw->is_dynamic_in_pin, 2873 mconfig->module->max_input_pins); 2874 skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin, 2875 dfw->is_dynamic_out_pin, 2876 mconfig->module->max_output_pins); 2877 2878 if (mconfig->formats_config.caps_size) { 2879 mconfig->formats_config.set_params = dfw->caps.set_params; 2880 mconfig->formats_config.param_id = dfw->caps.param_id; 2881 mconfig->formats_config.caps = 2882 devm_kzalloc(dev, mconfig->formats_config.caps_size, 2883 GFP_KERNEL); 2884 if (!mconfig->formats_config.caps) 2885 return -ENOMEM; 2886 memcpy(mconfig->formats_config.caps, dfw->caps.caps, 2887 dfw->caps.caps_size); 2888 } 2889 2890 return 0; 2891 } 2892 2893 /* 2894 * Parse the private data for the token and corresponding value. 2895 * The private data can have multiple data blocks. So, a data block 2896 * is preceded by a descriptor for number of blocks and a descriptor 2897 * for the type and size of the suceeding data block. 2898 */ 2899 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w, 2900 struct skl *skl, struct device *dev, 2901 struct skl_module_cfg *mconfig) 2902 { 2903 struct snd_soc_tplg_vendor_array *array; 2904 int num_blocks, block_size = 0, block_type, off = 0; 2905 char *data; 2906 int ret; 2907 2908 /* 2909 * v4 configuration files have a valid UUID at the start of 2910 * the widget's private data. 
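 * Tuple based blobs start with block descriptors instead, so uuid_is_valid()
 * is used below to choose between the two parsers.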
2911 */ 2912 if (uuid_is_valid((char *)tplg_w->priv.data)) 2913 return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig); 2914 2915 /* Read the NUM_DATA_BLOCKS descriptor */ 2916 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data; 2917 ret = skl_tplg_get_desc_blocks(dev, array); 2918 if (ret < 0) 2919 return ret; 2920 num_blocks = ret; 2921 2922 off += array->size; 2923 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 2924 while (num_blocks > 0) { 2925 array = (struct snd_soc_tplg_vendor_array *) 2926 (tplg_w->priv.data + off); 2927 2928 ret = skl_tplg_get_desc_blocks(dev, array); 2929 2930 if (ret < 0) 2931 return ret; 2932 block_type = ret; 2933 off += array->size; 2934 2935 array = (struct snd_soc_tplg_vendor_array *) 2936 (tplg_w->priv.data + off); 2937 2938 ret = skl_tplg_get_desc_blocks(dev, array); 2939 2940 if (ret < 0) 2941 return ret; 2942 block_size = ret; 2943 off += array->size; 2944 2945 array = (struct snd_soc_tplg_vendor_array *) 2946 (tplg_w->priv.data + off); 2947 2948 data = (tplg_w->priv.data + off); 2949 2950 if (block_type == SKL_TYPE_TUPLE) { 2951 ret = skl_tplg_get_tokens(dev, data, 2952 skl, mconfig, block_size); 2953 2954 if (ret < 0) 2955 return ret; 2956 2957 --num_blocks; 2958 } else { 2959 if (mconfig->formats_config.caps_size > 0) 2960 memcpy(mconfig->formats_config.caps, data, 2961 mconfig->formats_config.caps_size); 2962 --num_blocks; 2963 ret = mconfig->formats_config.caps_size; 2964 } 2965 off += ret; 2966 } 2967 2968 return 0; 2969 } 2970 2971 static void skl_clear_pin_config(struct snd_soc_component *component, 2972 struct snd_soc_dapm_widget *w) 2973 { 2974 int i; 2975 struct skl_module_cfg *mconfig; 2976 struct skl_pipe *pipe; 2977 2978 if (!strncmp(w->dapm->component->name, component->name, 2979 strlen(component->name))) { 2980 mconfig = w->priv; 2981 pipe = mconfig->pipe; 2982 for (i = 0; i < mconfig->module->max_input_pins; i++) { 2983 mconfig->m_in_pin[i].in_use = false; 2984 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND; 2985 } 2986 for (i = 0; i < mconfig->module->max_output_pins; i++) { 2987 mconfig->m_out_pin[i].in_use = false; 2988 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND; 2989 } 2990 pipe->state = SKL_PIPE_INVALID; 2991 mconfig->m_state = SKL_MODULE_UNINIT; 2992 } 2993 } 2994 2995 void skl_cleanup_resources(struct skl *skl) 2996 { 2997 struct skl_sst *ctx = skl->skl_sst; 2998 struct snd_soc_component *soc_component = skl->component; 2999 struct snd_soc_dapm_widget *w; 3000 struct snd_soc_card *card; 3001 3002 if (soc_component == NULL) 3003 return; 3004 3005 card = soc_component->card; 3006 if (!card || !card->instantiated) 3007 return; 3008 3009 skl->resource.mem = 0; 3010 skl->resource.mcps = 0; 3011 3012 list_for_each_entry(w, &card->widgets, list) { 3013 if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL) 3014 skl_clear_pin_config(soc_component, w); 3015 } 3016 3017 skl_clear_module_cnt(ctx->dsp); 3018 } 3019 3020 /* 3021 * Topology core widget load callback 3022 * 3023 * This is used to save the private data for each widget which gives 3024 * information to the driver about module and pipeline parameters which DSP 3025 * FW expects like ids, resource values, formats etc 3026 */ 3027 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, 3028 struct snd_soc_dapm_widget *w, 3029 struct snd_soc_tplg_dapm_widget *tplg_w) 3030 { 3031 int ret; 3032 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt); 3033 struct skl *skl = ebus_to_skl(ebus); 3034 struct hdac_bus *bus = 
ebus_to_hbus(ebus);
	struct skl_module_cfg *mconfig;

	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * The module binary can be loaded later, so set the module id to -1
	 * here and query it when the module is loaded for a use case.
	 */
	mconfig->id.module_id = -1;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);

	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
			__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}

static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	if (ac->max) {
		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}

static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
				struct snd_soc_tplg_enum_control *ec)
{
	void *data;

	if (ec->priv.size) {
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		memcpy(data, ec->priv.data, ec->priv.size);
		se->dobj.private = data;
	}

	return 0;
}

static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
			se = (struct soc_enum *)kctl->private_value;
			if (tplg_ec->priv.size)
				return skl_init_enum_data(bus->dev, se,
tplg_ec); 3164 } 3165 break; 3166 3167 default: 3168 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n", 3169 hdr->ops.get, hdr->ops.put, hdr->ops.info); 3170 break; 3171 } 3172 3173 return 0; 3174 } 3175 3176 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 3177 struct snd_soc_tplg_vendor_string_elem *str_elem, 3178 struct skl *skl) 3179 { 3180 int tkn_count = 0; 3181 static int ref_count; 3182 3183 switch (str_elem->token) { 3184 case SKL_TKN_STR_LIB_NAME: 3185 if (ref_count > skl->skl_sst->lib_count - 1) { 3186 ref_count = 0; 3187 return -EINVAL; 3188 } 3189 3190 strncpy(skl->skl_sst->lib_info[ref_count].name, 3191 str_elem->string, 3192 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name)); 3193 ref_count++; 3194 break; 3195 3196 default: 3197 dev_err(dev, "Not a string token %d\n", str_elem->token); 3198 break; 3199 } 3200 tkn_count++; 3201 3202 return tkn_count; 3203 } 3204 3205 static int skl_tplg_get_str_tkn(struct device *dev, 3206 struct snd_soc_tplg_vendor_array *array, 3207 struct skl *skl) 3208 { 3209 int tkn_count = 0, ret; 3210 struct snd_soc_tplg_vendor_string_elem *str_elem; 3211 3212 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value; 3213 while (tkn_count < array->num_elems) { 3214 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl); 3215 str_elem++; 3216 3217 if (ret < 0) 3218 return ret; 3219 3220 tkn_count = tkn_count + ret; 3221 } 3222 3223 return tkn_count; 3224 } 3225 3226 static int skl_tplg_manifest_fill_fmt(struct device *dev, 3227 struct skl_module_iface *fmt, 3228 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3229 u32 dir, int fmt_idx) 3230 { 3231 struct skl_module_pin_fmt *dst_fmt; 3232 struct skl_module_fmt *mod_fmt; 3233 int ret; 3234 3235 if (!fmt) 3236 return -EINVAL; 3237 3238 switch (dir) { 3239 case SKL_DIR_IN: 3240 dst_fmt = &fmt->inputs[fmt_idx]; 3241 break; 3242 3243 case SKL_DIR_OUT: 3244 dst_fmt = &fmt->outputs[fmt_idx]; 3245 break; 3246 3247 default: 3248 dev_err(dev, "Invalid direction: %d\n", dir); 3249 return -EINVAL; 3250 } 3251 3252 mod_fmt = &dst_fmt->fmt; 3253 3254 switch (tkn_elem->token) { 3255 case SKL_TKN_MM_U32_INTF_PIN_ID: 3256 dst_fmt->id = tkn_elem->value; 3257 break; 3258 3259 default: 3260 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token, 3261 tkn_elem->value); 3262 if (ret < 0) 3263 return ret; 3264 break; 3265 } 3266 3267 return 0; 3268 } 3269 3270 static int skl_tplg_fill_mod_info(struct device *dev, 3271 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3272 struct skl_module *mod) 3273 { 3274 3275 if (!mod) 3276 return -EINVAL; 3277 3278 switch (tkn_elem->token) { 3279 case SKL_TKN_U8_IN_PIN_TYPE: 3280 mod->input_pin_type = tkn_elem->value; 3281 break; 3282 3283 case SKL_TKN_U8_OUT_PIN_TYPE: 3284 mod->output_pin_type = tkn_elem->value; 3285 break; 3286 3287 case SKL_TKN_U8_IN_QUEUE_COUNT: 3288 mod->max_input_pins = tkn_elem->value; 3289 break; 3290 3291 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3292 mod->max_output_pins = tkn_elem->value; 3293 break; 3294 3295 case SKL_TKN_MM_U8_NUM_RES: 3296 mod->nr_resources = tkn_elem->value; 3297 break; 3298 3299 case SKL_TKN_MM_U8_NUM_INTF: 3300 mod->nr_interfaces = tkn_elem->value; 3301 break; 3302 3303 default: 3304 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3305 return -EINVAL; 3306 } 3307 3308 return 0; 3309 } 3310 3311 3312 static int skl_tplg_get_int_tkn(struct device *dev, 3313 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3314 struct skl *skl) 3315 { 3316 int tkn_count = 0, ret, size; 3317 static int mod_idx, res_val_idx, 
intf_val_idx, dir, pin_idx; 3318 struct skl_module_res *res = NULL; 3319 struct skl_module_iface *fmt = NULL; 3320 struct skl_module *mod = NULL; 3321 static struct skl_astate_param *astate_table; 3322 static int astate_cfg_idx, count; 3323 int i; 3324 3325 if (skl->modules) { 3326 mod = skl->modules[mod_idx]; 3327 res = &mod->resources[res_val_idx]; 3328 fmt = &mod->formats[intf_val_idx]; 3329 } 3330 3331 switch (tkn_elem->token) { 3332 case SKL_TKN_U32_LIB_COUNT: 3333 skl->skl_sst->lib_count = tkn_elem->value; 3334 break; 3335 3336 case SKL_TKN_U8_NUM_MOD: 3337 skl->nr_modules = tkn_elem->value; 3338 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3339 sizeof(*skl->modules), GFP_KERNEL); 3340 if (!skl->modules) 3341 return -ENOMEM; 3342 3343 for (i = 0; i < skl->nr_modules; i++) { 3344 skl->modules[i] = devm_kzalloc(dev, 3345 sizeof(struct skl_module), GFP_KERNEL); 3346 if (!skl->modules[i]) 3347 return -ENOMEM; 3348 } 3349 break; 3350 3351 case SKL_TKN_MM_U8_MOD_IDX: 3352 mod_idx = tkn_elem->value; 3353 break; 3354 3355 case SKL_TKN_U32_ASTATE_COUNT: 3356 if (astate_table != NULL) { 3357 dev_err(dev, "More than one entry for A-State count"); 3358 return -EINVAL; 3359 } 3360 3361 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3362 dev_err(dev, "Invalid A-State count %d\n", 3363 tkn_elem->value); 3364 return -EINVAL; 3365 } 3366 3367 size = tkn_elem->value * sizeof(struct skl_astate_param) + 3368 sizeof(count); 3369 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3370 if (!skl->cfg.astate_cfg) 3371 return -ENOMEM; 3372 3373 astate_table = skl->cfg.astate_cfg->astate_table; 3374 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3375 break; 3376 3377 case SKL_TKN_U32_ASTATE_IDX: 3378 if (tkn_elem->value >= count) { 3379 dev_err(dev, "Invalid A-State index %d\n", 3380 tkn_elem->value); 3381 return -EINVAL; 3382 } 3383 3384 astate_cfg_idx = tkn_elem->value; 3385 break; 3386 3387 case SKL_TKN_U32_ASTATE_KCPS: 3388 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3389 break; 3390 3391 case SKL_TKN_U32_ASTATE_CLK_SRC: 3392 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3393 break; 3394 3395 case SKL_TKN_U8_IN_PIN_TYPE: 3396 case SKL_TKN_U8_OUT_PIN_TYPE: 3397 case SKL_TKN_U8_IN_QUEUE_COUNT: 3398 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3399 case SKL_TKN_MM_U8_NUM_RES: 3400 case SKL_TKN_MM_U8_NUM_INTF: 3401 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3402 if (ret < 0) 3403 return ret; 3404 break; 3405 3406 case SKL_TKN_U32_DIR_PIN_COUNT: 3407 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3408 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3409 break; 3410 3411 case SKL_TKN_MM_U32_RES_ID: 3412 if (!res) 3413 return -EINVAL; 3414 3415 res->id = tkn_elem->value; 3416 res_val_idx = tkn_elem->value; 3417 break; 3418 3419 case SKL_TKN_MM_U32_FMT_ID: 3420 if (!fmt) 3421 return -EINVAL; 3422 3423 fmt->fmt_idx = tkn_elem->value; 3424 intf_val_idx = tkn_elem->value; 3425 break; 3426 3427 case SKL_TKN_MM_U32_CPS: 3428 case SKL_TKN_MM_U32_DMA_SIZE: 3429 case SKL_TKN_MM_U32_CPC: 3430 case SKL_TKN_U32_MEM_PAGES: 3431 case SKL_TKN_U32_OBS: 3432 case SKL_TKN_U32_IBS: 3433 case SKL_TKN_MM_U32_RES_PIN_ID: 3434 case SKL_TKN_MM_U32_PIN_BUF: 3435 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3436 if (ret < 0) 3437 return ret; 3438 3439 break; 3440 3441 case SKL_TKN_MM_U32_NUM_IN_FMT: 3442 if (!fmt) 3443 return -EINVAL; 3444 3445 res->nr_input_pins = tkn_elem->value; 3446 break; 3447 3448 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3449 if (!fmt) 3450 return -EINVAL; 3451 3452 
res->nr_output_pins = tkn_elem->value; 3453 break; 3454 3455 case SKL_TKN_U32_FMT_CH: 3456 case SKL_TKN_U32_FMT_FREQ: 3457 case SKL_TKN_U32_FMT_BIT_DEPTH: 3458 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3459 case SKL_TKN_U32_FMT_CH_CONFIG: 3460 case SKL_TKN_U32_FMT_INTERLEAVE: 3461 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3462 case SKL_TKN_U32_FMT_CH_MAP: 3463 case SKL_TKN_MM_U32_INTF_PIN_ID: 3464 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3465 dir, pin_idx); 3466 if (ret < 0) 3467 return ret; 3468 break; 3469 3470 default: 3471 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3472 return -EINVAL; 3473 } 3474 tkn_count++; 3475 3476 return tkn_count; 3477 } 3478 3479 static int skl_tplg_get_manifest_uuid(struct device *dev, 3480 struct skl *skl, 3481 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 3482 { 3483 static int ref_count; 3484 struct skl_module *mod; 3485 3486 if (uuid_tkn->token == SKL_TKN_UUID) { 3487 mod = skl->modules[ref_count]; 3488 memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid)); 3489 ref_count++; 3490 } else { 3491 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token); 3492 return -EINVAL; 3493 } 3494 3495 return 0; 3496 } 3497 3498 /* 3499 * Fill the manifest structure by parsing the tokens based on the 3500 * type. 3501 */ 3502 static int skl_tplg_get_manifest_tkn(struct device *dev, 3503 char *pvt_data, struct skl *skl, 3504 int block_size) 3505 { 3506 int tkn_count = 0, ret; 3507 int off = 0, tuple_size = 0; 3508 struct snd_soc_tplg_vendor_array *array; 3509 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 3510 3511 if (block_size <= 0) 3512 return -EINVAL; 3513 3514 while (tuple_size < block_size) { 3515 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 3516 off += array->size; 3517 switch (array->type) { 3518 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 3519 ret = skl_tplg_get_str_tkn(dev, array, skl); 3520 3521 if (ret < 0) 3522 return ret; 3523 tkn_count = ret; 3524 3525 tuple_size += tkn_count * 3526 sizeof(struct snd_soc_tplg_vendor_string_elem); 3527 continue; 3528 3529 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 3530 ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid); 3531 if (ret < 0) 3532 return ret; 3533 3534 tuple_size += sizeof(*array->uuid); 3535 continue; 3536 3537 default: 3538 tkn_elem = array->value; 3539 tkn_count = 0; 3540 break; 3541 } 3542 3543 while (tkn_count <= array->num_elems - 1) { 3544 ret = skl_tplg_get_int_tkn(dev, 3545 tkn_elem, skl); 3546 if (ret < 0) 3547 return ret; 3548 3549 tkn_count = tkn_count + ret; 3550 tkn_elem++; 3551 } 3552 tuple_size += (tkn_count * sizeof(*tkn_elem)); 3553 tkn_count = 0; 3554 } 3555 3556 return off; 3557 } 3558 3559 /* 3560 * Parse manifest private data for tokens. The private data block is 3561 * preceded by descriptors for type and size of data block. 
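 * The layout mirrors the widget private data (see the sketch above
 * skl_tplg_get_desc_blocks()): a NUM_BLOCKS descriptor followed by
 * TYPE/SIZE descriptors per block; only tuple blocks are accepted here.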
3562 */ 3563 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3564 struct device *dev, struct skl *skl) 3565 { 3566 struct snd_soc_tplg_vendor_array *array; 3567 int num_blocks, block_size = 0, block_type, off = 0; 3568 char *data; 3569 int ret; 3570 3571 /* Read the NUM_DATA_BLOCKS descriptor */ 3572 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3573 ret = skl_tplg_get_desc_blocks(dev, array); 3574 if (ret < 0) 3575 return ret; 3576 num_blocks = ret; 3577 3578 off += array->size; 3579 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3580 while (num_blocks > 0) { 3581 array = (struct snd_soc_tplg_vendor_array *) 3582 (manifest->priv.data + off); 3583 ret = skl_tplg_get_desc_blocks(dev, array); 3584 3585 if (ret < 0) 3586 return ret; 3587 block_type = ret; 3588 off += array->size; 3589 3590 array = (struct snd_soc_tplg_vendor_array *) 3591 (manifest->priv.data + off); 3592 3593 ret = skl_tplg_get_desc_blocks(dev, array); 3594 3595 if (ret < 0) 3596 return ret; 3597 block_size = ret; 3598 off += array->size; 3599 3600 array = (struct snd_soc_tplg_vendor_array *) 3601 (manifest->priv.data + off); 3602 3603 data = (manifest->priv.data + off); 3604 3605 if (block_type == SKL_TYPE_TUPLE) { 3606 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3607 block_size); 3608 3609 if (ret < 0) 3610 return ret; 3611 3612 --num_blocks; 3613 } else { 3614 return -EINVAL; 3615 } 3616 off += ret; 3617 } 3618 3619 return 0; 3620 } 3621 3622 static int skl_manifest_load(struct snd_soc_component *cmpnt, 3623 struct snd_soc_tplg_manifest *manifest) 3624 { 3625 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt); 3626 struct hdac_bus *bus = ebus_to_hbus(ebus); 3627 struct skl *skl = ebus_to_skl(ebus); 3628 3629 /* proceed only if we have private data defined */ 3630 if (manifest->priv.size == 0) 3631 return 0; 3632 3633 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3634 3635 if (skl->skl_sst->lib_count > SKL_MAX_LIB) { 3636 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n", 3637 skl->skl_sst->lib_count); 3638 return -EINVAL; 3639 } 3640 3641 return 0; 3642 } 3643 3644 static struct snd_soc_tplg_ops skl_tplg_ops = { 3645 .widget_load = skl_tplg_widget_load, 3646 .control_load = skl_tplg_control_load, 3647 .bytes_ext_ops = skl_tlv_ops, 3648 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3649 .io_ops = skl_tplg_kcontrol_ops, 3650 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3651 .manifest = skl_manifest_load, 3652 .dai_load = skl_dai_load, 3653 }; 3654 3655 /* 3656 * A pipe can have multiple modules, each of them will be a DAPM widget as 3657 * well. 
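 * (for instance a mixer widget and the PGA widgets feeding it may all
 * belong to the same pipe).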
While managing a pipeline we need to get the list of all the 3658 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3659 * helps to get the SKL type widgets in that pipeline 3660 */ 3661 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3662 { 3663 struct snd_soc_dapm_widget *w; 3664 struct skl_module_cfg *mcfg = NULL; 3665 struct skl_pipe_module *p_module = NULL; 3666 struct skl_pipe *pipe; 3667 3668 list_for_each_entry(w, &component->card->widgets, list) { 3669 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3670 mcfg = w->priv; 3671 pipe = mcfg->pipe; 3672 3673 p_module = devm_kzalloc(component->dev, 3674 sizeof(*p_module), GFP_KERNEL); 3675 if (!p_module) 3676 return -ENOMEM; 3677 3678 p_module->w = w; 3679 list_add_tail(&p_module->node, &pipe->w_list); 3680 } 3681 } 3682 3683 return 0; 3684 } 3685 3686 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe) 3687 { 3688 struct skl_pipe_module *w_module; 3689 struct snd_soc_dapm_widget *w; 3690 struct skl_module_cfg *mconfig; 3691 bool host_found = false, link_found = false; 3692 3693 list_for_each_entry(w_module, &pipe->w_list, node) { 3694 w = w_module->w; 3695 mconfig = w->priv; 3696 3697 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3698 host_found = true; 3699 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3700 link_found = true; 3701 } 3702 3703 if (host_found && link_found) 3704 pipe->passthru = true; 3705 else 3706 pipe->passthru = false; 3707 } 3708 3709 /* This will be read from topology manifest, currently defined here */ 3710 #define SKL_MAX_MCPS 30000000 3711 #define SKL_FW_MAX_MEM 1000000 3712 3713 /* 3714 * SKL topology init routine 3715 */ 3716 int skl_tplg_init(struct snd_soc_component *component, struct hdac_ext_bus *ebus) 3717 { 3718 int ret; 3719 const struct firmware *fw; 3720 struct hdac_bus *bus = ebus_to_hbus(ebus); 3721 struct skl *skl = ebus_to_skl(ebus); 3722 struct skl_pipeline *ppl; 3723 3724 ret = request_firmware(&fw, skl->tplg_name, bus->dev); 3725 if (ret < 0) { 3726 dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", 3727 skl->tplg_name, ret); 3728 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3729 if (ret < 0) { 3730 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3731 "dfw_sst.bin", ret); 3732 return ret; 3733 } 3734 } 3735 3736 /* 3737 * The complete tplg for SKL is loaded as index 0, we don't use 3738 * any other index 3739 */ 3740 ret = snd_soc_tplg_component_load(component, 3741 &skl_tplg_ops, fw, 0); 3742 if (ret < 0) { 3743 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3744 release_firmware(fw); 3745 return -EINVAL; 3746 } 3747 3748 skl->resource.max_mcps = SKL_MAX_MCPS; 3749 skl->resource.max_mem = SKL_FW_MAX_MEM; 3750 3751 skl->tplg = fw; 3752 ret = skl_tplg_create_pipe_widget_list(component); 3753 if (ret < 0) 3754 return ret; 3755 3756 list_for_each_entry(ppl, &skl->ppl_list, node) 3757 skl_tplg_set_pipe_type(skl, ppl->pipe); 3758 3759 return 0; 3760 } 3761