/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK		(1 << 0)
#define SKL_RATE_FIXUP_MASK		(1 << 1)
#define SKL_FMT_FIXUP_MASK		(1 << 2)
#define SKL_IN_DIR_BIT_MASK		BIT(0)
#define SKL_PIN_COUNT_MASK		GENMASK(7, 4)

static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether a widget type is one the SKL driver
 * handles.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
{
	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * available in the pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the memory to the memory pool. This is freed when the pipe is
 * deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the overall pool.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}

/*
 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
 * (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
 * for this pipe.
 */
static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mcps += mconfig->mcps;
}

/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}

static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}
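
/*
 * Worked example of the slot map built above: each channel occupies one
 * nibble of ch_map and unused slots stay 0xF. With a start slot of 0 this
 * gives, for example:
 *	1 ch -> 0xFFFFFFF0
 *	2 ch -> 0xFFFFFF10
 *	4 ch -> 0xFFFF3210
 */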

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup is to be applied by
 * supplying the fixup mask, so based on that we calculate the output.
 *
 * For the FE, the pcm hw_params is the source/target format. The same is
 * applicable for the BE when its hw_params is invoked.
 * Here, based on FE/BE pipeline and direction, we calculate the input and
 * output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->in_fmt[0];
	out_fmt = &m_cfg->out_fmt[0];

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}
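
/*
 * Illustration (not from the original source, derived from the masks
 * above): a channel converter on a playback FE path has SKL_CH_FIXUP_MASK
 * set in both params_fixup and converter. The input format then takes the
 * channel count from the FE hw_params (in_fixup keeps the channel bit),
 * while the output keeps the channel count defined by topology, because
 * (~converter) & params_fixup clears that bit from out_fixup.
 */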

/*
 * A module needs input and output buffers, which are dependent upon pcm
 * params. So once we have calculated the params, we need the buffer
 * calculation as well.
 */
static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Since fixups are applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only
	 */
	in_fmt = &mcfg->in_fmt[0];
	out_fmt = &mcfg->out_fmt[0];

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	mcfg->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	mcfg->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}
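
/*
 * Worked example of the calculation above: for a 48000 Hz, 2 channel,
 * 32 bit container format, ibs = DIV_ROUND_UP(48000, 1000) * 2 * 4 =
 * 384 bytes, i.e. one millisecond worth of audio. For an SRC module
 * (SKL_MODULE_TYPE_SRCINT) the result is additionally multiplied by 5.
 */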

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_cfg->in_fmt[0].s_freq;
		s_fmt = m_cfg->in_fmt[0].bit_depth;
		ch = m_cfg->in_fmt[0].channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_cfg->out_fmt[0].s_freq;
			s_fmt = m_cfg->out_fmt[0].bit_depth;
			ch = m_cfg->out_fmt[0].channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_cfg->in_fmt[0].s_freq;
			s_fmt = m_cfg->in_fmt[0].bit_depth;
			ch = m_cfg->in_fmt[0].channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls that need
 * to be applied after the module is initialized. If the set_params flag
 * is SKL_PARAM_SET, those params are sent once the module has been
 * initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required when
 * the module is initialized. Such a param is identified by the
 * SKL_PARAM_INIT value of the set_params flag and is then sent as part of
 * the module init payload.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		uuid_mod = (uuid_le *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);
		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
			return ret;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
	 struct skl_pipe *pipe)
{
	int ret;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (uuid_le *)mconfig->guid;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
	}

	/* no modules to unload in this path, so return */
	return 0;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - check the resources
 * - create the pipeline
 * - initialize the modules in the pipeline
 * - finally, bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in the deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(ctx, modules->src,
							modules->dst);
		}
	}

	return 0;
}

static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}

/*
 * Some modules require params to be set after the module is bound to
 * all its connected pins.
 *
 * The module provider initializes the set_params flag for such modules
 * and we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check that all out/in pins are in the bind state;
	 * if so, set the module params
	 */
	for (i = 0; i < mcfg->max_out_queue; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->max_in_queue; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kzalloc(bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				memcpy(params, bc->params, bc->max);
				skl_fill_sink_instance_id(ctx, params, bc->max,
								mconfig);

				ret = skl_set_module_params(ctx, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_tplg_module_add_deferred_bind(struct skl *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for modules with static pin connections */
	for (i = 0; i < dst->max_in_queue; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}
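
/*
 * Summary of the deferred bind lifecycle (derived from this file): an
 * entry is added above when a source module is up but its statically
 * connected sink module is not yet initialized; the bind is then
 * performed in the mixer Pre-PMU handler once the sink pipe's modules
 * have been initialized, and the entry is removed in the mixer Post-PMD
 * handler when the source module's pipe is torn down.
 */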

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check widgets in the sink pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * used by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. the reference path.
			 * When the path is enabled, the dst module that needs
			 * to be bound may not be initialized yet. If it is
			 * not initialized, add it to the deferred bind list
			 * and, once the dst module is initialized, bind this
			 * module to the dst module from the deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of the
 * PGA we need to do the following:
 * - Bind to the sink pipeline
 *   Since the sink pipes can be running and we don't get a mixer event on
 *   connect for an already running mixer, we need to find the sink pipes
 *   here and bind to them. This way dynamic connect works.
 * - Start the sink pipeline, if not running
 * - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(ctx, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;
	struct skl_sst *ctx = skl->skl_sst;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check widgets in the source pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * used by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 * - Check if this pipe is running
 * - if not, then
 *   - bind this pipeline to its source pipeline
 *     if the source pipe is already running, this means it is a dynamic
 *     connection and we need to bind only to that pipe
 *   - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, that means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this
	 * pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check pipe state, then no need to bind or start the
		 * pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 * - Stop the pipe
 * - find the source connections and remove them from the dapm_path_list
 * - unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->max_in_queue; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 * - Free the mcps used
 * - Free the mem used
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (modules are not required to be explicitly
 *   deleted, pipeline delete is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, unbind the
			 * modules from the deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(ctx, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 * - Free the mcps used
 * - Stop the pipeline
 * - If the source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->max_out_queue; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if a path is found, it means
			 * the unbind between source and sink has not happened
			 * yet.
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are only interested in the last PGA (leaf PGA) in a pipeline, so that
 * we can disconnect from the sink while it is running (two FE to one BE or
 * one FE to two BE scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}
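
/*
 * Layout of the user-space buffer used by the TLV byte controls above and
 * below (derived from the copy_to_user/copy_from_user offsets): word 0
 * carries the parameter id, word 1 the payload size in bytes, and the
 * actual parameter data starts at word 2.
 */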

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects the
		 * actual parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* the channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}
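
/*
 * Example of the gain matrix built above (derived from mic_stereo_list):
 * selecting the stereo combo {0, 2} sets blob[0][0] and blob[1][2] to
 * SKL_DEFAULT_MIC_SEL_GAIN, i.e. output channel 0 is fed from mic 0 and
 * output channel 1 from mic 2; all other matrix entries stay 0.
 */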

/*
 * Fill the dma id for host and link. In case of a passthrough pipeline,
 * both host and link are in the same pipeline, so the link or host params
 * are copied based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the Gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_fmt *format = NULL;

	skl_tplg_fill_dma_id(mconfig, params);

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->in_fmt[0];
	else
		format = &mconfig->out_fmt[0];

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		mconfig->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for the interface type (i2s/pdm) and
 * instance. The port can have multiple settings, so pick based on the PCM
 * parameters.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id */
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on sink and source we need to either find the source
 * list or the sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
};

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{

	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add a pipeline by parsing the relevant tokens.
 * Return an existing pipe if the pipe already exists.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return -EEXIST;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}

static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
			struct skl_module_pin *m_pin,
			int pin_index, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = value;
		break;

	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = value;
		break;

	default:
		dev_err(dev, "%d is not a pin token\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
		struct skl_module_cfg *mconfig,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		int dir, int pin_count)
{
	int ret;
	struct skl_module_pin *m_pin;

	switch (dir) {
	case SKL_DIR_IN:
		m_pin = mconfig->m_in_pin;
		break;

	case SKL_DIR_OUT:
		m_pin = mconfig->m_out_pin;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	ret = skl_tplg_fill_pin(dev, tkn_elem->token,
			m_pin, pin_count, tkn_elem->value);

	if (ret < 0)
		return ret;

	m_pin[pin_count].in_use = false;
	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

	return 0;
}

/*
 * Fill up input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_cfg *mconfig, u32 tkn,
		u32 value, u32 dir, u32 pin_count)
{
	struct skl_module_fmt *dst_fmt;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = mconfig->in_fmt;
		dst_fmt += pin_count;
		break;

	case SKL_DIR_OUT:
		dst_fmt = mconfig->out_fmt;
		dst_fmt += pin_count;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	switch (tkn) {
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
		break;

	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
		break;

	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_CH_CONFIG:
		dst_fmt->ch_cfg = value;
		break;

	case SKL_TKN_U32_FMT_INTERLEAVE:
		dst_fmt->interleaving_style = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
		dst_fmt->sample_type = value;
		break;

	case SKL_TKN_U32_FMT_CH_MAP:
		dst_fmt->ch_map = value;
		break;

	default:
		dev_err(dev, "Invalid token %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}
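
/*
 * Illustrative token set (not from the original source): a topology
 * describing a 48 kHz stereo S24_LE pin would typically carry
 * SKL_TKN_U32_FMT_FREQ = 48000, SKL_TKN_U32_FMT_CH = 2,
 * SKL_TKN_U32_FMT_BIT_DEPTH = 32 (container) and
 * SKL_TKN_U32_FMT_SAMPLE_SIZE = 24 (valid bits), matching the
 * container/valid bit depth convention used elsewhere in this file.
 */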

static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
	if (uuid_tkn->token == SKL_TKN_UUID)
		memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
	else {
		dev_err(dev, "Not a UUID token tkn %d\n", uuid_tkn->token);
		return -EINVAL;
	}

	return 0;
}

static void skl_tplg_fill_pin_dynamic_val(
		struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
	int i;

	for (i = 0; i < pin_count; i++)
		mpin[i].is_dynamic = value;
}

/*
 * Parse tokens to fill up the module private data
 */
static int skl_tplg_get_token(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl *skl, struct skl_module_cfg *mconfig)
{
	int tkn_count = 0;
	int ret;
	static int is_pipe_exists;
	static int pin_index, dir;

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->max_in_queue = tkn_elem->value;
		mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
					sizeof(*mconfig->m_in_pin),
					GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->max_out_queue = tkn_elem->value;
		mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
					sizeof(*mconfig->m_out_pin),
					GFP_KERNEL);

		if (!mconfig->m_out_pin)
			return -ENOMEM;

		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
			mconfig->max_in_queue, tkn_elem->value);

		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
			mconfig->max_out_queue, tkn_elem->value);

		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
		mconfig->mem_pages = tkn_elem->value;
		break;

	case SKL_TKN_U32_MAX_MCPS:
		mconfig->mcps = tkn_elem->value;
		break;

	case SKL_TKN_U32_OBS:
		mconfig->obs = tkn_elem->value;
		break;

	case SKL_TKN_U32_IBS:
		mconfig->ibs = tkn_elem->value;
		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;
	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0) {
			if (ret == -EEXIST) {
				is_pipe_exists = 1;
				break;
			}
			return ret;
		}

		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	/*
	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
	 * direction and the pin count. The first four bits represent
	 * direction and next four the pin count. For example, a value
	 * of 0x21 gives dir = 1 and pin_index = 2 with the masks below.
	 */
	case SKL_TKN_U32_DIR_PIN_COUNT:
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_index = (tkn_elem->value &
			SKL_PIN_COUNT_MASK) >> 4;

		break;

	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
		ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
				tkn_elem->value, dir, pin_index);

		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_PIN_MOD_ID:
	case SKL_TKN_U32_PIN_INST_ID:
		ret = skl_tplg_fill_pins_info(dev,
				mconfig, tkn_elem, dir,
				pin_index);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_CAPS_SIZE:
		mconfig->formats_config.caps_size =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_CAPS_SET_PARAMS:
		mconfig->formats_config.set_params =
				tkn_elem->value;
		break;

	case SKL_TKN_U32_CAPS_PARAMS_ID:
		mconfig->formats_config.param_id =
				tkn_elem->value;
		break;

	case SKL_TKN_U32_PROC_DOMAIN:
		mconfig->domain =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_DMA_BUF_SIZE:
		mconfig->dma_buffer_size = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_CONN_TYPE:
		break;

	default:
		dev_err(dev, "Token %d not handled\n",
				tkn_elem->token);
		return -EINVAL;
	}

	tkn_count++;

	return tkn_count;
}

/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data, struct skl *skl,
		struct skl_module_cfg *mconfig, int block_size)
{
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			dev_warn(dev, "no string tokens expected for skl tplg\n");
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);

			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

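		/*
		 * Value elements are consumed one at a time:
		 * skl_tplg_get_token() returns the number of elements it
		 * handled (normally one per call), so the loop below can
		 * advance through the vendor array.
		 */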
		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
					skl, mconfig);

			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}

		tuple_size += tkn_count * sizeof(*tkn_elem);
	}

	return off;
}

/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
		struct snd_soc_tplg_vendor_array *array)
{
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	tkn_elem = array->value;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_NUM_BLOCKS:
	case SKL_TKN_U8_BLOCK_TYPE:
	case SKL_TKN_U16_BLOCK_SIZE:
		return tkn_elem->value;

	default:
		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
		break;
	}

	return -EINVAL;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
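/*
 * For illustration, a widget's private data is laid out roughly as below
 * (block count, types and sizes are examples only):
 *
 *   vendor array: SKL_TKN_U8_NUM_BLOCKS  = 2
 *   vendor array: SKL_TKN_U8_BLOCK_TYPE  = SKL_TYPE_TUPLE
 *   vendor array: SKL_TKN_U16_BLOCK_SIZE = <size of token block>
 *   <token block parsed by skl_tplg_get_tokens()>
 *   vendor array: SKL_TKN_U8_BLOCK_TYPE  = <binary block type>
 *   vendor array: SKL_TKN_U16_BLOCK_SIZE = <caps_size>
 *   <raw binary block copied into mconfig->formats_config.caps>
 */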
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				struct skl *skl, struct device *dev,
				struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
			ret = mconfig->formats_config.caps_size;
		}
		off += ret;
	}

	return 0;
}

static void skl_clear_pin_config(struct snd_soc_platform *platform,
				struct snd_soc_dapm_widget *w)
{
	int i;
	struct skl_module_cfg *mconfig;
	struct skl_pipe *pipe;

	if (!strncmp(w->dapm->component->name, platform->component.name,
					strlen(platform->component.name))) {
		mconfig = w->priv;
		pipe = mconfig->pipe;
		for (i = 0; i < mconfig->max_in_queue; i++) {
			mconfig->m_in_pin[i].in_use = false;
			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		for (i = 0; i < mconfig->max_out_queue; i++) {
			mconfig->m_out_pin[i].in_use = false;
			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		pipe->state = SKL_PIPE_INVALID;
		mconfig->m_state = SKL_MODULE_UNINIT;
	}
}

void skl_cleanup_resources(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct snd_soc_platform *soc_platform = skl->platform;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_platform == NULL)
		return;

	card = soc_platform->component.card;
	if (!card || !card->instantiated)
		return;

	skl->resource.mem = 0;
	skl->resource.mcps = 0;

	list_for_each_entry(w, &card->widgets, list) {
		if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
			skl_clear_pin_config(soc_platform, w);
	}

	skl_clear_module_cnt(ctx->dsp);
}

/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget which gives
 * information to the driver about module and pipeline parameters which DSP
 * FW expects like ids, resource values, formats etc
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct skl *skl = ebus_to_skl(ebus);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_module_cfg *mconfig;

	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set it to query when
	 * module is loaded for a use case
	 */
	mconfig->id.module_id = -1;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);

	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
					__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}

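/*
 * Bytes controls carry a struct skl_dfw_algo_data blob in their private
 * data. It is copied into a runtime struct skl_algo_data and attached to
 * the control via dobj.private, so the bytes-extended control handlers can
 * use it later.
 */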
static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	if (ac->max) {
		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}

static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
				struct snd_soc_tplg_enum_control *ec)
{
	void *data;

	if (ec->priv.size) {
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		memcpy(data, ec->priv.data, ec->priv.size);
		se->dobj.private = data;
	}

	return 0;
}

static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
			se = (struct soc_enum *)kctl->private_value;
			if (tplg_ec->priv.size)
				return skl_init_enum_data(bus->dev, se,
						tplg_ec);
		}
		break;

	default:
		dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
			hdr->ops.get, hdr->ops.put, hdr->ops.info);
		break;
	}

	return 0;
}

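/*
 * Manifest string tokens carry the names of the loadable DSP library
 * files. They are copied into skl->skl_sst->lib_info[] in the order they
 * appear, bounded by the SKL_TKN_U32_LIB_COUNT value parsed further below.
 */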
static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_string_elem *str_elem,
		struct skl *skl)
{
	int tkn_count = 0;
	static int ref_count;

	switch (str_elem->token) {
	case SKL_TKN_STR_LIB_NAME:
		if (ref_count > skl->skl_sst->lib_count - 1) {
			ref_count = 0;
			return -EINVAL;
		}

		strncpy(skl->skl_sst->lib_info[ref_count].name,
			str_elem->string,
			ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
		ref_count++;
		tkn_count++;
		break;

	default:
		dev_err(dev, "Not a string token %d\n", str_elem->token);
		break;
	}

	return tkn_count;
}

static int skl_tplg_get_str_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_array *array,
		struct skl *skl)
{
	int tkn_count = 0, ret;
	struct snd_soc_tplg_vendor_string_elem *str_elem;

	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
	while (tkn_count < array->num_elems) {
		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
		str_elem++;

		if (ret < 0)
			return ret;

		tkn_count = tkn_count + ret;
	}

	return tkn_count;
}

static int skl_tplg_get_int_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl *skl)
{
	int tkn_count = 0;

	switch (tkn_elem->token) {
	case SKL_TKN_U32_LIB_COUNT:
		skl->skl_sst->lib_count = tkn_elem->value;
		tkn_count++;
		break;

	default:
		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
		return -EINVAL;
	}

	return tkn_count;
}

/*
 * Fill the manifest structure by parsing the tokens based on the
 * type.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);

			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, skl);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_value_elem);
			break;
		}
		tkn_count = 0;
	}

	return 0;
}

/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
			struct device *dev, struct skl *skl)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		data = (manifest->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
					block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static int skl_manifest_load(struct snd_soc_component *cmpnt,
				struct snd_soc_tplg_manifest *manifest)
{
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl *skl = ebus_to_skl(ebus);

	/* proceed only if we have private data defined */
	if (manifest->priv.size == 0)
		return 0;

	skl_tplg_get_manifest_data(manifest, bus->dev, skl);

	if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
				skl->skl_sst->lib_count);
		return -EINVAL;
	}

	return 0;
}

static struct snd_soc_tplg_ops skl_tplg_ops = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.io_ops = skl_tplg_kcontrol_ops,
	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
	.manifest = skl_manifest_load,
};

/*
 * A pipe can have multiple modules, each of them will be a DAPM widget as
 * well. While managing a pipeline we need to get the list of all the
 * widgets in a pipeline, so this helper - skl_tplg_create_pipe_widget_list()
 * helps to get the SKL type widgets in that pipeline
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mcfg = NULL;
	struct skl_pipe_module *p_module = NULL;
	struct skl_pipe *pipe;

	list_for_each_entry(w, &platform->component.card->widgets, list) {
		if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
			mcfg = w->priv;
			pipe = mcfg->pipe;

			p_module = devm_kzalloc(platform->dev,
						sizeof(*p_module), GFP_KERNEL);
			if (!p_module)
				return -ENOMEM;

			p_module->w = w;
			list_add_tail(&p_module->node, &pipe->w_list);
		}
	}

	return 0;
}

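/*
 * A pipe that contains both an HDA-host endpoint and a link (non-host,
 * non-NONE) endpoint carries data straight between the two, so it is
 * flagged as a passthrough pipe; every other pipe is a normal one.
 */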
static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	bool host_found = false, link_found = false;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			host_found = true;
		else if (mconfig->dev_type != SKL_DEVICE_NONE)
			link_found = true;
	}

	if (host_found && link_found)
		pipe->passthru = true;
	else
		pipe->passthru = false;
}

/* This will be read from topology manifest, currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000

/*
 * SKL topology init routine
 */
int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
{
	int ret;
	const struct firmware *fw;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl *skl = ebus_to_skl(ebus);
	struct skl_pipeline *ppl;

	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
	if (ret < 0) {
		dev_err(bus->dev, "tplg fw %s load failed with %d\n",
				skl->tplg_name, ret);
		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
					"dfw_sst.bin", ret);
			return ret;
		}
	}

	/*
	 * The complete tplg for SKL is loaded as index 0, we don't use
	 * any other index
	 */
	ret = snd_soc_tplg_component_load(&platform->component,
					&skl_tplg_ops, fw, 0);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
		release_firmware(fw);
		return -EINVAL;
	}

	skl->resource.max_mcps = SKL_MAX_MCPS;
	skl->resource.max_mem = SKL_FW_MAX_MEM;

	skl->tplg = fw;
	ret = skl_tplg_create_pipe_widget_list(platform);
	if (ret < 0)
		return ret;

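	/*
	 * Now that every widget is attached to its pipe, classify each
	 * pipeline as passthrough or not.
	 */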
	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

	return 0;
}