/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles a widget type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
{
	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * left in the available pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the complete pool.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}
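
/*
 * Rough usage sketch (for illustration only) of how the pool-accounting
 * helpers in this file pair up; the real call sites are the mixer PMU/PMD
 * handlers further below:
 *
 *	if (!skl_is_pipe_mem_avail(skl, mconfig))
 *		return -ENOMEM;
 *	skl_tplg_alloc_pipe_mem(skl, mconfig);
 *	...
 *	skl_tplg_free_pipe_mem(skl, mconfig);	(on teardown)
 */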
/*
 * A pipeline needs DSP CPU resources for computation; this is quantified
 * in MCPS (Million Clocks Per Second) required for the modules in a pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
 * for this pipe.
 */

static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mcps += mconfig->mcps;
}

/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}


static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit uses a 16 bit container whereas 24 bit sits in a
		 * 32 bit container, so update the bit depth accordingly.
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}
}

/*
 * A pipeline may have modules which impact the PCM parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup is to be applied by
 * supplying the fixup mask, so based on that we calculate the output.
 *
 * For an FE the PCM hw_params is the source/target format. The same is
 * applicable for a BE when its hw_params is invoked.
215 * here based on FE, BE pipeline and direction we calculate the input and 216 * outfix and then apply that for a module 217 */ 218 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg, 219 struct skl_pipe_params *params, bool is_fe) 220 { 221 int in_fixup, out_fixup; 222 struct skl_module_fmt *in_fmt, *out_fmt; 223 224 /* Fixups will be applied to pin 0 only */ 225 in_fmt = &m_cfg->in_fmt[0]; 226 out_fmt = &m_cfg->out_fmt[0]; 227 228 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 229 if (is_fe) { 230 in_fixup = m_cfg->params_fixup; 231 out_fixup = (~m_cfg->converter) & 232 m_cfg->params_fixup; 233 } else { 234 out_fixup = m_cfg->params_fixup; 235 in_fixup = (~m_cfg->converter) & 236 m_cfg->params_fixup; 237 } 238 } else { 239 if (is_fe) { 240 out_fixup = m_cfg->params_fixup; 241 in_fixup = (~m_cfg->converter) & 242 m_cfg->params_fixup; 243 } else { 244 in_fixup = m_cfg->params_fixup; 245 out_fixup = (~m_cfg->converter) & 246 m_cfg->params_fixup; 247 } 248 } 249 250 skl_tplg_update_params(in_fmt, params, in_fixup); 251 skl_tplg_update_params(out_fmt, params, out_fixup); 252 } 253 254 /* 255 * A module needs input and output buffers, which are dependent upon pcm 256 * params, so once we have calculate params, we need buffer calculation as 257 * well. 258 */ 259 static void skl_tplg_update_buffer_size(struct skl_sst *ctx, 260 struct skl_module_cfg *mcfg) 261 { 262 int multiplier = 1; 263 struct skl_module_fmt *in_fmt, *out_fmt; 264 int in_rate, out_rate; 265 266 267 /* Since fixups is applied to pin 0 only, ibs, obs needs 268 * change for pin 0 only 269 */ 270 in_fmt = &mcfg->in_fmt[0]; 271 out_fmt = &mcfg->out_fmt[0]; 272 273 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 274 multiplier = 5; 275 276 if (in_fmt->s_freq % 1000) 277 in_rate = (in_fmt->s_freq / 1000) + 1; 278 else 279 in_rate = (in_fmt->s_freq / 1000); 280 281 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) * 282 (mcfg->in_fmt->bit_depth >> 3) * 283 multiplier; 284 285 if (mcfg->out_fmt->s_freq % 1000) 286 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1; 287 else 288 out_rate = (mcfg->out_fmt->s_freq / 1000); 289 290 mcfg->obs = out_rate * (mcfg->out_fmt->channels) * 291 (mcfg->out_fmt->bit_depth >> 3) * 292 multiplier; 293 } 294 295 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 296 struct skl_sst *ctx) 297 { 298 struct skl_module_cfg *m_cfg = w->priv; 299 int link_type, dir; 300 u32 ch, s_freq, s_fmt; 301 struct nhlt_specific_cfg *cfg; 302 struct skl *skl = get_skl_ctx(ctx->dev); 303 304 /* check if we already have blob */ 305 if (m_cfg->formats_config.caps_size > 0) 306 return 0; 307 308 dev_dbg(ctx->dev, "Applying default cfg blob\n"); 309 switch (m_cfg->dev_type) { 310 case SKL_DEVICE_DMIC: 311 link_type = NHLT_LINK_DMIC; 312 dir = SNDRV_PCM_STREAM_CAPTURE; 313 s_freq = m_cfg->in_fmt[0].s_freq; 314 s_fmt = m_cfg->in_fmt[0].bit_depth; 315 ch = m_cfg->in_fmt[0].channels; 316 break; 317 318 case SKL_DEVICE_I2S: 319 link_type = NHLT_LINK_SSP; 320 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) { 321 dir = SNDRV_PCM_STREAM_PLAYBACK; 322 s_freq = m_cfg->out_fmt[0].s_freq; 323 s_fmt = m_cfg->out_fmt[0].bit_depth; 324 ch = m_cfg->out_fmt[0].channels; 325 } else { 326 dir = SNDRV_PCM_STREAM_CAPTURE; 327 s_freq = m_cfg->in_fmt[0].s_freq; 328 s_fmt = m_cfg->in_fmt[0].bit_depth; 329 ch = m_cfg->in_fmt[0].channels; 330 } 331 break; 332 333 default: 334 return -EINVAL; 335 } 336 337 /* update the blob based on virtual bus_id and default params */ 338 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type, 
				s_fmt, ch, s_freq, dir);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE update\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER update\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls which need
 * to be applied after the module is initialized. If the set_params flag is
 * SKL_PARAM_SET, the module params are sent right after module init.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls but are required at
 * module initialization. Such a parameter is identified by its set_params
 * flag being SKL_PARAM_INIT, in which case the parameter blob is passed as
 * part of the module init data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)&bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}
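
/*
 * Summary of the set_params staging used by the helpers in this file
 * (descriptive note, derived from the handlers above and below):
 *   SKL_PARAM_INIT - blob is passed as part of the module init data
 *   SKL_PARAM_SET  - params are sent right after the module is initialized
 *   SKL_PARAM_BIND - params are sent once all module pins are bound
 */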
/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
		if (mconfig->id.pvt_id < 0)
			return mconfig->id.pvt_id;
		skl_tplg_set_module_init_data(w);
		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, mconfig);
			return ret;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
	 struct skl_pipe *pipe)
{
	int ret;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		mconfig = w_module->w->priv;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, mconfig);
	}

	/* no modules to unload in this path, so return */
	return 0;
}

/*
 * The mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - check the resources
 * - create the pipeline
 * - initialize the modules in the pipeline
 * - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for the pipe.
577 * This list contains modules from source to sink 578 */ 579 ret = skl_create_pipeline(ctx, mconfig->pipe); 580 if (ret < 0) 581 return ret; 582 583 skl_tplg_alloc_pipe_mem(skl, mconfig); 584 skl_tplg_alloc_pipe_mcps(skl, mconfig); 585 586 /* Init all pipe modules from source to sink */ 587 ret = skl_tplg_init_pipe_modules(skl, s_pipe); 588 if (ret < 0) 589 return ret; 590 591 /* Bind modules from source to sink */ 592 list_for_each_entry(w_module, &s_pipe->w_list, node) { 593 dst_module = w_module->w->priv; 594 595 if (src_module == NULL) { 596 src_module = dst_module; 597 continue; 598 } 599 600 ret = skl_bind_modules(ctx, src_module, dst_module); 601 if (ret < 0) 602 return ret; 603 604 src_module = dst_module; 605 } 606 607 return 0; 608 } 609 610 static int skl_fill_sink_instance_id(struct skl_sst *ctx, 611 struct skl_algo_data *alg_data) 612 { 613 struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params; 614 struct skl_mod_inst_map *inst; 615 int i, pvt_id; 616 617 inst = params->map; 618 619 for (i = 0; i < params->num_modules; i++) { 620 pvt_id = skl_get_pvt_instance_id_map(ctx, 621 inst->mod_id, inst->inst_id); 622 if (pvt_id < 0) 623 return -EINVAL; 624 inst->inst_id = pvt_id; 625 inst++; 626 } 627 return 0; 628 } 629 630 /* 631 * Some modules require params to be set after the module is bound to 632 * all pins connected. 633 * 634 * The module provider initializes set_param flag for such modules and we 635 * send params after binding 636 */ 637 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w, 638 struct skl_module_cfg *mcfg, struct skl_sst *ctx) 639 { 640 int i, ret; 641 struct skl_module_cfg *mconfig = w->priv; 642 const struct snd_kcontrol_new *k; 643 struct soc_bytes_ext *sb; 644 struct skl_algo_data *bc; 645 struct skl_specific_cfg *sp_cfg; 646 647 /* 648 * check all out/in pins are in bind state. 
649 * if so set the module param 650 */ 651 for (i = 0; i < mcfg->max_out_queue; i++) { 652 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE) 653 return 0; 654 } 655 656 for (i = 0; i < mcfg->max_in_queue; i++) { 657 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE) 658 return 0; 659 } 660 661 if (mconfig->formats_config.caps_size > 0 && 662 mconfig->formats_config.set_params == SKL_PARAM_BIND) { 663 sp_cfg = &mconfig->formats_config; 664 ret = skl_set_module_params(ctx, sp_cfg->caps, 665 sp_cfg->caps_size, 666 sp_cfg->param_id, mconfig); 667 if (ret < 0) 668 return ret; 669 } 670 671 for (i = 0; i < w->num_kcontrols; i++) { 672 k = &w->kcontrol_news[i]; 673 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 674 sb = (void *) k->private_value; 675 bc = (struct skl_algo_data *)sb->dobj.private; 676 677 if (bc->set_params == SKL_PARAM_BIND) { 678 if (mconfig->m_type == SKL_MODULE_TYPE_KPB) 679 skl_fill_sink_instance_id(ctx, bc); 680 ret = skl_set_module_params(ctx, 681 (u32 *)bc->params, bc->max, 682 bc->param_id, mconfig); 683 if (ret < 0) 684 return ret; 685 } 686 } 687 } 688 689 return 0; 690 } 691 692 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, 693 struct skl *skl, 694 struct snd_soc_dapm_widget *src_w, 695 struct skl_module_cfg *src_mconfig) 696 { 697 struct snd_soc_dapm_path *p; 698 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL; 699 struct skl_module_cfg *sink_mconfig; 700 struct skl_sst *ctx = skl->skl_sst; 701 int ret; 702 703 snd_soc_dapm_widget_for_each_sink_path(w, p) { 704 if (!p->connect) 705 continue; 706 707 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name); 708 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name); 709 710 next_sink = p->sink; 711 712 if (!is_skl_dsp_widget_type(p->sink)) 713 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig); 714 715 /* 716 * here we will check widgets in sink pipelines, so that 717 * can be any widgets type and we are only interested if 718 * they are ones used for SKL so check that first 719 */ 720 if ((p->sink->priv != NULL) && 721 is_skl_dsp_widget_type(p->sink)) { 722 723 sink = p->sink; 724 sink_mconfig = sink->priv; 725 726 if (src_mconfig->m_state == SKL_MODULE_UNINIT || 727 sink_mconfig->m_state == SKL_MODULE_UNINIT) 728 continue; 729 730 /* Bind source to sink, mixin is always source */ 731 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig); 732 if (ret) 733 return ret; 734 735 /* set module params after bind */ 736 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx); 737 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx); 738 739 /* Start sinks pipe first */ 740 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) { 741 if (sink_mconfig->pipe->conn_type != 742 SKL_PIPE_CONN_TYPE_FE) 743 ret = skl_run_pipe(ctx, 744 sink_mconfig->pipe); 745 if (ret) 746 return ret; 747 } 748 } 749 } 750 751 if (!sink) 752 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig); 753 754 return 0; 755 } 756 757 /* 758 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA 759 * we need to do following: 760 * - Bind to sink pipeline 761 * Since the sink pipes can be running and we don't get mixer event on 762 * connect for already running mixer, we need to find the sink pipes 763 * here and bind to them. This way dynamic connect works. 
764 * - Start sink pipeline, if not running 765 * - Then run current pipe 766 */ 767 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, 768 struct skl *skl) 769 { 770 struct skl_module_cfg *src_mconfig; 771 struct skl_sst *ctx = skl->skl_sst; 772 int ret = 0; 773 774 src_mconfig = w->priv; 775 776 /* 777 * find which sink it is connected to, bind with the sink, 778 * if sink is not started, start sink pipe first, then start 779 * this pipe 780 */ 781 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig); 782 if (ret) 783 return ret; 784 785 /* Start source pipe last after starting all sinks */ 786 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 787 return skl_run_pipe(ctx, src_mconfig->pipe); 788 789 return 0; 790 } 791 792 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget( 793 struct snd_soc_dapm_widget *w, struct skl *skl) 794 { 795 struct snd_soc_dapm_path *p; 796 struct snd_soc_dapm_widget *src_w = NULL; 797 struct skl_sst *ctx = skl->skl_sst; 798 799 snd_soc_dapm_widget_for_each_source_path(w, p) { 800 src_w = p->source; 801 if (!p->connect) 802 continue; 803 804 dev_dbg(ctx->dev, "sink widget=%s\n", w->name); 805 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name); 806 807 /* 808 * here we will check widgets in sink pipelines, so that can 809 * be any widgets type and we are only interested if they are 810 * ones used for SKL so check that first 811 */ 812 if ((p->source->priv != NULL) && 813 is_skl_dsp_widget_type(p->source)) { 814 return p->source; 815 } 816 } 817 818 if (src_w != NULL) 819 return skl_get_src_dsp_widget(src_w, skl); 820 821 return NULL; 822 } 823 824 /* 825 * in the Post-PMU event of mixer we need to do following: 826 * - Check if this pipe is running 827 * - if not, then 828 * - bind this pipeline to its source pipeline 829 * if source pipe is already running, this means it is a dynamic 830 * connection and we need to bind only to that pipe 831 * - start this pipeline 832 */ 833 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w, 834 struct skl *skl) 835 { 836 int ret = 0; 837 struct snd_soc_dapm_widget *source, *sink; 838 struct skl_module_cfg *src_mconfig, *sink_mconfig; 839 struct skl_sst *ctx = skl->skl_sst; 840 int src_pipe_started = 0; 841 842 sink = w; 843 sink_mconfig = sink->priv; 844 845 /* 846 * If source pipe is already started, that means source is driving 847 * one more sink before this sink got connected, Since source is 848 * started, bind this sink to source and start this pipe. 
849 */ 850 source = skl_get_src_dsp_widget(w, skl); 851 if (source != NULL) { 852 src_mconfig = source->priv; 853 sink_mconfig = sink->priv; 854 src_pipe_started = 1; 855 856 /* 857 * check pipe state, then no need to bind or start the 858 * pipe 859 */ 860 if (src_mconfig->pipe->state != SKL_PIPE_STARTED) 861 src_pipe_started = 0; 862 } 863 864 if (src_pipe_started) { 865 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig); 866 if (ret) 867 return ret; 868 869 /* set module params after bind */ 870 skl_tplg_set_module_bind_params(source, src_mconfig, ctx); 871 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx); 872 873 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 874 ret = skl_run_pipe(ctx, sink_mconfig->pipe); 875 } 876 877 return ret; 878 } 879 880 /* 881 * in the Pre-PMD event of mixer we need to do following: 882 * - Stop the pipe 883 * - find the source connections and remove that from dapm_path_list 884 * - unbind with source pipelines if still connected 885 */ 886 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w, 887 struct skl *skl) 888 { 889 struct skl_module_cfg *src_mconfig, *sink_mconfig; 890 int ret = 0, i; 891 struct skl_sst *ctx = skl->skl_sst; 892 893 sink_mconfig = w->priv; 894 895 /* Stop the pipe */ 896 ret = skl_stop_pipe(ctx, sink_mconfig->pipe); 897 if (ret) 898 return ret; 899 900 for (i = 0; i < sink_mconfig->max_in_queue; i++) { 901 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) { 902 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg; 903 if (!src_mconfig) 904 continue; 905 /* 906 * If path_found == 1, that means pmd for source 907 * pipe has not occurred, source is connected to 908 * some other sink. so its responsibility of sink 909 * to unbind itself from source. 
910 */ 911 ret = skl_stop_pipe(ctx, src_mconfig->pipe); 912 if (ret < 0) 913 return ret; 914 915 ret = skl_unbind_modules(ctx, 916 src_mconfig, sink_mconfig); 917 } 918 } 919 920 return ret; 921 } 922 923 /* 924 * in the Post-PMD event of mixer we need to do following: 925 * - Free the mcps used 926 * - Free the mem used 927 * - Unbind the modules within the pipeline 928 * - Delete the pipeline (modules are not required to be explicitly 929 * deleted, pipeline delete is enough here 930 */ 931 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, 932 struct skl *skl) 933 { 934 struct skl_module_cfg *mconfig = w->priv; 935 struct skl_pipe_module *w_module; 936 struct skl_module_cfg *src_module = NULL, *dst_module; 937 struct skl_sst *ctx = skl->skl_sst; 938 struct skl_pipe *s_pipe = mconfig->pipe; 939 int ret = 0; 940 941 if (s_pipe->state == SKL_PIPE_INVALID) 942 return -EINVAL; 943 944 skl_tplg_free_pipe_mcps(skl, mconfig); 945 skl_tplg_free_pipe_mem(skl, mconfig); 946 947 list_for_each_entry(w_module, &s_pipe->w_list, node) { 948 dst_module = w_module->w->priv; 949 950 if (mconfig->m_state >= SKL_MODULE_INIT_DONE) 951 skl_tplg_free_pipe_mcps(skl, dst_module); 952 if (src_module == NULL) { 953 src_module = dst_module; 954 continue; 955 } 956 957 skl_unbind_modules(ctx, src_module, dst_module); 958 src_module = dst_module; 959 } 960 961 ret = skl_delete_pipe(ctx, mconfig->pipe); 962 963 return skl_tplg_unload_pipe_modules(ctx, s_pipe); 964 } 965 966 /* 967 * in the Post-PMD event of PGA we need to do following: 968 * - Free the mcps used 969 * - Stop the pipeline 970 * - In source pipe is connected, unbind with source pipelines 971 */ 972 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, 973 struct skl *skl) 974 { 975 struct skl_module_cfg *src_mconfig, *sink_mconfig; 976 int ret = 0, i; 977 struct skl_sst *ctx = skl->skl_sst; 978 979 src_mconfig = w->priv; 980 981 /* Stop the pipe since this is a mixin module */ 982 ret = skl_stop_pipe(ctx, src_mconfig->pipe); 983 if (ret) 984 return ret; 985 986 for (i = 0; i < src_mconfig->max_out_queue; i++) { 987 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) { 988 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg; 989 if (!sink_mconfig) 990 continue; 991 /* 992 * This is a connecter and if path is found that means 993 * unbind between source and sink has not happened yet 994 */ 995 ret = skl_unbind_modules(ctx, src_mconfig, 996 sink_mconfig); 997 } 998 } 999 1000 return ret; 1001 } 1002 1003 /* 1004 * In modelling, we assume there will be ONLY one mixer in a pipeline. 
If 1005 * mixer is not required then it is treated as static mixer aka vmixer with 1006 * a hard path to source module 1007 * So we don't need to check if source is started or not as hard path puts 1008 * dependency on each other 1009 */ 1010 static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w, 1011 struct snd_kcontrol *k, int event) 1012 { 1013 struct snd_soc_dapm_context *dapm = w->dapm; 1014 struct skl *skl = get_skl_ctx(dapm->dev); 1015 1016 switch (event) { 1017 case SND_SOC_DAPM_PRE_PMU: 1018 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl); 1019 1020 case SND_SOC_DAPM_POST_PMU: 1021 return skl_tplg_mixer_dapm_post_pmu_event(w, skl); 1022 1023 case SND_SOC_DAPM_PRE_PMD: 1024 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl); 1025 1026 case SND_SOC_DAPM_POST_PMD: 1027 return skl_tplg_mixer_dapm_post_pmd_event(w, skl); 1028 } 1029 1030 return 0; 1031 } 1032 1033 /* 1034 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a 1035 * second one is required that is created as another pipe entity. 1036 * The mixer is responsible for pipe management and represent a pipeline 1037 * instance 1038 */ 1039 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w, 1040 struct snd_kcontrol *k, int event) 1041 { 1042 struct snd_soc_dapm_context *dapm = w->dapm; 1043 struct skl *skl = get_skl_ctx(dapm->dev); 1044 1045 switch (event) { 1046 case SND_SOC_DAPM_PRE_PMU: 1047 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl); 1048 1049 case SND_SOC_DAPM_POST_PMU: 1050 return skl_tplg_mixer_dapm_post_pmu_event(w, skl); 1051 1052 case SND_SOC_DAPM_PRE_PMD: 1053 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl); 1054 1055 case SND_SOC_DAPM_POST_PMD: 1056 return skl_tplg_mixer_dapm_post_pmd_event(w, skl); 1057 } 1058 1059 return 0; 1060 } 1061 1062 /* 1063 * In modelling, we assumed rest of the modules in pipeline are PGA. 
But we 1064 * are interested in last PGA (leaf PGA) in a pipeline to disconnect with 1065 * the sink when it is running (two FE to one BE or one FE to two BE) 1066 * scenarios 1067 */ 1068 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w, 1069 struct snd_kcontrol *k, int event) 1070 1071 { 1072 struct snd_soc_dapm_context *dapm = w->dapm; 1073 struct skl *skl = get_skl_ctx(dapm->dev); 1074 1075 switch (event) { 1076 case SND_SOC_DAPM_PRE_PMU: 1077 return skl_tplg_pga_dapm_pre_pmu_event(w, skl); 1078 1079 case SND_SOC_DAPM_POST_PMD: 1080 return skl_tplg_pga_dapm_post_pmd_event(w, skl); 1081 } 1082 1083 return 0; 1084 } 1085 1086 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol, 1087 unsigned int __user *data, unsigned int size) 1088 { 1089 struct soc_bytes_ext *sb = 1090 (struct soc_bytes_ext *)kcontrol->private_value; 1091 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private; 1092 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1093 struct skl_module_cfg *mconfig = w->priv; 1094 struct skl *skl = get_skl_ctx(w->dapm->dev); 1095 1096 if (w->power) 1097 skl_get_module_params(skl->skl_sst, (u32 *)bc->params, 1098 bc->size, bc->param_id, mconfig); 1099 1100 /* decrement size for TLV header */ 1101 size -= 2 * sizeof(u32); 1102 1103 /* check size as we don't want to send kernel data */ 1104 if (size > bc->max) 1105 size = bc->max; 1106 1107 if (bc->params) { 1108 if (copy_to_user(data, &bc->param_id, sizeof(u32))) 1109 return -EFAULT; 1110 if (copy_to_user(data + 1, &size, sizeof(u32))) 1111 return -EFAULT; 1112 if (copy_to_user(data + 2, bc->params, size)) 1113 return -EFAULT; 1114 } 1115 1116 return 0; 1117 } 1118 1119 #define SKL_PARAM_VENDOR_ID 0xff 1120 1121 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol, 1122 const unsigned int __user *data, unsigned int size) 1123 { 1124 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1125 struct skl_module_cfg *mconfig = w->priv; 1126 struct soc_bytes_ext *sb = 1127 (struct soc_bytes_ext *)kcontrol->private_value; 1128 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private; 1129 struct skl *skl = get_skl_ctx(w->dapm->dev); 1130 1131 if (ac->params) { 1132 if (size > ac->max) 1133 return -EINVAL; 1134 1135 ac->size = size; 1136 /* 1137 * if the param_is is of type Vendor, firmware expects actual 1138 * parameter id and size from the control. 1139 */ 1140 if (ac->param_id == SKL_PARAM_VENDOR_ID) { 1141 if (copy_from_user(ac->params, data, size)) 1142 return -EFAULT; 1143 } else { 1144 if (copy_from_user(ac->params, 1145 data + 2, size)) 1146 return -EFAULT; 1147 } 1148 1149 if (w->power) 1150 return skl_set_module_params(skl->skl_sst, 1151 (u32 *)ac->params, ac->size, 1152 ac->param_id, mconfig); 1153 } 1154 1155 return 0; 1156 } 1157 1158 /* 1159 * Fill the dma id for host and link. 
In case of passthrough 1160 * pipeline, this will both host and link in the same 1161 * pipeline, so need to copy the link and host based on dev_type 1162 */ 1163 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg, 1164 struct skl_pipe_params *params) 1165 { 1166 struct skl_pipe *pipe = mcfg->pipe; 1167 1168 if (pipe->passthru) { 1169 switch (mcfg->dev_type) { 1170 case SKL_DEVICE_HDALINK: 1171 pipe->p_params->link_dma_id = params->link_dma_id; 1172 break; 1173 1174 case SKL_DEVICE_HDAHOST: 1175 pipe->p_params->host_dma_id = params->host_dma_id; 1176 break; 1177 1178 default: 1179 break; 1180 } 1181 pipe->p_params->s_fmt = params->s_fmt; 1182 pipe->p_params->ch = params->ch; 1183 pipe->p_params->s_freq = params->s_freq; 1184 pipe->p_params->stream = params->stream; 1185 1186 } else { 1187 memcpy(pipe->p_params, params, sizeof(*params)); 1188 } 1189 } 1190 1191 /* 1192 * The FE params are passed by hw_params of the DAI. 1193 * On hw_params, the params are stored in Gateway module of the FE and we 1194 * need to calculate the format in DSP module configuration, that 1195 * conversion is done here 1196 */ 1197 int skl_tplg_update_pipe_params(struct device *dev, 1198 struct skl_module_cfg *mconfig, 1199 struct skl_pipe_params *params) 1200 { 1201 struct skl_module_fmt *format = NULL; 1202 1203 skl_tplg_fill_dma_id(mconfig, params); 1204 1205 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) 1206 format = &mconfig->in_fmt[0]; 1207 else 1208 format = &mconfig->out_fmt[0]; 1209 1210 /* set the hw_params */ 1211 format->s_freq = params->s_freq; 1212 format->channels = params->ch; 1213 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 1214 1215 /* 1216 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 1217 * container so update bit depth accordingly 1218 */ 1219 switch (format->valid_bit_depth) { 1220 case SKL_DEPTH_16BIT: 1221 format->bit_depth = format->valid_bit_depth; 1222 break; 1223 1224 case SKL_DEPTH_24BIT: 1225 case SKL_DEPTH_32BIT: 1226 format->bit_depth = SKL_DEPTH_32BIT; 1227 break; 1228 1229 default: 1230 dev_err(dev, "Invalid bit depth %x for pipe\n", 1231 format->valid_bit_depth); 1232 return -EINVAL; 1233 } 1234 1235 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1236 mconfig->ibs = (format->s_freq / 1000) * 1237 (format->channels) * 1238 (format->bit_depth >> 3); 1239 } else { 1240 mconfig->obs = (format->s_freq / 1000) * 1241 (format->channels) * 1242 (format->bit_depth >> 3); 1243 } 1244 1245 return 0; 1246 } 1247 1248 /* 1249 * Query the module config for the FE DAI 1250 * This is used to find the hw_params set for that DAI and apply to FE 1251 * pipeline 1252 */ 1253 struct skl_module_cfg * 1254 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream) 1255 { 1256 struct snd_soc_dapm_widget *w; 1257 struct snd_soc_dapm_path *p = NULL; 1258 1259 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1260 w = dai->playback_widget; 1261 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1262 if (p->connect && p->sink->power && 1263 !is_skl_dsp_widget_type(p->sink)) 1264 continue; 1265 1266 if (p->sink->priv) { 1267 dev_dbg(dai->dev, "set params for %s\n", 1268 p->sink->name); 1269 return p->sink->priv; 1270 } 1271 } 1272 } else { 1273 w = dai->capture_widget; 1274 snd_soc_dapm_widget_for_each_source_path(w, p) { 1275 if (p->connect && p->source->power && 1276 !is_skl_dsp_widget_type(p->source)) 1277 continue; 1278 1279 if (p->source->priv) { 1280 dev_dbg(dai->dev, "set params for %s\n", 1281 p->source->name); 1282 return p->source->priv; 1283 } 1284 } 1285 } 
1286 1287 return NULL; 1288 } 1289 1290 static struct skl_module_cfg *skl_get_mconfig_pb_cpr( 1291 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1292 { 1293 struct snd_soc_dapm_path *p; 1294 struct skl_module_cfg *mconfig = NULL; 1295 1296 snd_soc_dapm_widget_for_each_source_path(w, p) { 1297 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) { 1298 if (p->connect && 1299 (p->sink->id == snd_soc_dapm_aif_out) && 1300 p->source->priv) { 1301 mconfig = p->source->priv; 1302 return mconfig; 1303 } 1304 mconfig = skl_get_mconfig_pb_cpr(dai, p->source); 1305 if (mconfig) 1306 return mconfig; 1307 } 1308 } 1309 return mconfig; 1310 } 1311 1312 static struct skl_module_cfg *skl_get_mconfig_cap_cpr( 1313 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1314 { 1315 struct snd_soc_dapm_path *p; 1316 struct skl_module_cfg *mconfig = NULL; 1317 1318 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1319 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) { 1320 if (p->connect && 1321 (p->source->id == snd_soc_dapm_aif_in) && 1322 p->sink->priv) { 1323 mconfig = p->sink->priv; 1324 return mconfig; 1325 } 1326 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink); 1327 if (mconfig) 1328 return mconfig; 1329 } 1330 } 1331 return mconfig; 1332 } 1333 1334 struct skl_module_cfg * 1335 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream) 1336 { 1337 struct snd_soc_dapm_widget *w; 1338 struct skl_module_cfg *mconfig; 1339 1340 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1341 w = dai->playback_widget; 1342 mconfig = skl_get_mconfig_pb_cpr(dai, w); 1343 } else { 1344 w = dai->capture_widget; 1345 mconfig = skl_get_mconfig_cap_cpr(dai, w); 1346 } 1347 return mconfig; 1348 } 1349 1350 static u8 skl_tplg_be_link_type(int dev_type) 1351 { 1352 int ret; 1353 1354 switch (dev_type) { 1355 case SKL_DEVICE_BT: 1356 ret = NHLT_LINK_SSP; 1357 break; 1358 1359 case SKL_DEVICE_DMIC: 1360 ret = NHLT_LINK_DMIC; 1361 break; 1362 1363 case SKL_DEVICE_I2S: 1364 ret = NHLT_LINK_SSP; 1365 break; 1366 1367 case SKL_DEVICE_HDALINK: 1368 ret = NHLT_LINK_HDA; 1369 break; 1370 1371 default: 1372 ret = NHLT_LINK_INVALID; 1373 break; 1374 } 1375 1376 return ret; 1377 } 1378 1379 /* 1380 * Fill the BE gateway parameters 1381 * The BE gateway expects a blob of parameters which are kept in the ACPI 1382 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance. 
1383 * The port can have multiple settings so pick based on the PCM 1384 * parameters 1385 */ 1386 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, 1387 struct skl_module_cfg *mconfig, 1388 struct skl_pipe_params *params) 1389 { 1390 struct nhlt_specific_cfg *cfg; 1391 struct skl *skl = get_skl_ctx(dai->dev); 1392 int link_type = skl_tplg_be_link_type(mconfig->dev_type); 1393 1394 skl_tplg_fill_dma_id(mconfig, params); 1395 1396 if (link_type == NHLT_LINK_HDA) 1397 return 0; 1398 1399 /* update the blob based on virtual bus_id*/ 1400 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type, 1401 params->s_fmt, params->ch, 1402 params->s_freq, params->stream); 1403 if (cfg) { 1404 mconfig->formats_config.caps_size = cfg->size; 1405 mconfig->formats_config.caps = (u32 *) &cfg->caps; 1406 } else { 1407 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n", 1408 mconfig->vbus_id, link_type, 1409 params->stream); 1410 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n", 1411 params->ch, params->s_freq, params->s_fmt); 1412 return -EINVAL; 1413 } 1414 1415 return 0; 1416 } 1417 1418 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai, 1419 struct snd_soc_dapm_widget *w, 1420 struct skl_pipe_params *params) 1421 { 1422 struct snd_soc_dapm_path *p; 1423 int ret = -EIO; 1424 1425 snd_soc_dapm_widget_for_each_source_path(w, p) { 1426 if (p->connect && is_skl_dsp_widget_type(p->source) && 1427 p->source->priv) { 1428 1429 ret = skl_tplg_be_fill_pipe_params(dai, 1430 p->source->priv, params); 1431 if (ret < 0) 1432 return ret; 1433 } else { 1434 ret = skl_tplg_be_set_src_pipe_params(dai, 1435 p->source, params); 1436 if (ret < 0) 1437 return ret; 1438 } 1439 } 1440 1441 return ret; 1442 } 1443 1444 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai, 1445 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params) 1446 { 1447 struct snd_soc_dapm_path *p = NULL; 1448 int ret = -EIO; 1449 1450 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1451 if (p->connect && is_skl_dsp_widget_type(p->sink) && 1452 p->sink->priv) { 1453 1454 ret = skl_tplg_be_fill_pipe_params(dai, 1455 p->sink->priv, params); 1456 if (ret < 0) 1457 return ret; 1458 } else { 1459 ret = skl_tplg_be_set_sink_pipe_params( 1460 dai, p->sink, params); 1461 if (ret < 0) 1462 return ret; 1463 } 1464 } 1465 1466 return ret; 1467 } 1468 1469 /* 1470 * BE hw_params can be a source parameters (capture) or sink parameters 1471 * (playback). 
Based on sink and source we need to either find the source 1472 * list or the sink list and set the pipeline parameters 1473 */ 1474 int skl_tplg_be_update_params(struct snd_soc_dai *dai, 1475 struct skl_pipe_params *params) 1476 { 1477 struct snd_soc_dapm_widget *w; 1478 1479 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1480 w = dai->playback_widget; 1481 1482 return skl_tplg_be_set_src_pipe_params(dai, w, params); 1483 1484 } else { 1485 w = dai->capture_widget; 1486 1487 return skl_tplg_be_set_sink_pipe_params(dai, w, params); 1488 } 1489 1490 return 0; 1491 } 1492 1493 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = { 1494 {SKL_MIXER_EVENT, skl_tplg_mixer_event}, 1495 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event}, 1496 {SKL_PGA_EVENT, skl_tplg_pga_event}, 1497 }; 1498 1499 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = { 1500 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get, 1501 skl_tplg_tlv_control_set}, 1502 }; 1503 1504 static int skl_tplg_fill_pipe_tkn(struct device *dev, 1505 struct skl_pipe *pipe, u32 tkn, 1506 u32 tkn_val) 1507 { 1508 1509 switch (tkn) { 1510 case SKL_TKN_U32_PIPE_CONN_TYPE: 1511 pipe->conn_type = tkn_val; 1512 break; 1513 1514 case SKL_TKN_U32_PIPE_PRIORITY: 1515 pipe->pipe_priority = tkn_val; 1516 break; 1517 1518 case SKL_TKN_U32_PIPE_MEM_PGS: 1519 pipe->memory_pages = tkn_val; 1520 break; 1521 1522 default: 1523 dev_err(dev, "Token not handled %d\n", tkn); 1524 return -EINVAL; 1525 } 1526 1527 return 0; 1528 } 1529 1530 /* 1531 * Add pipeline by parsing the relevant tokens 1532 * Return an existing pipe if the pipe already exists. 1533 */ 1534 static int skl_tplg_add_pipe(struct device *dev, 1535 struct skl_module_cfg *mconfig, struct skl *skl, 1536 struct snd_soc_tplg_vendor_value_elem *tkn_elem) 1537 { 1538 struct skl_pipeline *ppl; 1539 struct skl_pipe *pipe; 1540 struct skl_pipe_params *params; 1541 1542 list_for_each_entry(ppl, &skl->ppl_list, node) { 1543 if (ppl->pipe->ppl_id == tkn_elem->value) { 1544 mconfig->pipe = ppl->pipe; 1545 return EEXIST; 1546 } 1547 } 1548 1549 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 1550 if (!ppl) 1551 return -ENOMEM; 1552 1553 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 1554 if (!pipe) 1555 return -ENOMEM; 1556 1557 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 1558 if (!params) 1559 return -ENOMEM; 1560 1561 pipe->p_params = params; 1562 pipe->ppl_id = tkn_elem->value; 1563 INIT_LIST_HEAD(&pipe->w_list); 1564 1565 ppl->pipe = pipe; 1566 list_add(&ppl->node, &skl->ppl_list); 1567 1568 mconfig->pipe = pipe; 1569 mconfig->pipe->state = SKL_PIPE_INVALID; 1570 1571 return 0; 1572 } 1573 1574 static int skl_tplg_fill_pin(struct device *dev, u32 tkn, 1575 struct skl_module_pin *m_pin, 1576 int pin_index, u32 value) 1577 { 1578 switch (tkn) { 1579 case SKL_TKN_U32_PIN_MOD_ID: 1580 m_pin[pin_index].id.module_id = value; 1581 break; 1582 1583 case SKL_TKN_U32_PIN_INST_ID: 1584 m_pin[pin_index].id.instance_id = value; 1585 break; 1586 1587 default: 1588 dev_err(dev, "%d Not a pin token\n", value); 1589 return -EINVAL; 1590 } 1591 1592 return 0; 1593 } 1594 1595 /* 1596 * Parse for pin config specific tokens to fill up the 1597 * module private data 1598 */ 1599 static int skl_tplg_fill_pins_info(struct device *dev, 1600 struct skl_module_cfg *mconfig, 1601 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 1602 int dir, int pin_count) 1603 { 1604 int ret; 1605 struct skl_module_pin *m_pin; 1606 1607 switch (dir) { 1608 case SKL_DIR_IN: 1609 m_pin = 
mconfig->m_in_pin; 1610 break; 1611 1612 case SKL_DIR_OUT: 1613 m_pin = mconfig->m_out_pin; 1614 break; 1615 1616 default: 1617 dev_err(dev, "Invalid direction value\n"); 1618 return -EINVAL; 1619 } 1620 1621 ret = skl_tplg_fill_pin(dev, tkn_elem->token, 1622 m_pin, pin_count, tkn_elem->value); 1623 1624 if (ret < 0) 1625 return ret; 1626 1627 m_pin[pin_count].in_use = false; 1628 m_pin[pin_count].pin_state = SKL_PIN_UNBIND; 1629 1630 return 0; 1631 } 1632 1633 /* 1634 * Fill up input/output module config format based 1635 * on the direction 1636 */ 1637 static int skl_tplg_fill_fmt(struct device *dev, 1638 struct skl_module_cfg *mconfig, u32 tkn, 1639 u32 value, u32 dir, u32 pin_count) 1640 { 1641 struct skl_module_fmt *dst_fmt; 1642 1643 switch (dir) { 1644 case SKL_DIR_IN: 1645 dst_fmt = mconfig->in_fmt; 1646 dst_fmt += pin_count; 1647 break; 1648 1649 case SKL_DIR_OUT: 1650 dst_fmt = mconfig->out_fmt; 1651 dst_fmt += pin_count; 1652 break; 1653 1654 default: 1655 dev_err(dev, "Invalid direction value\n"); 1656 return -EINVAL; 1657 } 1658 1659 switch (tkn) { 1660 case SKL_TKN_U32_FMT_CH: 1661 dst_fmt->channels = value; 1662 break; 1663 1664 case SKL_TKN_U32_FMT_FREQ: 1665 dst_fmt->s_freq = value; 1666 break; 1667 1668 case SKL_TKN_U32_FMT_BIT_DEPTH: 1669 dst_fmt->bit_depth = value; 1670 break; 1671 1672 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 1673 dst_fmt->valid_bit_depth = value; 1674 break; 1675 1676 case SKL_TKN_U32_FMT_CH_CONFIG: 1677 dst_fmt->ch_cfg = value; 1678 break; 1679 1680 case SKL_TKN_U32_FMT_INTERLEAVE: 1681 dst_fmt->interleaving_style = value; 1682 break; 1683 1684 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 1685 dst_fmt->sample_type = value; 1686 break; 1687 1688 case SKL_TKN_U32_FMT_CH_MAP: 1689 dst_fmt->ch_map = value; 1690 break; 1691 1692 default: 1693 dev_err(dev, "Invalid token %d\n", tkn); 1694 return -EINVAL; 1695 } 1696 1697 return 0; 1698 } 1699 1700 static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig, 1701 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 1702 { 1703 if (uuid_tkn->token == SKL_TKN_UUID) 1704 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16); 1705 else { 1706 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token); 1707 return -EINVAL; 1708 } 1709 1710 return 0; 1711 } 1712 1713 static void skl_tplg_fill_pin_dynamic_val( 1714 struct skl_module_pin *mpin, u32 pin_count, u32 value) 1715 { 1716 int i; 1717 1718 for (i = 0; i < pin_count; i++) 1719 mpin[i].is_dynamic = value; 1720 } 1721 1722 /* 1723 * Parse tokens to fill up the module private data 1724 */ 1725 static int skl_tplg_get_token(struct device *dev, 1726 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 1727 struct skl *skl, struct skl_module_cfg *mconfig) 1728 { 1729 int tkn_count = 0; 1730 int ret; 1731 static int is_pipe_exists; 1732 static int pin_index, dir; 1733 1734 if (tkn_elem->token > SKL_TKN_MAX) 1735 return -EINVAL; 1736 1737 switch (tkn_elem->token) { 1738 case SKL_TKN_U8_IN_QUEUE_COUNT: 1739 mconfig->max_in_queue = tkn_elem->value; 1740 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue * 1741 sizeof(*mconfig->m_in_pin), 1742 GFP_KERNEL); 1743 if (!mconfig->m_in_pin) 1744 return -ENOMEM; 1745 1746 break; 1747 1748 case SKL_TKN_U8_OUT_QUEUE_COUNT: 1749 mconfig->max_out_queue = tkn_elem->value; 1750 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue * 1751 sizeof(*mconfig->m_out_pin), 1752 GFP_KERNEL); 1753 1754 if (!mconfig->m_out_pin) 1755 return -ENOMEM; 1756 1757 break; 1758 1759 case SKL_TKN_U8_DYN_IN_PIN: 1760 if (!mconfig->m_in_pin) 1761 
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
			mconfig->max_in_queue, tkn_elem->value);

		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
			mconfig->max_out_queue, tkn_elem->value);

		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
		mconfig->mem_pages = tkn_elem->value;
		break;

	case SKL_TKN_U32_MAX_MCPS:
		mconfig->mcps = tkn_elem->value;
		break;

	case SKL_TKN_U32_OBS:
		mconfig->obs = tkn_elem->value;
		break;

	case SKL_TKN_U32_IBS:
		mconfig->ibs = tkn_elem->value;
		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0)
			return is_pipe_exists;

		if (ret == EEXIST)
			is_pipe_exists = 1;

		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	/*
	 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction
	 * and the pin count. The first four bits hold the direction and
	 * the next four the pin count.
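	 *
	 * For example (assuming SKL_DIR_IN is encoded as 0), a token value
	 * of 0x10 selects pin 1 in the input direction; the FMT/PIN tokens
	 * that follow are then applied to that pin.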
1857 */ 1858 case SKL_TKN_U32_DIR_PIN_COUNT: 1859 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 1860 pin_index = (tkn_elem->value & 1861 SKL_PIN_COUNT_MASK) >> 4; 1862 1863 break; 1864 1865 case SKL_TKN_U32_FMT_CH: 1866 case SKL_TKN_U32_FMT_FREQ: 1867 case SKL_TKN_U32_FMT_BIT_DEPTH: 1868 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 1869 case SKL_TKN_U32_FMT_CH_CONFIG: 1870 case SKL_TKN_U32_FMT_INTERLEAVE: 1871 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 1872 case SKL_TKN_U32_FMT_CH_MAP: 1873 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token, 1874 tkn_elem->value, dir, pin_index); 1875 1876 if (ret < 0) 1877 return ret; 1878 1879 break; 1880 1881 case SKL_TKN_U32_PIN_MOD_ID: 1882 case SKL_TKN_U32_PIN_INST_ID: 1883 ret = skl_tplg_fill_pins_info(dev, 1884 mconfig, tkn_elem, dir, 1885 pin_index); 1886 if (ret < 0) 1887 return ret; 1888 1889 break; 1890 1891 case SKL_TKN_U32_CAPS_SIZE: 1892 mconfig->formats_config.caps_size = 1893 tkn_elem->value; 1894 1895 break; 1896 1897 case SKL_TKN_U32_PROC_DOMAIN: 1898 mconfig->domain = 1899 tkn_elem->value; 1900 1901 break; 1902 1903 case SKL_TKN_U8_IN_PIN_TYPE: 1904 case SKL_TKN_U8_OUT_PIN_TYPE: 1905 case SKL_TKN_U8_CONN_TYPE: 1906 break; 1907 1908 default: 1909 dev_err(dev, "Token %d not handled\n", 1910 tkn_elem->token); 1911 return -EINVAL; 1912 } 1913 1914 tkn_count++; 1915 1916 return tkn_count; 1917 } 1918 1919 /* 1920 * Parse the vendor array for specific tokens to construct 1921 * module private data 1922 */ 1923 static int skl_tplg_get_tokens(struct device *dev, 1924 char *pvt_data, struct skl *skl, 1925 struct skl_module_cfg *mconfig, int block_size) 1926 { 1927 struct snd_soc_tplg_vendor_array *array; 1928 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 1929 int tkn_count = 0, ret; 1930 int off = 0, tuple_size = 0; 1931 1932 if (block_size <= 0) 1933 return -EINVAL; 1934 1935 while (tuple_size < block_size) { 1936 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 1937 1938 off += array->size; 1939 1940 switch (array->type) { 1941 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 1942 dev_warn(dev, "no string tokens expected for skl tplg\n"); 1943 continue; 1944 1945 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 1946 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid); 1947 if (ret < 0) 1948 return ret; 1949 1950 tuple_size += sizeof(*array->uuid); 1951 1952 continue; 1953 1954 default: 1955 tkn_elem = array->value; 1956 tkn_count = 0; 1957 break; 1958 } 1959 1960 while (tkn_count <= (array->num_elems - 1)) { 1961 ret = skl_tplg_get_token(dev, tkn_elem, 1962 skl, mconfig); 1963 1964 if (ret < 0) 1965 return ret; 1966 1967 tkn_count = tkn_count + ret; 1968 tkn_elem++; 1969 } 1970 1971 tuple_size += tkn_count * sizeof(*tkn_elem); 1972 } 1973 1974 return 0; 1975 } 1976 1977 /* 1978 * Every data block is preceded by a descriptor to read the number 1979 * of data blocks, they type of the block and it's size 1980 */ 1981 static int skl_tplg_get_desc_blocks(struct device *dev, 1982 struct snd_soc_tplg_vendor_array *array) 1983 { 1984 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 1985 1986 tkn_elem = array->value; 1987 1988 switch (tkn_elem->token) { 1989 case SKL_TKN_U8_NUM_BLOCKS: 1990 case SKL_TKN_U8_BLOCK_TYPE: 1991 case SKL_TKN_U16_BLOCK_SIZE: 1992 return tkn_elem->value; 1993 1994 default: 1995 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token); 1996 break; 1997 } 1998 1999 return -EINVAL; 2000 } 2001 2002 /* 2003 * Parse the private data for the token and corresponding value. 2004 * The private data can have multiple data blocks. 
So, a data block 2005 * is preceded by a descriptor for number of blocks and a descriptor 2006 * for the type and size of the suceeding data block. 2007 */ 2008 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w, 2009 struct skl *skl, struct device *dev, 2010 struct skl_module_cfg *mconfig) 2011 { 2012 struct snd_soc_tplg_vendor_array *array; 2013 int num_blocks, block_size = 0, block_type, off = 0; 2014 char *data; 2015 int ret; 2016 2017 /* Read the NUM_DATA_BLOCKS descriptor */ 2018 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data; 2019 ret = skl_tplg_get_desc_blocks(dev, array); 2020 if (ret < 0) 2021 return ret; 2022 num_blocks = ret; 2023 2024 off += array->size; 2025 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off); 2026 2027 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 2028 while (num_blocks > 0) { 2029 ret = skl_tplg_get_desc_blocks(dev, array); 2030 2031 if (ret < 0) 2032 return ret; 2033 block_type = ret; 2034 off += array->size; 2035 2036 array = (struct snd_soc_tplg_vendor_array *) 2037 (tplg_w->priv.data + off); 2038 2039 ret = skl_tplg_get_desc_blocks(dev, array); 2040 2041 if (ret < 0) 2042 return ret; 2043 block_size = ret; 2044 off += array->size; 2045 2046 array = (struct snd_soc_tplg_vendor_array *) 2047 (tplg_w->priv.data + off); 2048 2049 data = (tplg_w->priv.data + off); 2050 2051 if (block_type == SKL_TYPE_TUPLE) { 2052 ret = skl_tplg_get_tokens(dev, data, 2053 skl, mconfig, block_size); 2054 2055 if (ret < 0) 2056 return ret; 2057 2058 --num_blocks; 2059 } else { 2060 if (mconfig->formats_config.caps_size > 0) 2061 memcpy(mconfig->formats_config.caps, data, 2062 mconfig->formats_config.caps_size); 2063 --num_blocks; 2064 } 2065 } 2066 2067 return 0; 2068 } 2069 2070 static void skl_clear_pin_config(struct snd_soc_platform *platform, 2071 struct snd_soc_dapm_widget *w) 2072 { 2073 int i; 2074 struct skl_module_cfg *mconfig; 2075 struct skl_pipe *pipe; 2076 2077 if (!strncmp(w->dapm->component->name, platform->component.name, 2078 strlen(platform->component.name))) { 2079 mconfig = w->priv; 2080 pipe = mconfig->pipe; 2081 for (i = 0; i < mconfig->max_in_queue; i++) { 2082 mconfig->m_in_pin[i].in_use = false; 2083 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND; 2084 } 2085 for (i = 0; i < mconfig->max_out_queue; i++) { 2086 mconfig->m_out_pin[i].in_use = false; 2087 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND; 2088 } 2089 pipe->state = SKL_PIPE_INVALID; 2090 mconfig->m_state = SKL_MODULE_UNINIT; 2091 } 2092 } 2093 2094 void skl_cleanup_resources(struct skl *skl) 2095 { 2096 struct skl_sst *ctx = skl->skl_sst; 2097 struct snd_soc_platform *soc_platform = skl->platform; 2098 struct snd_soc_dapm_widget *w; 2099 struct snd_soc_card *card; 2100 2101 if (soc_platform == NULL) 2102 return; 2103 2104 card = soc_platform->component.card; 2105 if (!card || !card->instantiated) 2106 return; 2107 2108 skl->resource.mem = 0; 2109 skl->resource.mcps = 0; 2110 2111 list_for_each_entry(w, &card->widgets, list) { 2112 if (is_skl_dsp_widget_type(w) && (w->priv != NULL)) 2113 skl_clear_pin_config(soc_platform, w); 2114 } 2115 2116 skl_clear_module_cnt(ctx->dsp); 2117 } 2118 2119 /* 2120 * Topology core widget load callback 2121 * 2122 * This is used to save the private data for each widget which gives 2123 * information to the driver about module and pipeline parameters which DSP 2124 * FW expects like ids, resource values, formats etc 2125 */ 2126 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, 2127 
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct skl *skl = ebus_to_skl(ebus);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_module_cfg *mconfig;

	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
	if (!mconfig)
		return -ENOMEM;

	w->priv = mconfig;

	/*
	 * The module binary can be loaded later, so set the id to -1 now
	 * and query it when the module is loaded for a use case.
	 */
	mconfig->id.module_id = -1;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;
bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);
	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
					__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}

static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	if (ac->max) {
		ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}

static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	default:
		dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
			hdr->ops.get, hdr->ops.put, hdr->ops.info);
		break;
	}

	return 0;
}
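
/*
 * Minimal sketch, not called anywhere in the driver: once skl_init_algo_data()
 * has attached a skl_algo_data to a TLV bytes control above, the parameter
 * blob set by topology can later be reached through the control's dobj as
 * shown here. The helper name skl_tplg_kctl_to_algo is hypothetical.
 */
static __maybe_unused struct skl_algo_data *
skl_tplg_kctl_to_algo(struct snd_kcontrol *kctl)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kctl->private_value;

	/* private data was set in skl_init_algo_data() */
	return sb->dobj.private;
}
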
static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_string_elem *str_elem,
		struct skl_dfw_manifest *minfo)
{
	int tkn_count = 0;
	static int ref_count;

	switch (str_elem->token) {
	case SKL_TKN_STR_LIB_NAME:
		if (ref_count > minfo->lib_count - 1) {
			ref_count = 0;
			return -EINVAL;
		}

		strncpy(minfo->lib[ref_count].name, str_elem->string,
			ARRAY_SIZE(minfo->lib[ref_count].name));
		ref_count++;
		tkn_count++;
		break;

	default:
		dev_err(dev, "Not a string token %d\n", str_elem->token);
		break;
	}

	return tkn_count;
}

static int skl_tplg_get_str_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_array *array,
		struct skl_dfw_manifest *minfo)
{
	int tkn_count = 0, ret;
	struct snd_soc_tplg_vendor_string_elem *str_elem;

	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
	while (tkn_count < array->num_elems) {
		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
		str_elem++;

		if (ret < 0)
			return ret;

		tkn_count = tkn_count + ret;
	}

	return tkn_count;
}

static int skl_tplg_get_int_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_dfw_manifest *minfo)
{
	int tkn_count = 0;

	switch (tkn_elem->token) {
	case SKL_TKN_U32_LIB_COUNT:
		minfo->lib_count = tkn_elem->value;
		tkn_count++;
		break;

	default:
		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
		return -EINVAL;
	}

	return tkn_count;
}

/*
 * Fill the manifest structure by parsing the tokens based on the
 * type.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl_dfw_manifest *minfo,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, minfo);
			if (ret < 0)
				return ret;
			tkn_count += ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, minfo);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_value_elem);
			break;
		}
		tkn_count = 0;
	}

	return 0;
}
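
/*
 * Illustrative sketch only (hypothetical helper, never called): dumps what
 * the manifest token parsers above produce, i.e. the library count from
 * SKL_TKN_U32_LIB_COUNT and the names filled in from SKL_TKN_STR_LIB_NAME
 * string tokens.
 */
static void __maybe_unused skl_tplg_dump_manifest(struct device *dev,
		struct skl_dfw_manifest *minfo)
{
	int i;

	dev_dbg(dev, "manifest: lib_count %d\n", minfo->lib_count);

	for (i = 0; i < minfo->lib_count && i < HDA_MAX_LIB; i++)
		dev_dbg(dev, "lib[%d]: %s\n", i, minfo->lib[i].name);
}
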
/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for the type and size of the data block.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
			struct device *dev, struct skl_dfw_manifest *minfo)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		data = (manifest->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
					block_size);
			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static int skl_manifest_load(struct snd_soc_component *cmpnt,
				struct snd_soc_tplg_manifest *manifest)
{
	struct skl_dfw_manifest *minfo;
	struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl *skl = ebus_to_skl(ebus);
	int ret = 0;

	/* proceed only if we have private data defined */
	if (manifest->priv.size == 0)
		return 0;

	minfo = &skl->skl_sst->manifest;

	skl_tplg_get_manifest_data(manifest, bus->dev, minfo);

	if (minfo->lib_count > HDA_MAX_LIB) {
		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
					minfo->lib_count);
		ret = -EINVAL;
	}

	return ret;
}

static struct snd_soc_tplg_ops skl_tplg_ops = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.manifest = skl_manifest_load,
};

/*
 * A pipe can have multiple modules, each of which will be a DAPM widget as
 * well.
 * While managing a pipeline we need the list of all the widgets in
 * that pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
 * collects the SKL type widgets belonging to the pipeline.
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mcfg = NULL;
	struct skl_pipe_module *p_module = NULL;
	struct skl_pipe *pipe;

	list_for_each_entry(w, &platform->component.card->widgets, list) {
		if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
			mcfg = w->priv;
			pipe = mcfg->pipe;

			p_module = devm_kzalloc(platform->dev,
						sizeof(*p_module), GFP_KERNEL);
			if (!p_module)
				return -ENOMEM;

			p_module->w = w;
			list_add_tail(&p_module->node, &pipe->w_list);
		}
	}

	return 0;
}

static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	bool host_found = false, link_found = false;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			host_found = true;
		else if (mconfig->dev_type != SKL_DEVICE_NONE)
			link_found = true;
	}

	if (host_found && link_found)
		pipe->passthru = true;
	else
		pipe->passthru = false;
}

/* This will be read from the topology manifest; currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000

/*
 * SKL topology init routine
 */
int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
{
	int ret;
	const struct firmware *fw;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl *skl = ebus_to_skl(ebus);
	struct skl_pipeline *ppl;

	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
	if (ret < 0) {
		dev_err(bus->dev, "tplg fw %s load failed with %d\n",
				skl->tplg_name, ret);
		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
					"dfw_sst.bin", ret);
			return ret;
		}
	}

	/*
	 * The complete tplg for SKL is loaded as index 0; we don't use
	 * any other index
	 */
	ret = snd_soc_tplg_component_load(&platform->component,
					&skl_tplg_ops, fw, 0);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
		release_firmware(fw);
		return -EINVAL;
	}

	skl->resource.max_mcps = SKL_MAX_MCPS;
	skl->resource.max_mem = SKL_FW_MAX_MEM;

	skl->tplg = fw;
	ret = skl_tplg_create_pipe_widget_list(platform);
	if (ret < 0)
		return ret;

	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

	return 0;
}
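
/*
 * Usage sketch, for illustration only: a platform component probe is
 * expected to call skl_tplg_init() with its snd_soc_platform and the HDA
 * extended bus before any pipeline is used. The function name
 * skl_tplg_init_example and the drvdata lookup are assumptions, not the
 * driver's actual call site.
 */
static int __maybe_unused skl_tplg_init_example(struct snd_soc_platform *platform)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(platform->dev);

	return skl_tplg_init(platform, ebus);
}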