/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles a given widget
 * type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * from the available pool.
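 * (the running count in skl->resource.mem plus this pipe's memory_pages
 * must stay within the skl->resource.max_mem limit)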
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id,
			mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl memory available %d mem %d\n",
			skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the memory used by this pipe to the memory pool. It is freed when the
 * pipe is deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the overall pool usage.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}

/*
 * A pipeline needs DSP CPU resources for computation; this is quantified in
 * MCPS (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have MCPS available
 * for this pipe.
 */

static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps += res->cps;
}

/*
 * Free the MCPS when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps -= res->cps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}

static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[0];

	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map =
0xFFFFFFFF; 237 int start_slot = 0; 238 int i; 239 240 for (i = 0; i < chs; i++) { 241 /* 242 * For 2 channels with starting slot as 0, slot map will 243 * look like 0xFFFFFF10. 244 */ 245 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i))); 246 start_slot++; 247 } 248 fmt->ch_map = slot_map; 249 } 250 251 static void skl_tplg_update_params(struct skl_module_fmt *fmt, 252 struct skl_pipe_params *params, int fixup) 253 { 254 if (fixup & SKL_RATE_FIXUP_MASK) 255 fmt->s_freq = params->s_freq; 256 if (fixup & SKL_CH_FIXUP_MASK) { 257 fmt->channels = params->ch; 258 skl_tplg_update_chmap(fmt, fmt->channels); 259 } 260 if (fixup & SKL_FMT_FIXUP_MASK) { 261 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 262 263 /* 264 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 265 * container so update bit depth accordingly 266 */ 267 switch (fmt->valid_bit_depth) { 268 case SKL_DEPTH_16BIT: 269 fmt->bit_depth = fmt->valid_bit_depth; 270 break; 271 272 default: 273 fmt->bit_depth = SKL_DEPTH_32BIT; 274 break; 275 } 276 } 277 278 } 279 280 /* 281 * A pipeline may have modules which impact the pcm parameters, like SRC, 282 * channel converter, format converter. 283 * We need to calculate the output params by applying the 'fixup' 284 * Topology will tell driver which type of fixup is to be applied by 285 * supplying the fixup mask, so based on that we calculate the output 286 * 287 * Now In FE the pcm hw_params is source/target format. Same is applicable 288 * for BE with its hw_params invoked. 289 * here based on FE, BE pipeline and direction we calculate the input and 290 * outfix and then apply that for a module 291 */ 292 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg, 293 struct skl_pipe_params *params, bool is_fe) 294 { 295 int in_fixup, out_fixup; 296 struct skl_module_fmt *in_fmt, *out_fmt; 297 298 /* Fixups will be applied to pin 0 only */ 299 in_fmt = &m_cfg->module->formats[0].inputs[0].fmt; 300 out_fmt = &m_cfg->module->formats[0].outputs[0].fmt; 301 302 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 303 if (is_fe) { 304 in_fixup = m_cfg->params_fixup; 305 out_fixup = (~m_cfg->converter) & 306 m_cfg->params_fixup; 307 } else { 308 out_fixup = m_cfg->params_fixup; 309 in_fixup = (~m_cfg->converter) & 310 m_cfg->params_fixup; 311 } 312 } else { 313 if (is_fe) { 314 out_fixup = m_cfg->params_fixup; 315 in_fixup = (~m_cfg->converter) & 316 m_cfg->params_fixup; 317 } else { 318 in_fixup = m_cfg->params_fixup; 319 out_fixup = (~m_cfg->converter) & 320 m_cfg->params_fixup; 321 } 322 } 323 324 skl_tplg_update_params(in_fmt, params, in_fixup); 325 skl_tplg_update_params(out_fmt, params, out_fixup); 326 } 327 328 /* 329 * A module needs input and output buffers, which are dependent upon pcm 330 * params, so once we have calculate params, we need buffer calculation as 331 * well. 
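 * (ibs/obs below are sized for 1 ms worth of audio: s_freq / 1000 samples
 * times channels times bytes per sample, with a 5x multiplier for the
 * sample rate converter module)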
332 */ 333 static void skl_tplg_update_buffer_size(struct skl_sst *ctx, 334 struct skl_module_cfg *mcfg) 335 { 336 int multiplier = 1; 337 struct skl_module_fmt *in_fmt, *out_fmt; 338 struct skl_module_res *res; 339 340 /* Since fixups is applied to pin 0 only, ibs, obs needs 341 * change for pin 0 only 342 */ 343 res = &mcfg->module->resources[0]; 344 in_fmt = &mcfg->module->formats[0].inputs[0].fmt; 345 out_fmt = &mcfg->module->formats[0].outputs[0].fmt; 346 347 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 348 multiplier = 5; 349 350 res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) * 351 in_fmt->channels * (in_fmt->bit_depth >> 3) * 352 multiplier; 353 354 res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) * 355 out_fmt->channels * (out_fmt->bit_depth >> 3) * 356 multiplier; 357 } 358 359 static u8 skl_tplg_be_dev_type(int dev_type) 360 { 361 int ret; 362 363 switch (dev_type) { 364 case SKL_DEVICE_BT: 365 ret = NHLT_DEVICE_BT; 366 break; 367 368 case SKL_DEVICE_DMIC: 369 ret = NHLT_DEVICE_DMIC; 370 break; 371 372 case SKL_DEVICE_I2S: 373 ret = NHLT_DEVICE_I2S; 374 break; 375 376 default: 377 ret = NHLT_DEVICE_INVALID; 378 break; 379 } 380 381 return ret; 382 } 383 384 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 385 struct skl_sst *ctx) 386 { 387 struct skl_module_cfg *m_cfg = w->priv; 388 int link_type, dir; 389 u32 ch, s_freq, s_fmt; 390 struct nhlt_specific_cfg *cfg; 391 struct skl *skl = get_skl_ctx(ctx->dev); 392 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type); 393 int fmt_idx = m_cfg->fmt_idx; 394 struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx]; 395 396 /* check if we already have blob */ 397 if (m_cfg->formats_config.caps_size > 0) 398 return 0; 399 400 dev_dbg(ctx->dev, "Applying default cfg blob\n"); 401 switch (m_cfg->dev_type) { 402 case SKL_DEVICE_DMIC: 403 link_type = NHLT_LINK_DMIC; 404 dir = SNDRV_PCM_STREAM_CAPTURE; 405 s_freq = m_iface->inputs[0].fmt.s_freq; 406 s_fmt = m_iface->inputs[0].fmt.bit_depth; 407 ch = m_iface->inputs[0].fmt.channels; 408 break; 409 410 case SKL_DEVICE_I2S: 411 link_type = NHLT_LINK_SSP; 412 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) { 413 dir = SNDRV_PCM_STREAM_PLAYBACK; 414 s_freq = m_iface->outputs[0].fmt.s_freq; 415 s_fmt = m_iface->outputs[0].fmt.bit_depth; 416 ch = m_iface->outputs[0].fmt.channels; 417 } else { 418 dir = SNDRV_PCM_STREAM_CAPTURE; 419 s_freq = m_iface->inputs[0].fmt.s_freq; 420 s_fmt = m_iface->inputs[0].fmt.bit_depth; 421 ch = m_iface->inputs[0].fmt.channels; 422 } 423 break; 424 425 default: 426 return -EINVAL; 427 } 428 429 /* update the blob based on virtual bus_id and default params */ 430 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type, 431 s_fmt, ch, s_freq, dir, dev_type); 432 if (cfg) { 433 m_cfg->formats_config.caps_size = cfg->size; 434 m_cfg->formats_config.caps = (u32 *) &cfg->caps; 435 } else { 436 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n", 437 m_cfg->vbus_id, link_type, dir); 438 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n", 439 ch, s_freq, s_fmt); 440 return -EIO; 441 } 442 443 return 0; 444 } 445 446 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w, 447 struct skl_sst *ctx) 448 { 449 struct skl_module_cfg *m_cfg = w->priv; 450 struct skl_pipe_params *params = m_cfg->pipe->p_params; 451 int p_conn_type = m_cfg->pipe->conn_type; 452 bool is_fe; 453 454 if (!m_cfg->params_fixup) 455 return; 456 457 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n", 458 w->name); 459 460 skl_dump_mconfig(ctx, m_cfg); 461 462 if 
(p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
			w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls which need
 * to be applied after the module is initialized. Such params are identified
 * by the set_param flag being SKL_PARAM_SET and are sent once the module
 * has been initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required when
 * the module is initialized. Such params are identified by the set_param
 * flag being SKL_PARAM_INIT; they are copied into the module's init data
 * and sent as part of module init.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules.
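 * (for example copiers, mixers, PGAs and algorithm modules)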
These modules need 569 * to instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by 570 * skl_init_module() routine, so invoke that for all modules in a pipeline 571 */ 572 static int 573 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) 574 { 575 struct skl_pipe_module *w_module; 576 struct snd_soc_dapm_widget *w; 577 struct skl_module_cfg *mconfig; 578 struct skl_sst *ctx = skl->skl_sst; 579 u8 cfg_idx; 580 int ret = 0; 581 582 list_for_each_entry(w_module, &pipe->w_list, node) { 583 uuid_le *uuid_mod; 584 w = w_module->w; 585 mconfig = w->priv; 586 587 /* check if module ids are populated */ 588 if (mconfig->id.module_id < 0) { 589 dev_err(skl->skl_sst->dev, 590 "module %pUL id not populated\n", 591 (uuid_le *)mconfig->guid); 592 return -EIO; 593 } 594 595 cfg_idx = mconfig->pipe->cur_config_idx; 596 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 597 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 598 599 /* check resource available */ 600 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 601 return -ENOMEM; 602 603 if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) { 604 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 605 mconfig->id.module_id, mconfig->guid); 606 if (ret < 0) 607 return ret; 608 609 mconfig->m_state = SKL_MODULE_LOADED; 610 } 611 612 /* prepare the DMA if the module is gateway cpr */ 613 ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig); 614 if (ret < 0) 615 return ret; 616 617 /* update blob if blob is null for be with default value */ 618 skl_tplg_update_be_blob(w, ctx); 619 620 /* 621 * apply fix/conversion to module params based on 622 * FE/BE params 623 */ 624 skl_tplg_update_module_params(w, ctx); 625 uuid_mod = (uuid_le *)mconfig->guid; 626 mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod, 627 mconfig->id.instance_id); 628 if (mconfig->id.pvt_id < 0) 629 return ret; 630 skl_tplg_set_module_init_data(w); 631 632 ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id); 633 if (ret < 0) { 634 dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n", 635 mconfig->core_id, ret); 636 return ret; 637 } 638 639 ret = skl_init_module(ctx, mconfig); 640 if (ret < 0) { 641 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id); 642 goto err; 643 } 644 skl_tplg_alloc_pipe_mcps(skl, mconfig); 645 ret = skl_tplg_set_module_params(w, ctx); 646 if (ret < 0) 647 goto err; 648 } 649 650 return 0; 651 err: 652 skl_dsp_put_core(ctx->dsp, mconfig->core_id); 653 return ret; 654 } 655 656 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx, 657 struct skl_pipe *pipe) 658 { 659 int ret = 0; 660 struct skl_pipe_module *w_module = NULL; 661 struct skl_module_cfg *mconfig = NULL; 662 663 list_for_each_entry(w_module, &pipe->w_list, node) { 664 uuid_le *uuid_mod; 665 mconfig = w_module->w->priv; 666 uuid_mod = (uuid_le *)mconfig->guid; 667 668 if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod && 669 mconfig->m_state > SKL_MODULE_UNINIT) { 670 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp, 671 mconfig->id.module_id); 672 if (ret < 0) 673 return -EIO; 674 } 675 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id); 676 677 ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id); 678 if (ret < 0) { 679 /* don't return; continue with other modules */ 680 dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n", 681 mconfig->core_id, ret); 682 } 683 } 684 685 /* no modules to unload in this path, so return */ 686 return ret; 687 } 688 689 /* 690 * Here, we select pipe format based on the pipe type and pipe 691 * direction to determine the current config 
index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		pipe->cur_config_idx = 0;
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
		dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
		pipe->cur_config_idx = 0;
		pipe->memory_pages = pconfig->mem_pages;

		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
	    (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			pipe->cur_config_idx = i;
			pipe->memory_pages = pconfig->mem_pages;
			dev_dbg(ctx->dev, "Using pipe config: %d\n", i);

			return 0;
		}
	}

	dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the mixer
 * we need to create the pipeline. We do the following:
 *   - check the resources
 *   - create the pipeline
 *   - initialize the modules in the pipeline
 *   - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
780 * This list contains modules from source to sink 781 */ 782 ret = skl_create_pipeline(ctx, mconfig->pipe); 783 if (ret < 0) 784 return ret; 785 786 skl_tplg_alloc_pipe_mem(skl, mconfig); 787 skl_tplg_alloc_pipe_mcps(skl, mconfig); 788 789 /* Init all pipe modules from source to sink */ 790 ret = skl_tplg_init_pipe_modules(skl, s_pipe); 791 if (ret < 0) 792 return ret; 793 794 /* Bind modules from source to sink */ 795 list_for_each_entry(w_module, &s_pipe->w_list, node) { 796 dst_module = w_module->w->priv; 797 798 if (src_module == NULL) { 799 src_module = dst_module; 800 continue; 801 } 802 803 ret = skl_bind_modules(ctx, src_module, dst_module); 804 if (ret < 0) 805 return ret; 806 807 src_module = dst_module; 808 } 809 810 /* 811 * When the destination module is initialized, check for these modules 812 * in deferred bind list. If found, bind them. 813 */ 814 list_for_each_entry(w_module, &s_pipe->w_list, node) { 815 if (list_empty(&skl->bind_list)) 816 break; 817 818 list_for_each_entry(modules, &skl->bind_list, node) { 819 module = w_module->w->priv; 820 if (modules->dst == module) 821 skl_bind_modules(ctx, modules->src, 822 modules->dst); 823 } 824 } 825 826 return 0; 827 } 828 829 static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params, 830 int size, struct skl_module_cfg *mcfg) 831 { 832 int i, pvt_id; 833 834 if (mcfg->m_type == SKL_MODULE_TYPE_KPB) { 835 struct skl_kpb_params *kpb_params = 836 (struct skl_kpb_params *)params; 837 struct skl_mod_inst_map *inst = kpb_params->u.map; 838 839 for (i = 0; i < kpb_params->num_modules; i++) { 840 pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id, 841 inst->inst_id); 842 if (pvt_id < 0) 843 return -EINVAL; 844 845 inst->inst_id = pvt_id; 846 inst++; 847 } 848 } 849 850 return 0; 851 } 852 /* 853 * Some modules require params to be set after the module is bound to 854 * all pins connected. 855 * 856 * The module provider initializes set_param flag for such modules and we 857 * send params after binding 858 */ 859 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w, 860 struct skl_module_cfg *mcfg, struct skl_sst *ctx) 861 { 862 int i, ret; 863 struct skl_module_cfg *mconfig = w->priv; 864 const struct snd_kcontrol_new *k; 865 struct soc_bytes_ext *sb; 866 struct skl_algo_data *bc; 867 struct skl_specific_cfg *sp_cfg; 868 u32 *params; 869 870 /* 871 * check all out/in pins are in bind state. 
872 * if so set the module param 873 */ 874 for (i = 0; i < mcfg->module->max_output_pins; i++) { 875 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE) 876 return 0; 877 } 878 879 for (i = 0; i < mcfg->module->max_input_pins; i++) { 880 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE) 881 return 0; 882 } 883 884 if (mconfig->formats_config.caps_size > 0 && 885 mconfig->formats_config.set_params == SKL_PARAM_BIND) { 886 sp_cfg = &mconfig->formats_config; 887 ret = skl_set_module_params(ctx, sp_cfg->caps, 888 sp_cfg->caps_size, 889 sp_cfg->param_id, mconfig); 890 if (ret < 0) 891 return ret; 892 } 893 894 for (i = 0; i < w->num_kcontrols; i++) { 895 k = &w->kcontrol_news[i]; 896 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 897 sb = (void *) k->private_value; 898 bc = (struct skl_algo_data *)sb->dobj.private; 899 900 if (bc->set_params == SKL_PARAM_BIND) { 901 params = kzalloc(bc->max, GFP_KERNEL); 902 if (!params) 903 return -ENOMEM; 904 905 memcpy(params, bc->params, bc->max); 906 skl_fill_sink_instance_id(ctx, params, bc->max, 907 mconfig); 908 909 ret = skl_set_module_params(ctx, params, 910 bc->max, bc->param_id, mconfig); 911 kfree(params); 912 913 if (ret < 0) 914 return ret; 915 } 916 } 917 } 918 919 return 0; 920 } 921 922 static int skl_get_module_id(struct skl_sst *ctx, uuid_le *uuid) 923 { 924 struct uuid_module *module; 925 926 list_for_each_entry(module, &ctx->uuid_list, list) { 927 if (uuid_le_cmp(*uuid, module->uuid) == 0) 928 return module->id; 929 } 930 931 return -EINVAL; 932 } 933 934 static int skl_tplg_find_moduleid_from_uuid(struct skl *skl, 935 const struct snd_kcontrol_new *k) 936 { 937 struct soc_bytes_ext *sb = (void *) k->private_value; 938 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private; 939 struct skl_kpb_params *uuid_params, *params; 940 struct hdac_bus *bus = skl_to_bus(skl); 941 int i, size, module_id; 942 943 if (bc->set_params == SKL_PARAM_BIND && bc->max) { 944 uuid_params = (struct skl_kpb_params *)bc->params; 945 size = uuid_params->num_modules * 946 sizeof(struct skl_mod_inst_map) + 947 sizeof(uuid_params->num_modules); 948 949 params = devm_kzalloc(bus->dev, size, GFP_KERNEL); 950 if (!params) 951 return -ENOMEM; 952 953 params->num_modules = uuid_params->num_modules; 954 955 for (i = 0; i < uuid_params->num_modules; i++) { 956 module_id = skl_get_module_id(skl->skl_sst, 957 &uuid_params->u.map_uuid[i].mod_uuid); 958 if (module_id < 0) { 959 devm_kfree(bus->dev, params); 960 return -EINVAL; 961 } 962 963 params->u.map[i].mod_id = module_id; 964 params->u.map[i].inst_id = 965 uuid_params->u.map_uuid[i].inst_id; 966 } 967 968 devm_kfree(bus->dev, bc->params); 969 bc->params = (char *)params; 970 bc->max = size; 971 } 972 973 return 0; 974 } 975 976 /* 977 * Retrieve the module id from UUID mentioned in the 978 * post bind params 979 */ 980 void skl_tplg_add_moduleid_in_bind_params(struct skl *skl, 981 struct snd_soc_dapm_widget *w) 982 { 983 struct skl_module_cfg *mconfig = w->priv; 984 int i; 985 986 /* 987 * Post bind params are used for only for KPB 988 * to set copier instances to drain the data 989 * in fast mode 990 */ 991 if (mconfig->m_type != SKL_MODULE_TYPE_KPB) 992 return; 993 994 for (i = 0; i < w->num_kcontrols; i++) 995 if ((w->kcontrol_news[i].access & 996 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) && 997 (skl_tplg_find_moduleid_from_uuid(skl, 998 &w->kcontrol_news[i]) < 0)) 999 dev_err(skl->skl_sst->dev, 1000 "%s: invalid kpb post bind params\n", 1001 __func__); 1002 } 1003 1004 static int 
skl_tplg_module_add_deferred_bind(struct skl *skl, 1005 struct skl_module_cfg *src, struct skl_module_cfg *dst) 1006 { 1007 struct skl_module_deferred_bind *m_list, *modules; 1008 int i; 1009 1010 /* only supported for module with static pin connection */ 1011 for (i = 0; i < dst->module->max_input_pins; i++) { 1012 struct skl_module_pin *pin = &dst->m_in_pin[i]; 1013 1014 if (pin->is_dynamic) 1015 continue; 1016 1017 if ((pin->id.module_id == src->id.module_id) && 1018 (pin->id.instance_id == src->id.instance_id)) { 1019 1020 if (!list_empty(&skl->bind_list)) { 1021 list_for_each_entry(modules, &skl->bind_list, node) { 1022 if (modules->src == src && modules->dst == dst) 1023 return 0; 1024 } 1025 } 1026 1027 m_list = kzalloc(sizeof(*m_list), GFP_KERNEL); 1028 if (!m_list) 1029 return -ENOMEM; 1030 1031 m_list->src = src; 1032 m_list->dst = dst; 1033 1034 list_add(&m_list->node, &skl->bind_list); 1035 } 1036 } 1037 1038 return 0; 1039 } 1040 1041 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, 1042 struct skl *skl, 1043 struct snd_soc_dapm_widget *src_w, 1044 struct skl_module_cfg *src_mconfig) 1045 { 1046 struct snd_soc_dapm_path *p; 1047 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL; 1048 struct skl_module_cfg *sink_mconfig; 1049 struct skl_sst *ctx = skl->skl_sst; 1050 int ret; 1051 1052 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1053 if (!p->connect) 1054 continue; 1055 1056 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name); 1057 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name); 1058 1059 next_sink = p->sink; 1060 1061 if (!is_skl_dsp_widget_type(p->sink, ctx->dev)) 1062 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig); 1063 1064 /* 1065 * here we will check widgets in sink pipelines, so that 1066 * can be any widgets type and we are only interested if 1067 * they are ones used for SKL so check that first 1068 */ 1069 if ((p->sink->priv != NULL) && 1070 is_skl_dsp_widget_type(p->sink, ctx->dev)) { 1071 1072 sink = p->sink; 1073 sink_mconfig = sink->priv; 1074 1075 /* 1076 * Modules other than PGA leaf can be connected 1077 * directly or via switch to a module in another 1078 * pipeline. EX: reference path 1079 * when the path is enabled, the dst module that needs 1080 * to be bound may not be initialized. if the module is 1081 * not initialized, add these modules in the deferred 1082 * bind list and when the dst module is initialised, 1083 * bind this module to the dst_module in deferred list. 
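			 * (the deferred entries are bound later from
			 * skl_tplg_mixer_dapm_pre_pmu_event(), once the
			 * destination pipe's modules have been initialized)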
1084 */ 1085 if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE) 1086 && (sink_mconfig->m_state == SKL_MODULE_UNINIT))) { 1087 1088 ret = skl_tplg_module_add_deferred_bind(skl, 1089 src_mconfig, sink_mconfig); 1090 1091 if (ret < 0) 1092 return ret; 1093 1094 } 1095 1096 1097 if (src_mconfig->m_state == SKL_MODULE_UNINIT || 1098 sink_mconfig->m_state == SKL_MODULE_UNINIT) 1099 continue; 1100 1101 /* Bind source to sink, mixin is always source */ 1102 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig); 1103 if (ret) 1104 return ret; 1105 1106 /* set module params after bind */ 1107 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx); 1108 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx); 1109 1110 /* Start sinks pipe first */ 1111 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) { 1112 if (sink_mconfig->pipe->conn_type != 1113 SKL_PIPE_CONN_TYPE_FE) 1114 ret = skl_run_pipe(ctx, 1115 sink_mconfig->pipe); 1116 if (ret) 1117 return ret; 1118 } 1119 } 1120 } 1121 1122 if (!sink && next_sink) 1123 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig); 1124 1125 return 0; 1126 } 1127 1128 /* 1129 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA 1130 * we need to do following: 1131 * - Bind to sink pipeline 1132 * Since the sink pipes can be running and we don't get mixer event on 1133 * connect for already running mixer, we need to find the sink pipes 1134 * here and bind to them. This way dynamic connect works. 1135 * - Start sink pipeline, if not running 1136 * - Then run current pipe 1137 */ 1138 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, 1139 struct skl *skl) 1140 { 1141 struct skl_module_cfg *src_mconfig; 1142 struct skl_sst *ctx = skl->skl_sst; 1143 int ret = 0; 1144 1145 src_mconfig = w->priv; 1146 1147 /* 1148 * find which sink it is connected to, bind with the sink, 1149 * if sink is not started, start sink pipe first, then start 1150 * this pipe 1151 */ 1152 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig); 1153 if (ret) 1154 return ret; 1155 1156 /* Start source pipe last after starting all sinks */ 1157 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 1158 return skl_run_pipe(ctx, src_mconfig->pipe); 1159 1160 return 0; 1161 } 1162 1163 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget( 1164 struct snd_soc_dapm_widget *w, struct skl *skl) 1165 { 1166 struct snd_soc_dapm_path *p; 1167 struct snd_soc_dapm_widget *src_w = NULL; 1168 struct skl_sst *ctx = skl->skl_sst; 1169 1170 snd_soc_dapm_widget_for_each_source_path(w, p) { 1171 src_w = p->source; 1172 if (!p->connect) 1173 continue; 1174 1175 dev_dbg(ctx->dev, "sink widget=%s\n", w->name); 1176 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name); 1177 1178 /* 1179 * here we will check widgets in sink pipelines, so that can 1180 * be any widgets type and we are only interested if they are 1181 * ones used for SKL so check that first 1182 */ 1183 if ((p->source->priv != NULL) && 1184 is_skl_dsp_widget_type(p->source, ctx->dev)) { 1185 return p->source; 1186 } 1187 } 1188 1189 if (src_w != NULL) 1190 return skl_get_src_dsp_widget(src_w, skl); 1191 1192 return NULL; 1193 } 1194 1195 /* 1196 * in the Post-PMU event of mixer we need to do following: 1197 * - Check if this pipe is running 1198 * - if not, then 1199 * - bind this pipeline to its source pipeline 1200 * if source pipe is already running, this means it is a dynamic 1201 * connection and we need to bind only to that pipe 1202 * - start this pipeline 1203 */ 1204 static int 
skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w, 1205 struct skl *skl) 1206 { 1207 int ret = 0; 1208 struct snd_soc_dapm_widget *source, *sink; 1209 struct skl_module_cfg *src_mconfig, *sink_mconfig; 1210 struct skl_sst *ctx = skl->skl_sst; 1211 int src_pipe_started = 0; 1212 1213 sink = w; 1214 sink_mconfig = sink->priv; 1215 1216 /* 1217 * If source pipe is already started, that means source is driving 1218 * one more sink before this sink got connected, Since source is 1219 * started, bind this sink to source and start this pipe. 1220 */ 1221 source = skl_get_src_dsp_widget(w, skl); 1222 if (source != NULL) { 1223 src_mconfig = source->priv; 1224 sink_mconfig = sink->priv; 1225 src_pipe_started = 1; 1226 1227 /* 1228 * check pipe state, then no need to bind or start the 1229 * pipe 1230 */ 1231 if (src_mconfig->pipe->state != SKL_PIPE_STARTED) 1232 src_pipe_started = 0; 1233 } 1234 1235 if (src_pipe_started) { 1236 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig); 1237 if (ret) 1238 return ret; 1239 1240 /* set module params after bind */ 1241 skl_tplg_set_module_bind_params(source, src_mconfig, ctx); 1242 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx); 1243 1244 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 1245 ret = skl_run_pipe(ctx, sink_mconfig->pipe); 1246 } 1247 1248 return ret; 1249 } 1250 1251 /* 1252 * in the Pre-PMD event of mixer we need to do following: 1253 * - Stop the pipe 1254 * - find the source connections and remove that from dapm_path_list 1255 * - unbind with source pipelines if still connected 1256 */ 1257 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w, 1258 struct skl *skl) 1259 { 1260 struct skl_module_cfg *src_mconfig, *sink_mconfig; 1261 int ret = 0, i; 1262 struct skl_sst *ctx = skl->skl_sst; 1263 1264 sink_mconfig = w->priv; 1265 1266 /* Stop the pipe */ 1267 ret = skl_stop_pipe(ctx, sink_mconfig->pipe); 1268 if (ret) 1269 return ret; 1270 1271 for (i = 0; i < sink_mconfig->module->max_input_pins; i++) { 1272 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) { 1273 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg; 1274 if (!src_mconfig) 1275 continue; 1276 1277 ret = skl_unbind_modules(ctx, 1278 src_mconfig, sink_mconfig); 1279 } 1280 } 1281 1282 return ret; 1283 } 1284 1285 /* 1286 * in the Post-PMD event of mixer we need to do following: 1287 * - Free the mcps used 1288 * - Free the mem used 1289 * - Unbind the modules within the pipeline 1290 * - Delete the pipeline (modules are not required to be explicitly 1291 * deleted, pipeline delete is enough here 1292 */ 1293 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, 1294 struct skl *skl) 1295 { 1296 struct skl_module_cfg *mconfig = w->priv; 1297 struct skl_pipe_module *w_module; 1298 struct skl_module_cfg *src_module = NULL, *dst_module; 1299 struct skl_sst *ctx = skl->skl_sst; 1300 struct skl_pipe *s_pipe = mconfig->pipe; 1301 struct skl_module_deferred_bind *modules, *tmp; 1302 1303 if (s_pipe->state == SKL_PIPE_INVALID) 1304 return -EINVAL; 1305 1306 skl_tplg_free_pipe_mcps(skl, mconfig); 1307 skl_tplg_free_pipe_mem(skl, mconfig); 1308 1309 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1310 if (list_empty(&skl->bind_list)) 1311 break; 1312 1313 src_module = w_module->w->priv; 1314 1315 list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) { 1316 /* 1317 * When the destination module is deleted, Unbind the 1318 * modules from deferred bind list. 
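			 * (the list entry itself is only freed below, when
			 * the source module of the deferred bind is deleted)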
1319 */ 1320 if (modules->dst == src_module) { 1321 skl_unbind_modules(ctx, modules->src, 1322 modules->dst); 1323 } 1324 1325 /* 1326 * When the source module is deleted, remove this entry 1327 * from the deferred bind list. 1328 */ 1329 if (modules->src == src_module) { 1330 list_del(&modules->node); 1331 modules->src = NULL; 1332 modules->dst = NULL; 1333 kfree(modules); 1334 } 1335 } 1336 } 1337 1338 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1339 dst_module = w_module->w->priv; 1340 1341 if (mconfig->m_state >= SKL_MODULE_INIT_DONE) 1342 skl_tplg_free_pipe_mcps(skl, dst_module); 1343 if (src_module == NULL) { 1344 src_module = dst_module; 1345 continue; 1346 } 1347 1348 skl_unbind_modules(ctx, src_module, dst_module); 1349 src_module = dst_module; 1350 } 1351 1352 skl_delete_pipe(ctx, mconfig->pipe); 1353 1354 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1355 src_module = w_module->w->priv; 1356 src_module->m_state = SKL_MODULE_UNINIT; 1357 } 1358 1359 return skl_tplg_unload_pipe_modules(ctx, s_pipe); 1360 } 1361 1362 /* 1363 * in the Post-PMD event of PGA we need to do following: 1364 * - Free the mcps used 1365 * - Stop the pipeline 1366 * - In source pipe is connected, unbind with source pipelines 1367 */ 1368 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, 1369 struct skl *skl) 1370 { 1371 struct skl_module_cfg *src_mconfig, *sink_mconfig; 1372 int ret = 0, i; 1373 struct skl_sst *ctx = skl->skl_sst; 1374 1375 src_mconfig = w->priv; 1376 1377 /* Stop the pipe since this is a mixin module */ 1378 ret = skl_stop_pipe(ctx, src_mconfig->pipe); 1379 if (ret) 1380 return ret; 1381 1382 for (i = 0; i < src_mconfig->module->max_output_pins; i++) { 1383 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) { 1384 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg; 1385 if (!sink_mconfig) 1386 continue; 1387 /* 1388 * This is a connecter and if path is found that means 1389 * unbind between source and sink has not happened yet 1390 */ 1391 ret = skl_unbind_modules(ctx, src_mconfig, 1392 sink_mconfig); 1393 } 1394 } 1395 1396 return ret; 1397 } 1398 1399 /* 1400 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a 1401 * second one is required that is created as another pipe entity. 1402 * The mixer is responsible for pipe management and represent a pipeline 1403 * instance 1404 */ 1405 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w, 1406 struct snd_kcontrol *k, int event) 1407 { 1408 struct snd_soc_dapm_context *dapm = w->dapm; 1409 struct skl *skl = get_skl_ctx(dapm->dev); 1410 1411 switch (event) { 1412 case SND_SOC_DAPM_PRE_PMU: 1413 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl); 1414 1415 case SND_SOC_DAPM_POST_PMU: 1416 return skl_tplg_mixer_dapm_post_pmu_event(w, skl); 1417 1418 case SND_SOC_DAPM_PRE_PMD: 1419 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl); 1420 1421 case SND_SOC_DAPM_POST_PMD: 1422 return skl_tplg_mixer_dapm_post_pmd_event(w, skl); 1423 } 1424 1425 return 0; 1426 } 1427 1428 /* 1429 * In modelling, we assumed rest of the modules in pipeline are PGA. 
But we are
 * interested in the last PGA (leaf PGA) in a pipeline, to disconnect it from
 * the sink while the sink is running (two FE to one BE, or one FE to two BE,
 * scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects the
		 * actual parameter id and size from the control.
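		 * (in that case the whole TLV payload is copied as-is;
		 * otherwise the two header words are skipped before copying)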
1505 */ 1506 if (ac->param_id == SKL_PARAM_VENDOR_ID) { 1507 if (copy_from_user(ac->params, data, size)) 1508 return -EFAULT; 1509 } else { 1510 if (copy_from_user(ac->params, 1511 data + 2, size)) 1512 return -EFAULT; 1513 } 1514 1515 if (w->power) 1516 return skl_set_module_params(skl->skl_sst, 1517 (u32 *)ac->params, ac->size, 1518 ac->param_id, mconfig); 1519 } 1520 1521 return 0; 1522 } 1523 1524 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol, 1525 struct snd_ctl_elem_value *ucontrol) 1526 { 1527 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1528 struct skl_module_cfg *mconfig = w->priv; 1529 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; 1530 u32 ch_type = *((u32 *)ec->dobj.private); 1531 1532 if (mconfig->dmic_ch_type == ch_type) 1533 ucontrol->value.enumerated.item[0] = 1534 mconfig->dmic_ch_combo_index; 1535 else 1536 ucontrol->value.enumerated.item[0] = 0; 1537 1538 return 0; 1539 } 1540 1541 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig, 1542 struct skl_mic_sel_config *mic_cfg, struct device *dev) 1543 { 1544 struct skl_specific_cfg *sp_cfg = &mconfig->formats_config; 1545 1546 sp_cfg->caps_size = sizeof(struct skl_mic_sel_config); 1547 sp_cfg->set_params = SKL_PARAM_SET; 1548 sp_cfg->param_id = 0x00; 1549 if (!sp_cfg->caps) { 1550 sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL); 1551 if (!sp_cfg->caps) 1552 return -ENOMEM; 1553 } 1554 1555 mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH; 1556 mic_cfg->flags = 0; 1557 memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size); 1558 1559 return 0; 1560 } 1561 1562 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol, 1563 struct snd_ctl_elem_value *ucontrol) 1564 { 1565 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol); 1566 struct skl_module_cfg *mconfig = w->priv; 1567 struct skl_mic_sel_config mic_cfg = {0}; 1568 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; 1569 u32 ch_type = *((u32 *)ec->dobj.private); 1570 const int *list; 1571 u8 in_ch, out_ch, index; 1572 1573 mconfig->dmic_ch_type = ch_type; 1574 mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0]; 1575 1576 /* enum control index 0 is INVALID, so no channels to be set */ 1577 if (mconfig->dmic_ch_combo_index == 0) 1578 return 0; 1579 1580 /* No valid channel selection map for index 0, so offset by 1 */ 1581 index = mconfig->dmic_ch_combo_index - 1; 1582 1583 switch (ch_type) { 1584 case SKL_CH_MONO: 1585 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list)) 1586 return -EINVAL; 1587 1588 list = &mic_mono_list[index]; 1589 break; 1590 1591 case SKL_CH_STEREO: 1592 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list)) 1593 return -EINVAL; 1594 1595 list = mic_stereo_list[index]; 1596 break; 1597 1598 case SKL_CH_TRIO: 1599 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list)) 1600 return -EINVAL; 1601 1602 list = mic_trio_list[index]; 1603 break; 1604 1605 case SKL_CH_QUATRO: 1606 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list)) 1607 return -EINVAL; 1608 1609 list = mic_quatro_list[index]; 1610 break; 1611 1612 default: 1613 dev_err(w->dapm->dev, 1614 "Invalid channel %d for mic_select module\n", 1615 ch_type); 1616 return -EINVAL; 1617 1618 } 1619 1620 /* channel type enum map to number of chanels for that type */ 1621 for (out_ch = 0; out_ch < ch_type; out_ch++) { 1622 in_ch = list[out_ch]; 1623 mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN; 1624 } 1625 1626 return 
skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev); 1627 } 1628 1629 /* 1630 * Fill the dma id for host and link. In case of passthrough 1631 * pipeline, this will both host and link in the same 1632 * pipeline, so need to copy the link and host based on dev_type 1633 */ 1634 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg, 1635 struct skl_pipe_params *params) 1636 { 1637 struct skl_pipe *pipe = mcfg->pipe; 1638 1639 if (pipe->passthru) { 1640 switch (mcfg->dev_type) { 1641 case SKL_DEVICE_HDALINK: 1642 pipe->p_params->link_dma_id = params->link_dma_id; 1643 pipe->p_params->link_index = params->link_index; 1644 pipe->p_params->link_bps = params->link_bps; 1645 break; 1646 1647 case SKL_DEVICE_HDAHOST: 1648 pipe->p_params->host_dma_id = params->host_dma_id; 1649 pipe->p_params->host_bps = params->host_bps; 1650 break; 1651 1652 default: 1653 break; 1654 } 1655 pipe->p_params->s_fmt = params->s_fmt; 1656 pipe->p_params->ch = params->ch; 1657 pipe->p_params->s_freq = params->s_freq; 1658 pipe->p_params->stream = params->stream; 1659 pipe->p_params->format = params->format; 1660 1661 } else { 1662 memcpy(pipe->p_params, params, sizeof(*params)); 1663 } 1664 } 1665 1666 /* 1667 * The FE params are passed by hw_params of the DAI. 1668 * On hw_params, the params are stored in Gateway module of the FE and we 1669 * need to calculate the format in DSP module configuration, that 1670 * conversion is done here 1671 */ 1672 int skl_tplg_update_pipe_params(struct device *dev, 1673 struct skl_module_cfg *mconfig, 1674 struct skl_pipe_params *params) 1675 { 1676 struct skl_module_res *res = &mconfig->module->resources[0]; 1677 struct skl *skl = get_skl_ctx(dev); 1678 struct skl_module_fmt *format = NULL; 1679 u8 cfg_idx = mconfig->pipe->cur_config_idx; 1680 1681 skl_tplg_fill_dma_id(mconfig, params); 1682 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 1683 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 1684 1685 if (skl->nr_modules) 1686 return 0; 1687 1688 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) 1689 format = &mconfig->module->formats[0].inputs[0].fmt; 1690 else 1691 format = &mconfig->module->formats[0].outputs[0].fmt; 1692 1693 /* set the hw_params */ 1694 format->s_freq = params->s_freq; 1695 format->channels = params->ch; 1696 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 1697 1698 /* 1699 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 1700 * container so update bit depth accordingly 1701 */ 1702 switch (format->valid_bit_depth) { 1703 case SKL_DEPTH_16BIT: 1704 format->bit_depth = format->valid_bit_depth; 1705 break; 1706 1707 case SKL_DEPTH_24BIT: 1708 case SKL_DEPTH_32BIT: 1709 format->bit_depth = SKL_DEPTH_32BIT; 1710 break; 1711 1712 default: 1713 dev_err(dev, "Invalid bit depth %x for pipe\n", 1714 format->valid_bit_depth); 1715 return -EINVAL; 1716 } 1717 1718 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1719 res->ibs = (format->s_freq / 1000) * 1720 (format->channels) * 1721 (format->bit_depth >> 3); 1722 } else { 1723 res->obs = (format->s_freq / 1000) * 1724 (format->channels) * 1725 (format->bit_depth >> 3); 1726 } 1727 1728 return 0; 1729 } 1730 1731 /* 1732 * Query the module config for the FE DAI 1733 * This is used to find the hw_params set for that DAI and apply to FE 1734 * pipeline 1735 */ 1736 struct skl_module_cfg * 1737 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream) 1738 { 1739 struct snd_soc_dapm_widget *w; 1740 struct snd_soc_dapm_path *p = NULL; 1741 1742 if (stream == 
SNDRV_PCM_STREAM_PLAYBACK) { 1743 w = dai->playback_widget; 1744 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1745 if (p->connect && p->sink->power && 1746 !is_skl_dsp_widget_type(p->sink, dai->dev)) 1747 continue; 1748 1749 if (p->sink->priv) { 1750 dev_dbg(dai->dev, "set params for %s\n", 1751 p->sink->name); 1752 return p->sink->priv; 1753 } 1754 } 1755 } else { 1756 w = dai->capture_widget; 1757 snd_soc_dapm_widget_for_each_source_path(w, p) { 1758 if (p->connect && p->source->power && 1759 !is_skl_dsp_widget_type(p->source, dai->dev)) 1760 continue; 1761 1762 if (p->source->priv) { 1763 dev_dbg(dai->dev, "set params for %s\n", 1764 p->source->name); 1765 return p->source->priv; 1766 } 1767 } 1768 } 1769 1770 return NULL; 1771 } 1772 1773 static struct skl_module_cfg *skl_get_mconfig_pb_cpr( 1774 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1775 { 1776 struct snd_soc_dapm_path *p; 1777 struct skl_module_cfg *mconfig = NULL; 1778 1779 snd_soc_dapm_widget_for_each_source_path(w, p) { 1780 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) { 1781 if (p->connect && 1782 (p->sink->id == snd_soc_dapm_aif_out) && 1783 p->source->priv) { 1784 mconfig = p->source->priv; 1785 return mconfig; 1786 } 1787 mconfig = skl_get_mconfig_pb_cpr(dai, p->source); 1788 if (mconfig) 1789 return mconfig; 1790 } 1791 } 1792 return mconfig; 1793 } 1794 1795 static struct skl_module_cfg *skl_get_mconfig_cap_cpr( 1796 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1797 { 1798 struct snd_soc_dapm_path *p; 1799 struct skl_module_cfg *mconfig = NULL; 1800 1801 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1802 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) { 1803 if (p->connect && 1804 (p->source->id == snd_soc_dapm_aif_in) && 1805 p->sink->priv) { 1806 mconfig = p->sink->priv; 1807 return mconfig; 1808 } 1809 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink); 1810 if (mconfig) 1811 return mconfig; 1812 } 1813 } 1814 return mconfig; 1815 } 1816 1817 struct skl_module_cfg * 1818 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream) 1819 { 1820 struct snd_soc_dapm_widget *w; 1821 struct skl_module_cfg *mconfig; 1822 1823 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1824 w = dai->playback_widget; 1825 mconfig = skl_get_mconfig_pb_cpr(dai, w); 1826 } else { 1827 w = dai->capture_widget; 1828 mconfig = skl_get_mconfig_cap_cpr(dai, w); 1829 } 1830 return mconfig; 1831 } 1832 1833 static u8 skl_tplg_be_link_type(int dev_type) 1834 { 1835 int ret; 1836 1837 switch (dev_type) { 1838 case SKL_DEVICE_BT: 1839 ret = NHLT_LINK_SSP; 1840 break; 1841 1842 case SKL_DEVICE_DMIC: 1843 ret = NHLT_LINK_DMIC; 1844 break; 1845 1846 case SKL_DEVICE_I2S: 1847 ret = NHLT_LINK_SSP; 1848 break; 1849 1850 case SKL_DEVICE_HDALINK: 1851 ret = NHLT_LINK_HDA; 1852 break; 1853 1854 default: 1855 ret = NHLT_LINK_INVALID; 1856 break; 1857 } 1858 1859 return ret; 1860 } 1861 1862 /* 1863 * Fill the BE gateway parameters 1864 * The BE gateway expects a blob of parameters which are kept in the ACPI 1865 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance. 
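 * The virtual bus id from the topology selects the endpoint within the blob.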
1866 * The port can have multiple settings so pick based on the PCM 1867 * parameters 1868 */ 1869 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, 1870 struct skl_module_cfg *mconfig, 1871 struct skl_pipe_params *params) 1872 { 1873 struct nhlt_specific_cfg *cfg; 1874 struct skl *skl = get_skl_ctx(dai->dev); 1875 int link_type = skl_tplg_be_link_type(mconfig->dev_type); 1876 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type); 1877 1878 skl_tplg_fill_dma_id(mconfig, params); 1879 1880 if (link_type == NHLT_LINK_HDA) 1881 return 0; 1882 1883 /* update the blob based on virtual bus_id*/ 1884 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type, 1885 params->s_fmt, params->ch, 1886 params->s_freq, params->stream, 1887 dev_type); 1888 if (cfg) { 1889 mconfig->formats_config.caps_size = cfg->size; 1890 mconfig->formats_config.caps = (u32 *) &cfg->caps; 1891 } else { 1892 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n", 1893 mconfig->vbus_id, link_type, 1894 params->stream); 1895 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n", 1896 params->ch, params->s_freq, params->s_fmt); 1897 return -EINVAL; 1898 } 1899 1900 return 0; 1901 } 1902 1903 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai, 1904 struct snd_soc_dapm_widget *w, 1905 struct skl_pipe_params *params) 1906 { 1907 struct snd_soc_dapm_path *p; 1908 int ret = -EIO; 1909 1910 snd_soc_dapm_widget_for_each_source_path(w, p) { 1911 if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) && 1912 p->source->priv) { 1913 1914 ret = skl_tplg_be_fill_pipe_params(dai, 1915 p->source->priv, params); 1916 if (ret < 0) 1917 return ret; 1918 } else { 1919 ret = skl_tplg_be_set_src_pipe_params(dai, 1920 p->source, params); 1921 if (ret < 0) 1922 return ret; 1923 } 1924 } 1925 1926 return ret; 1927 } 1928 1929 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai, 1930 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params) 1931 { 1932 struct snd_soc_dapm_path *p = NULL; 1933 int ret = -EIO; 1934 1935 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1936 if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) && 1937 p->sink->priv) { 1938 1939 ret = skl_tplg_be_fill_pipe_params(dai, 1940 p->sink->priv, params); 1941 if (ret < 0) 1942 return ret; 1943 } else { 1944 ret = skl_tplg_be_set_sink_pipe_params( 1945 dai, p->sink, params); 1946 if (ret < 0) 1947 return ret; 1948 } 1949 } 1950 1951 return ret; 1952 } 1953 1954 /* 1955 * BE hw_params can be a source parameters (capture) or sink parameters 1956 * (playback). 
Based on sink and source we need to either find the source 1957 * list or the sink list and set the pipeline parameters 1958 */ 1959 int skl_tplg_be_update_params(struct snd_soc_dai *dai, 1960 struct skl_pipe_params *params) 1961 { 1962 struct snd_soc_dapm_widget *w; 1963 1964 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1965 w = dai->playback_widget; 1966 1967 return skl_tplg_be_set_src_pipe_params(dai, w, params); 1968 1969 } else { 1970 w = dai->capture_widget; 1971 1972 return skl_tplg_be_set_sink_pipe_params(dai, w, params); 1973 } 1974 1975 return 0; 1976 } 1977 1978 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = { 1979 {SKL_MIXER_EVENT, skl_tplg_mixer_event}, 1980 {SKL_VMIXER_EVENT, skl_tplg_mixer_event}, 1981 {SKL_PGA_EVENT, skl_tplg_pga_event}, 1982 }; 1983 1984 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = { 1985 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get, 1986 skl_tplg_tlv_control_set}, 1987 }; 1988 1989 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = { 1990 { 1991 .id = SKL_CONTROL_TYPE_MIC_SELECT, 1992 .get = skl_tplg_mic_control_get, 1993 .put = skl_tplg_mic_control_set, 1994 }, 1995 }; 1996 1997 static int skl_tplg_fill_pipe_cfg(struct device *dev, 1998 struct skl_pipe *pipe, u32 tkn, 1999 u32 tkn_val, int conf_idx, int dir) 2000 { 2001 struct skl_pipe_fmt *fmt; 2002 struct skl_path_config *config; 2003 2004 switch (dir) { 2005 case SKL_DIR_IN: 2006 fmt = &pipe->configs[conf_idx].in_fmt; 2007 break; 2008 2009 case SKL_DIR_OUT: 2010 fmt = &pipe->configs[conf_idx].out_fmt; 2011 break; 2012 2013 default: 2014 dev_err(dev, "Invalid direction: %d\n", dir); 2015 return -EINVAL; 2016 } 2017 2018 config = &pipe->configs[conf_idx]; 2019 2020 switch (tkn) { 2021 case SKL_TKN_U32_CFG_FREQ: 2022 fmt->freq = tkn_val; 2023 break; 2024 2025 case SKL_TKN_U8_CFG_CHAN: 2026 fmt->channels = tkn_val; 2027 break; 2028 2029 case SKL_TKN_U8_CFG_BPS: 2030 fmt->bps = tkn_val; 2031 break; 2032 2033 case SKL_TKN_U32_PATH_MEM_PGS: 2034 config->mem_pages = tkn_val; 2035 break; 2036 2037 default: 2038 dev_err(dev, "Invalid token config: %d\n", tkn); 2039 return -EINVAL; 2040 } 2041 2042 return 0; 2043 } 2044 2045 static int skl_tplg_fill_pipe_tkn(struct device *dev, 2046 struct skl_pipe *pipe, u32 tkn, 2047 u32 tkn_val) 2048 { 2049 2050 switch (tkn) { 2051 case SKL_TKN_U32_PIPE_CONN_TYPE: 2052 pipe->conn_type = tkn_val; 2053 break; 2054 2055 case SKL_TKN_U32_PIPE_PRIORITY: 2056 pipe->pipe_priority = tkn_val; 2057 break; 2058 2059 case SKL_TKN_U32_PIPE_MEM_PGS: 2060 pipe->memory_pages = tkn_val; 2061 break; 2062 2063 case SKL_TKN_U32_PMODE: 2064 pipe->lp_mode = tkn_val; 2065 break; 2066 2067 case SKL_TKN_U32_PIPE_DIRECTION: 2068 pipe->direction = tkn_val; 2069 break; 2070 2071 case SKL_TKN_U32_NUM_CONFIGS: 2072 pipe->nr_cfgs = tkn_val; 2073 break; 2074 2075 default: 2076 dev_err(dev, "Token not handled %d\n", tkn); 2077 return -EINVAL; 2078 } 2079 2080 return 0; 2081 } 2082 2083 /* 2084 * Add pipeline by parsing the relevant tokens 2085 * Return an existing pipe if the pipe already exists. 
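 * (in that case mconfig->pipe is pointed at the existing pipe and -EEXIST is
 * returned)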
2086 */ 2087 static int skl_tplg_add_pipe(struct device *dev, 2088 struct skl_module_cfg *mconfig, struct skl *skl, 2089 struct snd_soc_tplg_vendor_value_elem *tkn_elem) 2090 { 2091 struct skl_pipeline *ppl; 2092 struct skl_pipe *pipe; 2093 struct skl_pipe_params *params; 2094 2095 list_for_each_entry(ppl, &skl->ppl_list, node) { 2096 if (ppl->pipe->ppl_id == tkn_elem->value) { 2097 mconfig->pipe = ppl->pipe; 2098 return -EEXIST; 2099 } 2100 } 2101 2102 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2103 if (!ppl) 2104 return -ENOMEM; 2105 2106 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2107 if (!pipe) 2108 return -ENOMEM; 2109 2110 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2111 if (!params) 2112 return -ENOMEM; 2113 2114 pipe->p_params = params; 2115 pipe->ppl_id = tkn_elem->value; 2116 INIT_LIST_HEAD(&pipe->w_list); 2117 2118 ppl->pipe = pipe; 2119 list_add(&ppl->node, &skl->ppl_list); 2120 2121 mconfig->pipe = pipe; 2122 mconfig->pipe->state = SKL_PIPE_INVALID; 2123 2124 return 0; 2125 } 2126 2127 static int skl_tplg_get_uuid(struct device *dev, u8 *guid, 2128 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 2129 { 2130 if (uuid_tkn->token == SKL_TKN_UUID) { 2131 memcpy(guid, &uuid_tkn->uuid, 16); 2132 return 0; 2133 } 2134 2135 dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token); 2136 2137 return -EINVAL; 2138 } 2139 2140 static int skl_tplg_fill_pin(struct device *dev, 2141 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2142 struct skl_module_pin *m_pin, 2143 int pin_index) 2144 { 2145 int ret; 2146 2147 switch (tkn_elem->token) { 2148 case SKL_TKN_U32_PIN_MOD_ID: 2149 m_pin[pin_index].id.module_id = tkn_elem->value; 2150 break; 2151 2152 case SKL_TKN_U32_PIN_INST_ID: 2153 m_pin[pin_index].id.instance_id = tkn_elem->value; 2154 break; 2155 2156 case SKL_TKN_UUID: 2157 ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b, 2158 (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem); 2159 if (ret < 0) 2160 return ret; 2161 2162 break; 2163 2164 default: 2165 dev_err(dev, "%d Not a pin token\n", tkn_elem->token); 2166 return -EINVAL; 2167 } 2168 2169 return 0; 2170 } 2171 2172 /* 2173 * Parse for pin config specific tokens to fill up the 2174 * module private data 2175 */ 2176 static int skl_tplg_fill_pins_info(struct device *dev, 2177 struct skl_module_cfg *mconfig, 2178 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2179 int dir, int pin_count) 2180 { 2181 int ret; 2182 struct skl_module_pin *m_pin; 2183 2184 switch (dir) { 2185 case SKL_DIR_IN: 2186 m_pin = mconfig->m_in_pin; 2187 break; 2188 2189 case SKL_DIR_OUT: 2190 m_pin = mconfig->m_out_pin; 2191 break; 2192 2193 default: 2194 dev_err(dev, "Invalid direction value\n"); 2195 return -EINVAL; 2196 } 2197 2198 ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count); 2199 if (ret < 0) 2200 return ret; 2201 2202 m_pin[pin_count].in_use = false; 2203 m_pin[pin_count].pin_state = SKL_PIN_UNBIND; 2204 2205 return 0; 2206 } 2207 2208 /* 2209 * Fill up input/output module config format based 2210 * on the direction 2211 */ 2212 static int skl_tplg_fill_fmt(struct device *dev, 2213 struct skl_module_fmt *dst_fmt, 2214 u32 tkn, u32 value) 2215 { 2216 switch (tkn) { 2217 case SKL_TKN_U32_FMT_CH: 2218 dst_fmt->channels = value; 2219 break; 2220 2221 case SKL_TKN_U32_FMT_FREQ: 2222 dst_fmt->s_freq = value; 2223 break; 2224 2225 case SKL_TKN_U32_FMT_BIT_DEPTH: 2226 dst_fmt->bit_depth = value; 2227 break; 2228 2229 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2230 dst_fmt->valid_bit_depth = value; 2231 break; 2232 2233 
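	/*
	 * Note: FMT_SAMPLE_SIZE above carries the number of valid bits per
	 * sample (valid_bit_depth), whereas FMT_BIT_DEPTH is the container
	 * width (bit_depth), e.g. 24 valid bits carried in a 32 bit slot.
	 */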
case SKL_TKN_U32_FMT_CH_CONFIG: 2234 dst_fmt->ch_cfg = value; 2235 break; 2236 2237 case SKL_TKN_U32_FMT_INTERLEAVE: 2238 dst_fmt->interleaving_style = value; 2239 break; 2240 2241 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2242 dst_fmt->sample_type = value; 2243 break; 2244 2245 case SKL_TKN_U32_FMT_CH_MAP: 2246 dst_fmt->ch_map = value; 2247 break; 2248 2249 default: 2250 dev_err(dev, "Invalid token %d\n", tkn); 2251 return -EINVAL; 2252 } 2253 2254 return 0; 2255 } 2256 2257 static int skl_tplg_widget_fill_fmt(struct device *dev, 2258 struct skl_module_iface *fmt, 2259 u32 tkn, u32 val, u32 dir, int fmt_idx) 2260 { 2261 struct skl_module_fmt *dst_fmt; 2262 2263 if (!fmt) 2264 return -EINVAL; 2265 2266 switch (dir) { 2267 case SKL_DIR_IN: 2268 dst_fmt = &fmt->inputs[fmt_idx].fmt; 2269 break; 2270 2271 case SKL_DIR_OUT: 2272 dst_fmt = &fmt->outputs[fmt_idx].fmt; 2273 break; 2274 2275 default: 2276 dev_err(dev, "Invalid direction: %d\n", dir); 2277 return -EINVAL; 2278 } 2279 2280 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val); 2281 } 2282 2283 static void skl_tplg_fill_pin_dynamic_val( 2284 struct skl_module_pin *mpin, u32 pin_count, u32 value) 2285 { 2286 int i; 2287 2288 for (i = 0; i < pin_count; i++) 2289 mpin[i].is_dynamic = value; 2290 } 2291 2292 /* 2293 * Resource table in the manifest has pin specific resources 2294 * like pin and pin buffer size 2295 */ 2296 static int skl_tplg_manifest_pin_res_tkn(struct device *dev, 2297 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2298 struct skl_module_res *res, int pin_idx, int dir) 2299 { 2300 struct skl_module_pin_resources *m_pin; 2301 2302 switch (dir) { 2303 case SKL_DIR_IN: 2304 m_pin = &res->input[pin_idx]; 2305 break; 2306 2307 case SKL_DIR_OUT: 2308 m_pin = &res->output[pin_idx]; 2309 break; 2310 2311 default: 2312 dev_err(dev, "Invalid pin direction: %d\n", dir); 2313 return -EINVAL; 2314 } 2315 2316 switch (tkn_elem->token) { 2317 case SKL_TKN_MM_U32_RES_PIN_ID: 2318 m_pin->pin_index = tkn_elem->value; 2319 break; 2320 2321 case SKL_TKN_MM_U32_PIN_BUF: 2322 m_pin->buf_size = tkn_elem->value; 2323 break; 2324 2325 default: 2326 dev_err(dev, "Invalid token: %d\n", tkn_elem->token); 2327 return -EINVAL; 2328 } 2329 2330 return 0; 2331 } 2332 2333 /* 2334 * Fill module specific resources from the manifest's resource 2335 * table like CPS, DMA size, mem_pages. 
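 *
 * Both the widget token parser (skl_tplg_get_token()) and the manifest
 * parser (skl_tplg_get_int_tkn()) route their resource tokens through
 * this helper.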
2336 */ 2337 static int skl_tplg_fill_res_tkn(struct device *dev, 2338 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2339 struct skl_module_res *res, 2340 int pin_idx, int dir) 2341 { 2342 int ret, tkn_count = 0; 2343 2344 if (!res) 2345 return -EINVAL; 2346 2347 switch (tkn_elem->token) { 2348 case SKL_TKN_MM_U32_CPS: 2349 res->cps = tkn_elem->value; 2350 break; 2351 2352 case SKL_TKN_MM_U32_DMA_SIZE: 2353 res->dma_buffer_size = tkn_elem->value; 2354 break; 2355 2356 case SKL_TKN_MM_U32_CPC: 2357 res->cpc = tkn_elem->value; 2358 break; 2359 2360 case SKL_TKN_U32_MEM_PAGES: 2361 res->is_pages = tkn_elem->value; 2362 break; 2363 2364 case SKL_TKN_U32_OBS: 2365 res->obs = tkn_elem->value; 2366 break; 2367 2368 case SKL_TKN_U32_IBS: 2369 res->ibs = tkn_elem->value; 2370 break; 2371 2372 case SKL_TKN_U32_MAX_MCPS: 2373 res->cps = tkn_elem->value; 2374 break; 2375 2376 case SKL_TKN_MM_U32_RES_PIN_ID: 2377 case SKL_TKN_MM_U32_PIN_BUF: 2378 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res, 2379 pin_idx, dir); 2380 if (ret < 0) 2381 return ret; 2382 break; 2383 2384 default: 2385 dev_err(dev, "Not a res type token: %d", tkn_elem->token); 2386 return -EINVAL; 2387 2388 } 2389 tkn_count++; 2390 2391 return tkn_count; 2392 } 2393 2394 /* 2395 * Parse tokens to fill up the module private data 2396 */ 2397 static int skl_tplg_get_token(struct device *dev, 2398 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2399 struct skl *skl, struct skl_module_cfg *mconfig) 2400 { 2401 int tkn_count = 0; 2402 int ret; 2403 static int is_pipe_exists; 2404 static int pin_index, dir, conf_idx; 2405 struct skl_module_iface *iface = NULL; 2406 struct skl_module_res *res = NULL; 2407 int res_idx = mconfig->res_idx; 2408 int fmt_idx = mconfig->fmt_idx; 2409 2410 /* 2411 * If the manifest structure contains no modules, fill all 2412 * the module data to 0th index. 2413 * res_idx and fmt_idx are default set to 0. 
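 *
 * skl->nr_modules is non-zero only when the manifest supplied per-module
 * resource/interface tables (see SKL_TKN_U8_NUM_MOD in the manifest
 * parser); in that case res and iface are left NULL here.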
 */
	if (skl->nr_modules == 0) {
		res = &mconfig->module->resources[res_idx];
		iface = &mconfig->module->formats[fmt_idx];
	}

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->module->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->module->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			mconfig->m_in_pin =
				devm_kcalloc(dev, MAX_IN_QUEUE,
					     sizeof(*mconfig->m_in_pin),
					     GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_OUT_QUEUE,
					     sizeof(*mconfig->m_out_pin),
					     GFP_KERNEL);
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_MAX_MCPS:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev, mconfig, skl, tkn_elem);
		if (ret < 0) {
			if (ret == -EEXIST) {
				is_pipe_exists = 1;
				break;
			}
			return ret;
		}

		break;

	case SKL_TKN_U32_PIPE_CONFIG_ID:
		conf_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
	case SKL_TKN_U32_PIPE_DIRECTION:
	case SKL_TKN_U32_NUM_CONFIGS:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
	case SKL_TKN_U32_CFG_FREQ:
	case SKL_TKN_U8_CFG_CHAN:
	case SKL_TKN_U8_CFG_BPS:
		if (mconfig->pipe->nr_cfgs) {
			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value,
					conf_idx, dir);
			if (ret < 0)
				return ret;
		}
		break;

	case SKL_TKN_CFG_MOD_RES_ID:
		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
		break;

	case SKL_TKN_CFG_MOD_FMT_ID:
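		/*
		 * The res_idx/fmt_idx stored per pipe configuration select
		 * which entry of the module's resources[]/formats[] tables
		 * that configuration uses.
		 */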
mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2560 break; 2561 2562 /* 2563 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2564 * direction and the pin count. The first four bits represent 2565 * direction and next four the pin count. 2566 */ 2567 case SKL_TKN_U32_DIR_PIN_COUNT: 2568 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 2569 pin_index = (tkn_elem->value & 2570 SKL_PIN_COUNT_MASK) >> 4; 2571 2572 break; 2573 2574 case SKL_TKN_U32_FMT_CH: 2575 case SKL_TKN_U32_FMT_FREQ: 2576 case SKL_TKN_U32_FMT_BIT_DEPTH: 2577 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2578 case SKL_TKN_U32_FMT_CH_CONFIG: 2579 case SKL_TKN_U32_FMT_INTERLEAVE: 2580 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2581 case SKL_TKN_U32_FMT_CH_MAP: 2582 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token, 2583 tkn_elem->value, dir, pin_index); 2584 2585 if (ret < 0) 2586 return ret; 2587 2588 break; 2589 2590 case SKL_TKN_U32_PIN_MOD_ID: 2591 case SKL_TKN_U32_PIN_INST_ID: 2592 case SKL_TKN_UUID: 2593 ret = skl_tplg_fill_pins_info(dev, 2594 mconfig, tkn_elem, dir, 2595 pin_index); 2596 if (ret < 0) 2597 return ret; 2598 2599 break; 2600 2601 case SKL_TKN_U32_CAPS_SIZE: 2602 mconfig->formats_config.caps_size = 2603 tkn_elem->value; 2604 2605 break; 2606 2607 case SKL_TKN_U32_CAPS_SET_PARAMS: 2608 mconfig->formats_config.set_params = 2609 tkn_elem->value; 2610 break; 2611 2612 case SKL_TKN_U32_CAPS_PARAMS_ID: 2613 mconfig->formats_config.param_id = 2614 tkn_elem->value; 2615 break; 2616 2617 case SKL_TKN_U32_PROC_DOMAIN: 2618 mconfig->domain = 2619 tkn_elem->value; 2620 2621 break; 2622 2623 case SKL_TKN_U32_DMA_BUF_SIZE: 2624 mconfig->dma_buffer_size = tkn_elem->value; 2625 break; 2626 2627 case SKL_TKN_U8_IN_PIN_TYPE: 2628 case SKL_TKN_U8_OUT_PIN_TYPE: 2629 case SKL_TKN_U8_CONN_TYPE: 2630 break; 2631 2632 default: 2633 dev_err(dev, "Token %d not handled\n", 2634 tkn_elem->token); 2635 return -EINVAL; 2636 } 2637 2638 tkn_count++; 2639 2640 return tkn_count; 2641 } 2642 2643 /* 2644 * Parse the vendor array for specific tokens to construct 2645 * module private data 2646 */ 2647 static int skl_tplg_get_tokens(struct device *dev, 2648 char *pvt_data, struct skl *skl, 2649 struct skl_module_cfg *mconfig, int block_size) 2650 { 2651 struct snd_soc_tplg_vendor_array *array; 2652 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2653 int tkn_count = 0, ret; 2654 int off = 0, tuple_size = 0; 2655 bool is_module_guid = true; 2656 2657 if (block_size <= 0) 2658 return -EINVAL; 2659 2660 while (tuple_size < block_size) { 2661 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 2662 2663 off += array->size; 2664 2665 switch (array->type) { 2666 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 2667 dev_warn(dev, "no string tokens expected for skl tplg\n"); 2668 continue; 2669 2670 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 2671 if (is_module_guid) { 2672 ret = skl_tplg_get_uuid(dev, mconfig->guid, 2673 array->uuid); 2674 is_module_guid = false; 2675 } else { 2676 ret = skl_tplg_get_token(dev, array->value, skl, 2677 mconfig); 2678 } 2679 2680 if (ret < 0) 2681 return ret; 2682 2683 tuple_size += sizeof(*array->uuid); 2684 2685 continue; 2686 2687 default: 2688 tkn_elem = array->value; 2689 tkn_count = 0; 2690 break; 2691 } 2692 2693 while (tkn_count <= (array->num_elems - 1)) { 2694 ret = skl_tplg_get_token(dev, tkn_elem, 2695 skl, mconfig); 2696 2697 if (ret < 0) 2698 return ret; 2699 2700 tkn_count = tkn_count + ret; 2701 tkn_elem++; 2702 } 2703 2704 tuple_size += tkn_count * sizeof(*tkn_elem); 2705 } 2706 2707 return off; 2708 } 2709 2710 
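/*
 * Illustrative private data layout (values are hypothetical): a
 * NUM_DATA_BLOCKS descriptor, then for each block a BLOCK_TYPE and a
 * BLOCK_SIZE descriptor followed by the payload.  A TUPLE payload is a
 * sequence of vendor arrays whose value elements are handed one at a time
 * to skl_tplg_get_token() above; for example a SKL_TKN_U32_DIR_PIN_COUNT
 * value of 0x21 decodes as dir = (0x21 & SKL_IN_DIR_BIT_MASK) = 1 and
 * pin_index = (0x21 & SKL_PIN_COUNT_MASK) >> 4 = 2.
 */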
/* 2711 * Every data block is preceded by a descriptor to read the number 2712 * of data blocks, they type of the block and it's size 2713 */ 2714 static int skl_tplg_get_desc_blocks(struct device *dev, 2715 struct snd_soc_tplg_vendor_array *array) 2716 { 2717 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2718 2719 tkn_elem = array->value; 2720 2721 switch (tkn_elem->token) { 2722 case SKL_TKN_U8_NUM_BLOCKS: 2723 case SKL_TKN_U8_BLOCK_TYPE: 2724 case SKL_TKN_U16_BLOCK_SIZE: 2725 return tkn_elem->value; 2726 2727 default: 2728 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token); 2729 break; 2730 } 2731 2732 return -EINVAL; 2733 } 2734 2735 /* Functions to parse private data from configuration file format v4 */ 2736 2737 /* 2738 * Add pipeline from topology binary into driver pipeline list 2739 * 2740 * If already added we return that instance 2741 * Otherwise we create a new instance and add into driver list 2742 */ 2743 static int skl_tplg_add_pipe_v4(struct device *dev, 2744 struct skl_module_cfg *mconfig, struct skl *skl, 2745 struct skl_dfw_v4_pipe *dfw_pipe) 2746 { 2747 struct skl_pipeline *ppl; 2748 struct skl_pipe *pipe; 2749 struct skl_pipe_params *params; 2750 2751 list_for_each_entry(ppl, &skl->ppl_list, node) { 2752 if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) { 2753 mconfig->pipe = ppl->pipe; 2754 return 0; 2755 } 2756 } 2757 2758 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2759 if (!ppl) 2760 return -ENOMEM; 2761 2762 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2763 if (!pipe) 2764 return -ENOMEM; 2765 2766 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2767 if (!params) 2768 return -ENOMEM; 2769 2770 pipe->ppl_id = dfw_pipe->pipe_id; 2771 pipe->memory_pages = dfw_pipe->memory_pages; 2772 pipe->pipe_priority = dfw_pipe->pipe_priority; 2773 pipe->conn_type = dfw_pipe->conn_type; 2774 pipe->state = SKL_PIPE_INVALID; 2775 pipe->p_params = params; 2776 INIT_LIST_HEAD(&pipe->w_list); 2777 2778 ppl->pipe = pipe; 2779 list_add(&ppl->node, &skl->ppl_list); 2780 2781 mconfig->pipe = pipe; 2782 2783 return 0; 2784 } 2785 2786 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin, 2787 struct skl_module_pin *m_pin, 2788 bool is_dynamic, int max_pin) 2789 { 2790 int i; 2791 2792 for (i = 0; i < max_pin; i++) { 2793 m_pin[i].id.module_id = dfw_pin[i].module_id; 2794 m_pin[i].id.instance_id = dfw_pin[i].instance_id; 2795 m_pin[i].in_use = false; 2796 m_pin[i].is_dynamic = is_dynamic; 2797 m_pin[i].pin_state = SKL_PIN_UNBIND; 2798 } 2799 } 2800 2801 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt, 2802 struct skl_dfw_v4_module_fmt *src_fmt, 2803 int pins) 2804 { 2805 int i; 2806 2807 for (i = 0; i < pins; i++) { 2808 dst_fmt[i].fmt.channels = src_fmt[i].channels; 2809 dst_fmt[i].fmt.s_freq = src_fmt[i].freq; 2810 dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth; 2811 dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth; 2812 dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg; 2813 dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map; 2814 dst_fmt[i].fmt.interleaving_style = 2815 src_fmt[i].interleaving_style; 2816 dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type; 2817 } 2818 } 2819 2820 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w, 2821 struct skl *skl, struct device *dev, 2822 struct skl_module_cfg *mconfig) 2823 { 2824 struct skl_dfw_v4_module *dfw = 2825 (struct skl_dfw_v4_module *)tplg_w->priv.data; 2826 int ret; 2827 2828 dev_dbg(dev, "Parsing Skylake v4 widget topology data\n"); 2829 
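	/*
	 * v4 private data is a fixed struct skl_dfw_v4_module rather than a
	 * token stream, so its fields are copied one by one into index 0 of
	 * the module's resource and format tables.
	 */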
2830 ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid); 2831 if (ret) 2832 return ret; 2833 mconfig->id.module_id = -1; 2834 mconfig->id.instance_id = dfw->instance_id; 2835 mconfig->module->resources[0].cps = dfw->max_mcps; 2836 mconfig->module->resources[0].ibs = dfw->ibs; 2837 mconfig->module->resources[0].obs = dfw->obs; 2838 mconfig->core_id = dfw->core_id; 2839 mconfig->module->max_input_pins = dfw->max_in_queue; 2840 mconfig->module->max_output_pins = dfw->max_out_queue; 2841 mconfig->module->loadable = dfw->is_loadable; 2842 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt, 2843 MAX_IN_QUEUE); 2844 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt, 2845 MAX_OUT_QUEUE); 2846 2847 mconfig->params_fixup = dfw->params_fixup; 2848 mconfig->converter = dfw->converter; 2849 mconfig->m_type = dfw->module_type; 2850 mconfig->vbus_id = dfw->vbus_id; 2851 mconfig->module->resources[0].is_pages = dfw->mem_pages; 2852 2853 ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe); 2854 if (ret) 2855 return ret; 2856 2857 mconfig->dev_type = dfw->dev_type; 2858 mconfig->hw_conn_type = dfw->hw_conn_type; 2859 mconfig->time_slot = dfw->time_slot; 2860 mconfig->formats_config.caps_size = dfw->caps.caps_size; 2861 2862 mconfig->m_in_pin = devm_kcalloc(dev, 2863 MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin), 2864 GFP_KERNEL); 2865 if (!mconfig->m_in_pin) 2866 return -ENOMEM; 2867 2868 mconfig->m_out_pin = devm_kcalloc(dev, 2869 MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin), 2870 GFP_KERNEL); 2871 if (!mconfig->m_out_pin) 2872 return -ENOMEM; 2873 2874 skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin, 2875 dfw->is_dynamic_in_pin, 2876 mconfig->module->max_input_pins); 2877 skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin, 2878 dfw->is_dynamic_out_pin, 2879 mconfig->module->max_output_pins); 2880 2881 if (mconfig->formats_config.caps_size) { 2882 mconfig->formats_config.set_params = dfw->caps.set_params; 2883 mconfig->formats_config.param_id = dfw->caps.param_id; 2884 mconfig->formats_config.caps = 2885 devm_kzalloc(dev, mconfig->formats_config.caps_size, 2886 GFP_KERNEL); 2887 if (!mconfig->formats_config.caps) 2888 return -ENOMEM; 2889 memcpy(mconfig->formats_config.caps, dfw->caps.caps, 2890 dfw->caps.caps_size); 2891 } 2892 2893 return 0; 2894 } 2895 2896 /* 2897 * Parse the private data for the token and corresponding value. 2898 * The private data can have multiple data blocks. So, a data block 2899 * is preceded by a descriptor for number of blocks and a descriptor 2900 * for the type and size of the suceeding data block. 2901 */ 2902 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w, 2903 struct skl *skl, struct device *dev, 2904 struct skl_module_cfg *mconfig) 2905 { 2906 struct snd_soc_tplg_vendor_array *array; 2907 int num_blocks, block_size = 0, block_type, off = 0; 2908 char *data; 2909 int ret; 2910 2911 /* 2912 * v4 configuration files have a valid UUID at the start of 2913 * the widget's private data. 
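 * Tuple based private data instead starts with the NUM_DATA_BLOCKS
 * descriptor parsed below, so checking the first bytes with
 * uuid_is_valid() is enough to tell the two layouts apart.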
2914 */ 2915 if (uuid_is_valid((char *)tplg_w->priv.data)) 2916 return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig); 2917 2918 /* Read the NUM_DATA_BLOCKS descriptor */ 2919 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data; 2920 ret = skl_tplg_get_desc_blocks(dev, array); 2921 if (ret < 0) 2922 return ret; 2923 num_blocks = ret; 2924 2925 off += array->size; 2926 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 2927 while (num_blocks > 0) { 2928 array = (struct snd_soc_tplg_vendor_array *) 2929 (tplg_w->priv.data + off); 2930 2931 ret = skl_tplg_get_desc_blocks(dev, array); 2932 2933 if (ret < 0) 2934 return ret; 2935 block_type = ret; 2936 off += array->size; 2937 2938 array = (struct snd_soc_tplg_vendor_array *) 2939 (tplg_w->priv.data + off); 2940 2941 ret = skl_tplg_get_desc_blocks(dev, array); 2942 2943 if (ret < 0) 2944 return ret; 2945 block_size = ret; 2946 off += array->size; 2947 2948 array = (struct snd_soc_tplg_vendor_array *) 2949 (tplg_w->priv.data + off); 2950 2951 data = (tplg_w->priv.data + off); 2952 2953 if (block_type == SKL_TYPE_TUPLE) { 2954 ret = skl_tplg_get_tokens(dev, data, 2955 skl, mconfig, block_size); 2956 2957 if (ret < 0) 2958 return ret; 2959 2960 --num_blocks; 2961 } else { 2962 if (mconfig->formats_config.caps_size > 0) 2963 memcpy(mconfig->formats_config.caps, data, 2964 mconfig->formats_config.caps_size); 2965 --num_blocks; 2966 ret = mconfig->formats_config.caps_size; 2967 } 2968 off += ret; 2969 } 2970 2971 return 0; 2972 } 2973 2974 static void skl_clear_pin_config(struct snd_soc_component *component, 2975 struct snd_soc_dapm_widget *w) 2976 { 2977 int i; 2978 struct skl_module_cfg *mconfig; 2979 struct skl_pipe *pipe; 2980 2981 if (!strncmp(w->dapm->component->name, component->name, 2982 strlen(component->name))) { 2983 mconfig = w->priv; 2984 pipe = mconfig->pipe; 2985 for (i = 0; i < mconfig->module->max_input_pins; i++) { 2986 mconfig->m_in_pin[i].in_use = false; 2987 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND; 2988 } 2989 for (i = 0; i < mconfig->module->max_output_pins; i++) { 2990 mconfig->m_out_pin[i].in_use = false; 2991 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND; 2992 } 2993 pipe->state = SKL_PIPE_INVALID; 2994 mconfig->m_state = SKL_MODULE_UNINIT; 2995 } 2996 } 2997 2998 void skl_cleanup_resources(struct skl *skl) 2999 { 3000 struct skl_sst *ctx = skl->skl_sst; 3001 struct snd_soc_component *soc_component = skl->component; 3002 struct snd_soc_dapm_widget *w; 3003 struct snd_soc_card *card; 3004 3005 if (soc_component == NULL) 3006 return; 3007 3008 card = soc_component->card; 3009 if (!card || !card->instantiated) 3010 return; 3011 3012 skl->resource.mem = 0; 3013 skl->resource.mcps = 0; 3014 3015 list_for_each_entry(w, &card->widgets, list) { 3016 if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL) 3017 skl_clear_pin_config(soc_component, w); 3018 } 3019 3020 skl_clear_module_cnt(ctx->dsp); 3021 } 3022 3023 /* 3024 * Topology core widget load callback 3025 * 3026 * This is used to save the private data for each widget which gives 3027 * information to the driver about module and pipeline parameters which DSP 3028 * FW expects like ids, resource values, formats etc 3029 */ 3030 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index, 3031 struct snd_soc_dapm_widget *w, 3032 struct snd_soc_tplg_dapm_widget *tplg_w) 3033 { 3034 int ret; 3035 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3036 struct skl *skl = bus_to_skl(bus); 3037 struct skl_module_cfg *mconfig; 3038 
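	/*
	 * Widgets without private data have no DSP module attached; they only
	 * need their DAPM event handler bound below.
	 */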
	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
	if (!mconfig)
		return -ENOMEM;

	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set it to query when
	 * module is loaded for a use case
	 */
	mconfig->id.module_id = -1;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);
	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
					__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}

static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	if (ac->max) {
		ac->params = (char *)devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}

static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
				struct snd_soc_tplg_enum_control *ec)
{
	void *data;

	if (ec->priv.size) {
		/* allocate the full private blob before copying it */
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		memcpy(data, ec->priv.data, ec->priv.size);
		se->dobj.private = data;
	}

	return 0;
}

static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				int index,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
			se = (struct soc_enum *)kctl->private_value;
			if (tplg_ec->priv.size)
				return skl_init_enum_data(bus->dev, se,
						tplg_ec);
		}
		break;

	default:
		dev_dbg(bus->dev, "Control load not supported 
%d:%d:%d\n", 3171 hdr->ops.get, hdr->ops.put, hdr->ops.info); 3172 break; 3173 } 3174 3175 return 0; 3176 } 3177 3178 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 3179 struct snd_soc_tplg_vendor_string_elem *str_elem, 3180 struct skl *skl) 3181 { 3182 int tkn_count = 0; 3183 static int ref_count; 3184 3185 switch (str_elem->token) { 3186 case SKL_TKN_STR_LIB_NAME: 3187 if (ref_count > skl->skl_sst->lib_count - 1) { 3188 ref_count = 0; 3189 return -EINVAL; 3190 } 3191 3192 strncpy(skl->skl_sst->lib_info[ref_count].name, 3193 str_elem->string, 3194 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name)); 3195 ref_count++; 3196 break; 3197 3198 default: 3199 dev_err(dev, "Not a string token %d\n", str_elem->token); 3200 break; 3201 } 3202 tkn_count++; 3203 3204 return tkn_count; 3205 } 3206 3207 static int skl_tplg_get_str_tkn(struct device *dev, 3208 struct snd_soc_tplg_vendor_array *array, 3209 struct skl *skl) 3210 { 3211 int tkn_count = 0, ret; 3212 struct snd_soc_tplg_vendor_string_elem *str_elem; 3213 3214 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value; 3215 while (tkn_count < array->num_elems) { 3216 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl); 3217 str_elem++; 3218 3219 if (ret < 0) 3220 return ret; 3221 3222 tkn_count = tkn_count + ret; 3223 } 3224 3225 return tkn_count; 3226 } 3227 3228 static int skl_tplg_manifest_fill_fmt(struct device *dev, 3229 struct skl_module_iface *fmt, 3230 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3231 u32 dir, int fmt_idx) 3232 { 3233 struct skl_module_pin_fmt *dst_fmt; 3234 struct skl_module_fmt *mod_fmt; 3235 int ret; 3236 3237 if (!fmt) 3238 return -EINVAL; 3239 3240 switch (dir) { 3241 case SKL_DIR_IN: 3242 dst_fmt = &fmt->inputs[fmt_idx]; 3243 break; 3244 3245 case SKL_DIR_OUT: 3246 dst_fmt = &fmt->outputs[fmt_idx]; 3247 break; 3248 3249 default: 3250 dev_err(dev, "Invalid direction: %d\n", dir); 3251 return -EINVAL; 3252 } 3253 3254 mod_fmt = &dst_fmt->fmt; 3255 3256 switch (tkn_elem->token) { 3257 case SKL_TKN_MM_U32_INTF_PIN_ID: 3258 dst_fmt->id = tkn_elem->value; 3259 break; 3260 3261 default: 3262 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token, 3263 tkn_elem->value); 3264 if (ret < 0) 3265 return ret; 3266 break; 3267 } 3268 3269 return 0; 3270 } 3271 3272 static int skl_tplg_fill_mod_info(struct device *dev, 3273 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3274 struct skl_module *mod) 3275 { 3276 3277 if (!mod) 3278 return -EINVAL; 3279 3280 switch (tkn_elem->token) { 3281 case SKL_TKN_U8_IN_PIN_TYPE: 3282 mod->input_pin_type = tkn_elem->value; 3283 break; 3284 3285 case SKL_TKN_U8_OUT_PIN_TYPE: 3286 mod->output_pin_type = tkn_elem->value; 3287 break; 3288 3289 case SKL_TKN_U8_IN_QUEUE_COUNT: 3290 mod->max_input_pins = tkn_elem->value; 3291 break; 3292 3293 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3294 mod->max_output_pins = tkn_elem->value; 3295 break; 3296 3297 case SKL_TKN_MM_U8_NUM_RES: 3298 mod->nr_resources = tkn_elem->value; 3299 break; 3300 3301 case SKL_TKN_MM_U8_NUM_INTF: 3302 mod->nr_interfaces = tkn_elem->value; 3303 break; 3304 3305 default: 3306 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3307 return -EINVAL; 3308 } 3309 3310 return 0; 3311 } 3312 3313 3314 static int skl_tplg_get_int_tkn(struct device *dev, 3315 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3316 struct skl *skl) 3317 { 3318 int tkn_count = 0, ret, size; 3319 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3320 struct skl_module_res *res = NULL; 3321 struct skl_module_iface *fmt = 
NULL; 3322 struct skl_module *mod = NULL; 3323 static struct skl_astate_param *astate_table; 3324 static int astate_cfg_idx, count; 3325 int i; 3326 3327 if (skl->modules) { 3328 mod = skl->modules[mod_idx]; 3329 res = &mod->resources[res_val_idx]; 3330 fmt = &mod->formats[intf_val_idx]; 3331 } 3332 3333 switch (tkn_elem->token) { 3334 case SKL_TKN_U32_LIB_COUNT: 3335 skl->skl_sst->lib_count = tkn_elem->value; 3336 break; 3337 3338 case SKL_TKN_U8_NUM_MOD: 3339 skl->nr_modules = tkn_elem->value; 3340 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3341 sizeof(*skl->modules), GFP_KERNEL); 3342 if (!skl->modules) 3343 return -ENOMEM; 3344 3345 for (i = 0; i < skl->nr_modules; i++) { 3346 skl->modules[i] = devm_kzalloc(dev, 3347 sizeof(struct skl_module), GFP_KERNEL); 3348 if (!skl->modules[i]) 3349 return -ENOMEM; 3350 } 3351 break; 3352 3353 case SKL_TKN_MM_U8_MOD_IDX: 3354 mod_idx = tkn_elem->value; 3355 break; 3356 3357 case SKL_TKN_U32_ASTATE_COUNT: 3358 if (astate_table != NULL) { 3359 dev_err(dev, "More than one entry for A-State count"); 3360 return -EINVAL; 3361 } 3362 3363 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3364 dev_err(dev, "Invalid A-State count %d\n", 3365 tkn_elem->value); 3366 return -EINVAL; 3367 } 3368 3369 size = tkn_elem->value * sizeof(struct skl_astate_param) + 3370 sizeof(count); 3371 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3372 if (!skl->cfg.astate_cfg) 3373 return -ENOMEM; 3374 3375 astate_table = skl->cfg.astate_cfg->astate_table; 3376 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3377 break; 3378 3379 case SKL_TKN_U32_ASTATE_IDX: 3380 if (tkn_elem->value >= count) { 3381 dev_err(dev, "Invalid A-State index %d\n", 3382 tkn_elem->value); 3383 return -EINVAL; 3384 } 3385 3386 astate_cfg_idx = tkn_elem->value; 3387 break; 3388 3389 case SKL_TKN_U32_ASTATE_KCPS: 3390 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3391 break; 3392 3393 case SKL_TKN_U32_ASTATE_CLK_SRC: 3394 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3395 break; 3396 3397 case SKL_TKN_U8_IN_PIN_TYPE: 3398 case SKL_TKN_U8_OUT_PIN_TYPE: 3399 case SKL_TKN_U8_IN_QUEUE_COUNT: 3400 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3401 case SKL_TKN_MM_U8_NUM_RES: 3402 case SKL_TKN_MM_U8_NUM_INTF: 3403 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3404 if (ret < 0) 3405 return ret; 3406 break; 3407 3408 case SKL_TKN_U32_DIR_PIN_COUNT: 3409 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3410 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3411 break; 3412 3413 case SKL_TKN_MM_U32_RES_ID: 3414 if (!res) 3415 return -EINVAL; 3416 3417 res->id = tkn_elem->value; 3418 res_val_idx = tkn_elem->value; 3419 break; 3420 3421 case SKL_TKN_MM_U32_FMT_ID: 3422 if (!fmt) 3423 return -EINVAL; 3424 3425 fmt->fmt_idx = tkn_elem->value; 3426 intf_val_idx = tkn_elem->value; 3427 break; 3428 3429 case SKL_TKN_MM_U32_CPS: 3430 case SKL_TKN_MM_U32_DMA_SIZE: 3431 case SKL_TKN_MM_U32_CPC: 3432 case SKL_TKN_U32_MEM_PAGES: 3433 case SKL_TKN_U32_OBS: 3434 case SKL_TKN_U32_IBS: 3435 case SKL_TKN_MM_U32_RES_PIN_ID: 3436 case SKL_TKN_MM_U32_PIN_BUF: 3437 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3438 if (ret < 0) 3439 return ret; 3440 3441 break; 3442 3443 case SKL_TKN_MM_U32_NUM_IN_FMT: 3444 if (!fmt) 3445 return -EINVAL; 3446 3447 res->nr_input_pins = tkn_elem->value; 3448 break; 3449 3450 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3451 if (!fmt) 3452 return -EINVAL; 3453 3454 res->nr_output_pins = tkn_elem->value; 3455 break; 3456 3457 case SKL_TKN_U32_FMT_CH: 3458 case 
SKL_TKN_U32_FMT_FREQ: 3459 case SKL_TKN_U32_FMT_BIT_DEPTH: 3460 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3461 case SKL_TKN_U32_FMT_CH_CONFIG: 3462 case SKL_TKN_U32_FMT_INTERLEAVE: 3463 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3464 case SKL_TKN_U32_FMT_CH_MAP: 3465 case SKL_TKN_MM_U32_INTF_PIN_ID: 3466 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3467 dir, pin_idx); 3468 if (ret < 0) 3469 return ret; 3470 break; 3471 3472 default: 3473 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3474 return -EINVAL; 3475 } 3476 tkn_count++; 3477 3478 return tkn_count; 3479 } 3480 3481 static int skl_tplg_get_manifest_uuid(struct device *dev, 3482 struct skl *skl, 3483 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 3484 { 3485 static int ref_count; 3486 struct skl_module *mod; 3487 3488 if (uuid_tkn->token == SKL_TKN_UUID) { 3489 mod = skl->modules[ref_count]; 3490 memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid)); 3491 ref_count++; 3492 } else { 3493 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token); 3494 return -EINVAL; 3495 } 3496 3497 return 0; 3498 } 3499 3500 /* 3501 * Fill the manifest structure by parsing the tokens based on the 3502 * type. 3503 */ 3504 static int skl_tplg_get_manifest_tkn(struct device *dev, 3505 char *pvt_data, struct skl *skl, 3506 int block_size) 3507 { 3508 int tkn_count = 0, ret; 3509 int off = 0, tuple_size = 0; 3510 struct snd_soc_tplg_vendor_array *array; 3511 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 3512 3513 if (block_size <= 0) 3514 return -EINVAL; 3515 3516 while (tuple_size < block_size) { 3517 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 3518 off += array->size; 3519 switch (array->type) { 3520 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 3521 ret = skl_tplg_get_str_tkn(dev, array, skl); 3522 3523 if (ret < 0) 3524 return ret; 3525 tkn_count = ret; 3526 3527 tuple_size += tkn_count * 3528 sizeof(struct snd_soc_tplg_vendor_string_elem); 3529 continue; 3530 3531 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 3532 ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid); 3533 if (ret < 0) 3534 return ret; 3535 3536 tuple_size += sizeof(*array->uuid); 3537 continue; 3538 3539 default: 3540 tkn_elem = array->value; 3541 tkn_count = 0; 3542 break; 3543 } 3544 3545 while (tkn_count <= array->num_elems - 1) { 3546 ret = skl_tplg_get_int_tkn(dev, 3547 tkn_elem, skl); 3548 if (ret < 0) 3549 return ret; 3550 3551 tkn_count = tkn_count + ret; 3552 tkn_elem++; 3553 } 3554 tuple_size += (tkn_count * sizeof(*tkn_elem)); 3555 tkn_count = 0; 3556 } 3557 3558 return off; 3559 } 3560 3561 /* 3562 * Parse manifest private data for tokens. The private data block is 3563 * preceded by descriptors for type and size of data block. 
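 * The layout mirrors the widget private data handled by
 * skl_tplg_get_pvt_data(), except that only TUPLE blocks are accepted in
 * the manifest.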
3564 */ 3565 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3566 struct device *dev, struct skl *skl) 3567 { 3568 struct snd_soc_tplg_vendor_array *array; 3569 int num_blocks, block_size = 0, block_type, off = 0; 3570 char *data; 3571 int ret; 3572 3573 /* Read the NUM_DATA_BLOCKS descriptor */ 3574 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3575 ret = skl_tplg_get_desc_blocks(dev, array); 3576 if (ret < 0) 3577 return ret; 3578 num_blocks = ret; 3579 3580 off += array->size; 3581 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3582 while (num_blocks > 0) { 3583 array = (struct snd_soc_tplg_vendor_array *) 3584 (manifest->priv.data + off); 3585 ret = skl_tplg_get_desc_blocks(dev, array); 3586 3587 if (ret < 0) 3588 return ret; 3589 block_type = ret; 3590 off += array->size; 3591 3592 array = (struct snd_soc_tplg_vendor_array *) 3593 (manifest->priv.data + off); 3594 3595 ret = skl_tplg_get_desc_blocks(dev, array); 3596 3597 if (ret < 0) 3598 return ret; 3599 block_size = ret; 3600 off += array->size; 3601 3602 array = (struct snd_soc_tplg_vendor_array *) 3603 (manifest->priv.data + off); 3604 3605 data = (manifest->priv.data + off); 3606 3607 if (block_type == SKL_TYPE_TUPLE) { 3608 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3609 block_size); 3610 3611 if (ret < 0) 3612 return ret; 3613 3614 --num_blocks; 3615 } else { 3616 return -EINVAL; 3617 } 3618 off += ret; 3619 } 3620 3621 return 0; 3622 } 3623 3624 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, 3625 struct snd_soc_tplg_manifest *manifest) 3626 { 3627 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3628 struct skl *skl = bus_to_skl(bus); 3629 3630 /* proceed only if we have private data defined */ 3631 if (manifest->priv.size == 0) 3632 return 0; 3633 3634 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3635 3636 if (skl->skl_sst->lib_count > SKL_MAX_LIB) { 3637 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n", 3638 skl->skl_sst->lib_count); 3639 return -EINVAL; 3640 } 3641 3642 return 0; 3643 } 3644 3645 static struct snd_soc_tplg_ops skl_tplg_ops = { 3646 .widget_load = skl_tplg_widget_load, 3647 .control_load = skl_tplg_control_load, 3648 .bytes_ext_ops = skl_tlv_ops, 3649 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3650 .io_ops = skl_tplg_kcontrol_ops, 3651 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3652 .manifest = skl_manifest_load, 3653 .dai_load = skl_dai_load, 3654 }; 3655 3656 /* 3657 * A pipe can have multiple modules, each of them will be a DAPM widget as 3658 * well. 
While managing a pipeline we need to get the list of all the 3659 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3660 * helps to get the SKL type widgets in that pipeline 3661 */ 3662 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3663 { 3664 struct snd_soc_dapm_widget *w; 3665 struct skl_module_cfg *mcfg = NULL; 3666 struct skl_pipe_module *p_module = NULL; 3667 struct skl_pipe *pipe; 3668 3669 list_for_each_entry(w, &component->card->widgets, list) { 3670 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3671 mcfg = w->priv; 3672 pipe = mcfg->pipe; 3673 3674 p_module = devm_kzalloc(component->dev, 3675 sizeof(*p_module), GFP_KERNEL); 3676 if (!p_module) 3677 return -ENOMEM; 3678 3679 p_module->w = w; 3680 list_add_tail(&p_module->node, &pipe->w_list); 3681 } 3682 } 3683 3684 return 0; 3685 } 3686 3687 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe) 3688 { 3689 struct skl_pipe_module *w_module; 3690 struct snd_soc_dapm_widget *w; 3691 struct skl_module_cfg *mconfig; 3692 bool host_found = false, link_found = false; 3693 3694 list_for_each_entry(w_module, &pipe->w_list, node) { 3695 w = w_module->w; 3696 mconfig = w->priv; 3697 3698 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3699 host_found = true; 3700 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3701 link_found = true; 3702 } 3703 3704 if (host_found && link_found) 3705 pipe->passthru = true; 3706 else 3707 pipe->passthru = false; 3708 } 3709 3710 /* This will be read from topology manifest, currently defined here */ 3711 #define SKL_MAX_MCPS 30000000 3712 #define SKL_FW_MAX_MEM 1000000 3713 3714 /* 3715 * SKL topology init routine 3716 */ 3717 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) 3718 { 3719 int ret; 3720 const struct firmware *fw; 3721 struct skl *skl = bus_to_skl(bus); 3722 struct skl_pipeline *ppl; 3723 3724 ret = request_firmware(&fw, skl->tplg_name, bus->dev); 3725 if (ret < 0) { 3726 dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", 3727 skl->tplg_name, ret); 3728 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3729 if (ret < 0) { 3730 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3731 "dfw_sst.bin", ret); 3732 return ret; 3733 } 3734 } 3735 3736 /* 3737 * The complete tplg for SKL is loaded as index 0, we don't use 3738 * any other index 3739 */ 3740 ret = snd_soc_tplg_component_load(component, 3741 &skl_tplg_ops, fw, 0); 3742 if (ret < 0) { 3743 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3744 release_firmware(fw); 3745 return -EINVAL; 3746 } 3747 3748 skl->resource.max_mcps = SKL_MAX_MCPS; 3749 skl->resource.max_mem = SKL_FW_MAX_MEM; 3750 3751 skl->tplg = fw; 3752 ret = skl_tplg_create_pipe_widget_list(component); 3753 if (ret < 0) 3754 return ret; 3755 3756 list_for_each_entry(ppl, &skl->ppl_list, node) 3757 skl_tplg_set_pipe_type(skl, ppl->pipe); 3758 3759 return 0; 3760 } 3761