1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * skl-topology.c - Implements Platform component ALSA controls/widget 4 * handlers. 5 * 6 * Copyright (C) 2014-2015 Intel Corp 7 * Author: Jeeja KP <jeeja.kp@intel.com> 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 */ 10 11 #include <linux/slab.h> 12 #include <linux/types.h> 13 #include <linux/firmware.h> 14 #include <linux/uuid.h> 15 #include <sound/soc.h> 16 #include <sound/soc-topology.h> 17 #include <uapi/sound/snd_sst_tokens.h> 18 #include <uapi/sound/skl-tplg-interface.h> 19 #include "skl-sst-dsp.h" 20 #include "skl-sst-ipc.h" 21 #include "skl-topology.h" 22 #include "skl.h" 23 #include "../common/sst-dsp.h" 24 #include "../common/sst-dsp-priv.h" 25 26 #define SKL_CH_FIXUP_MASK (1 << 0) 27 #define SKL_RATE_FIXUP_MASK (1 << 1) 28 #define SKL_FMT_FIXUP_MASK (1 << 2) 29 #define SKL_IN_DIR_BIT_MASK BIT(0) 30 #define SKL_PIN_COUNT_MASK GENMASK(7, 4) 31 32 static const int mic_mono_list[] = { 33 0, 1, 2, 3, 34 }; 35 static const int mic_stereo_list[][SKL_CH_STEREO] = { 36 {0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}, 37 }; 38 static const int mic_trio_list[][SKL_CH_TRIO] = { 39 {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3}, 40 }; 41 static const int mic_quatro_list[][SKL_CH_QUATRO] = { 42 {0, 1, 2, 3}, 43 }; 44 45 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \ 46 ((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq)) 47 48 void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps) 49 { 50 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3; 51 52 switch (caps) { 53 case SKL_D0I3_NONE: 54 d0i3->non_d0i3++; 55 break; 56 57 case SKL_D0I3_STREAMING: 58 d0i3->streaming++; 59 break; 60 61 case SKL_D0I3_NON_STREAMING: 62 d0i3->non_streaming++; 63 break; 64 } 65 } 66 67 void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps) 68 { 69 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3; 70 71 switch (caps) { 72 case SKL_D0I3_NONE: 73 d0i3->non_d0i3--; 74 
break; 75 76 case SKL_D0I3_STREAMING: 77 d0i3->streaming--; 78 break; 79 80 case SKL_D0I3_NON_STREAMING: 81 d0i3->non_streaming--; 82 break; 83 } 84 } 85 86 /* 87 * SKL DSP driver modelling uses only few DAPM widgets so for rest we will 88 * ignore. This helpers checks if the SKL driver handles this widget type 89 */ 90 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w, 91 struct device *dev) 92 { 93 if (w->dapm->dev != dev) 94 return false; 95 96 switch (w->id) { 97 case snd_soc_dapm_dai_link: 98 case snd_soc_dapm_dai_in: 99 case snd_soc_dapm_aif_in: 100 case snd_soc_dapm_aif_out: 101 case snd_soc_dapm_dai_out: 102 case snd_soc_dapm_switch: 103 case snd_soc_dapm_output: 104 case snd_soc_dapm_mux: 105 106 return false; 107 default: 108 return true; 109 } 110 } 111 112 /* 113 * Each pipelines needs memory to be allocated. Check if we have free memory 114 * from available pool. 115 */ 116 static bool skl_is_pipe_mem_avail(struct skl *skl, 117 struct skl_module_cfg *mconfig) 118 { 119 struct skl_sst *ctx = skl->skl_sst; 120 121 if (skl->resource.mem + mconfig->pipe->memory_pages > 122 skl->resource.max_mem) { 123 dev_err(ctx->dev, 124 "%s: module_id %d instance %d\n", __func__, 125 mconfig->id.module_id, 126 mconfig->id.instance_id); 127 dev_err(ctx->dev, 128 "exceeds ppl memory available %d mem %d\n", 129 skl->resource.max_mem, skl->resource.mem); 130 return false; 131 } else { 132 return true; 133 } 134 } 135 136 /* 137 * Add the mem to the mem pool. This is freed when pipe is deleted. 138 * Note: DSP does actual memory management we only keep track for complete 139 * pool 140 */ 141 static void skl_tplg_alloc_pipe_mem(struct skl *skl, 142 struct skl_module_cfg *mconfig) 143 { 144 skl->resource.mem += mconfig->pipe->memory_pages; 145 } 146 147 /* 148 * Pipeline needs needs DSP CPU resources for computation, this is 149 * quantified in MCPS (Million Clocks Per Second) required for module/pipe 150 * 151 * Each pipelines needs mcps to be allocated. 
Check if we have mcps for this 152 * pipe. 153 */ 154 155 static bool skl_is_pipe_mcps_avail(struct skl *skl, 156 struct skl_module_cfg *mconfig) 157 { 158 struct skl_sst *ctx = skl->skl_sst; 159 u8 res_idx = mconfig->res_idx; 160 struct skl_module_res *res = &mconfig->module->resources[res_idx]; 161 162 if (skl->resource.mcps + res->cps > skl->resource.max_mcps) { 163 dev_err(ctx->dev, 164 "%s: module_id %d instance %d\n", __func__, 165 mconfig->id.module_id, mconfig->id.instance_id); 166 dev_err(ctx->dev, 167 "exceeds ppl mcps available %d > mem %d\n", 168 skl->resource.max_mcps, skl->resource.mcps); 169 return false; 170 } else { 171 return true; 172 } 173 } 174 175 static void skl_tplg_alloc_pipe_mcps(struct skl *skl, 176 struct skl_module_cfg *mconfig) 177 { 178 u8 res_idx = mconfig->res_idx; 179 struct skl_module_res *res = &mconfig->module->resources[res_idx]; 180 181 skl->resource.mcps += res->cps; 182 } 183 184 /* 185 * Free the mcps when tearing down 186 */ 187 static void 188 skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig) 189 { 190 u8 res_idx = mconfig->res_idx; 191 struct skl_module_res *res = &mconfig->module->resources[res_idx]; 192 193 skl->resource.mcps -= res->cps; 194 } 195 196 /* 197 * Free the memory when tearing down 198 */ 199 static void 200 skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig) 201 { 202 skl->resource.mem -= mconfig->pipe->memory_pages; 203 } 204 205 206 static void skl_dump_mconfig(struct skl_sst *ctx, 207 struct skl_module_cfg *mcfg) 208 { 209 struct skl_module_iface *iface = &mcfg->module->formats[0]; 210 211 dev_dbg(ctx->dev, "Dumping config\n"); 212 dev_dbg(ctx->dev, "Input Format:\n"); 213 dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels); 214 dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq); 215 dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg); 216 dev_dbg(ctx->dev, "valid bit depth = %d\n", 217 
iface->inputs[0].fmt.valid_bit_depth); 218 dev_dbg(ctx->dev, "Output Format:\n"); 219 dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels); 220 dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq); 221 dev_dbg(ctx->dev, "valid bit depth = %d\n", 222 iface->outputs[0].fmt.valid_bit_depth); 223 dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg); 224 } 225 226 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs) 227 { 228 int slot_map = 0xFFFFFFFF; 229 int start_slot = 0; 230 int i; 231 232 for (i = 0; i < chs; i++) { 233 /* 234 * For 2 channels with starting slot as 0, slot map will 235 * look like 0xFFFFFF10. 236 */ 237 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i))); 238 start_slot++; 239 } 240 fmt->ch_map = slot_map; 241 } 242 243 static void skl_tplg_update_params(struct skl_module_fmt *fmt, 244 struct skl_pipe_params *params, int fixup) 245 { 246 if (fixup & SKL_RATE_FIXUP_MASK) 247 fmt->s_freq = params->s_freq; 248 if (fixup & SKL_CH_FIXUP_MASK) { 249 fmt->channels = params->ch; 250 skl_tplg_update_chmap(fmt, fmt->channels); 251 } 252 if (fixup & SKL_FMT_FIXUP_MASK) { 253 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 254 255 /* 256 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 257 * container so update bit depth accordingly 258 */ 259 switch (fmt->valid_bit_depth) { 260 case SKL_DEPTH_16BIT: 261 fmt->bit_depth = fmt->valid_bit_depth; 262 break; 263 264 default: 265 fmt->bit_depth = SKL_DEPTH_32BIT; 266 break; 267 } 268 } 269 270 } 271 272 /* 273 * A pipeline may have modules which impact the pcm parameters, like SRC, 274 * channel converter, format converter. 275 * We need to calculate the output params by applying the 'fixup' 276 * Topology will tell driver which type of fixup is to be applied by 277 * supplying the fixup mask, so based on that we calculate the output 278 * 279 * Now In FE the pcm hw_params is source/target format. 
Same is applicable 280 * for BE with its hw_params invoked. 281 * here based on FE, BE pipeline and direction we calculate the input and 282 * outfix and then apply that for a module 283 */ 284 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg, 285 struct skl_pipe_params *params, bool is_fe) 286 { 287 int in_fixup, out_fixup; 288 struct skl_module_fmt *in_fmt, *out_fmt; 289 290 /* Fixups will be applied to pin 0 only */ 291 in_fmt = &m_cfg->module->formats[0].inputs[0].fmt; 292 out_fmt = &m_cfg->module->formats[0].outputs[0].fmt; 293 294 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 295 if (is_fe) { 296 in_fixup = m_cfg->params_fixup; 297 out_fixup = (~m_cfg->converter) & 298 m_cfg->params_fixup; 299 } else { 300 out_fixup = m_cfg->params_fixup; 301 in_fixup = (~m_cfg->converter) & 302 m_cfg->params_fixup; 303 } 304 } else { 305 if (is_fe) { 306 out_fixup = m_cfg->params_fixup; 307 in_fixup = (~m_cfg->converter) & 308 m_cfg->params_fixup; 309 } else { 310 in_fixup = m_cfg->params_fixup; 311 out_fixup = (~m_cfg->converter) & 312 m_cfg->params_fixup; 313 } 314 } 315 316 skl_tplg_update_params(in_fmt, params, in_fixup); 317 skl_tplg_update_params(out_fmt, params, out_fixup); 318 } 319 320 /* 321 * A module needs input and output buffers, which are dependent upon pcm 322 * params, so once we have calculate params, we need buffer calculation as 323 * well. 
324 */ 325 static void skl_tplg_update_buffer_size(struct skl_sst *ctx, 326 struct skl_module_cfg *mcfg) 327 { 328 int multiplier = 1; 329 struct skl_module_fmt *in_fmt, *out_fmt; 330 struct skl_module_res *res; 331 332 /* Since fixups is applied to pin 0 only, ibs, obs needs 333 * change for pin 0 only 334 */ 335 res = &mcfg->module->resources[0]; 336 in_fmt = &mcfg->module->formats[0].inputs[0].fmt; 337 out_fmt = &mcfg->module->formats[0].outputs[0].fmt; 338 339 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 340 multiplier = 5; 341 342 res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) * 343 in_fmt->channels * (in_fmt->bit_depth >> 3) * 344 multiplier; 345 346 res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) * 347 out_fmt->channels * (out_fmt->bit_depth >> 3) * 348 multiplier; 349 } 350 351 static u8 skl_tplg_be_dev_type(int dev_type) 352 { 353 int ret; 354 355 switch (dev_type) { 356 case SKL_DEVICE_BT: 357 ret = NHLT_DEVICE_BT; 358 break; 359 360 case SKL_DEVICE_DMIC: 361 ret = NHLT_DEVICE_DMIC; 362 break; 363 364 case SKL_DEVICE_I2S: 365 ret = NHLT_DEVICE_I2S; 366 break; 367 368 default: 369 ret = NHLT_DEVICE_INVALID; 370 break; 371 } 372 373 return ret; 374 } 375 376 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 377 struct skl_sst *ctx) 378 { 379 struct skl_module_cfg *m_cfg = w->priv; 380 int link_type, dir; 381 u32 ch, s_freq, s_fmt; 382 struct nhlt_specific_cfg *cfg; 383 struct skl *skl = get_skl_ctx(ctx->dev); 384 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type); 385 int fmt_idx = m_cfg->fmt_idx; 386 struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx]; 387 388 /* check if we already have blob */ 389 if (m_cfg->formats_config.caps_size > 0) 390 return 0; 391 392 dev_dbg(ctx->dev, "Applying default cfg blob\n"); 393 switch (m_cfg->dev_type) { 394 case SKL_DEVICE_DMIC: 395 link_type = NHLT_LINK_DMIC; 396 dir = SNDRV_PCM_STREAM_CAPTURE; 397 s_freq = m_iface->inputs[0].fmt.s_freq; 398 s_fmt = m_iface->inputs[0].fmt.bit_depth; 399 
ch = m_iface->inputs[0].fmt.channels; 400 break; 401 402 case SKL_DEVICE_I2S: 403 link_type = NHLT_LINK_SSP; 404 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) { 405 dir = SNDRV_PCM_STREAM_PLAYBACK; 406 s_freq = m_iface->outputs[0].fmt.s_freq; 407 s_fmt = m_iface->outputs[0].fmt.bit_depth; 408 ch = m_iface->outputs[0].fmt.channels; 409 } else { 410 dir = SNDRV_PCM_STREAM_CAPTURE; 411 s_freq = m_iface->inputs[0].fmt.s_freq; 412 s_fmt = m_iface->inputs[0].fmt.bit_depth; 413 ch = m_iface->inputs[0].fmt.channels; 414 } 415 break; 416 417 default: 418 return -EINVAL; 419 } 420 421 /* update the blob based on virtual bus_id and default params */ 422 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type, 423 s_fmt, ch, s_freq, dir, dev_type); 424 if (cfg) { 425 m_cfg->formats_config.caps_size = cfg->size; 426 m_cfg->formats_config.caps = (u32 *) &cfg->caps; 427 } else { 428 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n", 429 m_cfg->vbus_id, link_type, dir); 430 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n", 431 ch, s_freq, s_fmt); 432 return -EIO; 433 } 434 435 return 0; 436 } 437 438 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w, 439 struct skl_sst *ctx) 440 { 441 struct skl_module_cfg *m_cfg = w->priv; 442 struct skl_pipe_params *params = m_cfg->pipe->p_params; 443 int p_conn_type = m_cfg->pipe->conn_type; 444 bool is_fe; 445 446 if (!m_cfg->params_fixup) 447 return; 448 449 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n", 450 w->name); 451 452 skl_dump_mconfig(ctx, m_cfg); 453 454 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE) 455 is_fe = true; 456 else 457 is_fe = false; 458 459 skl_tplg_update_params_fixup(m_cfg, params, is_fe); 460 skl_tplg_update_buffer_size(ctx, m_cfg); 461 462 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n", 463 w->name); 464 465 skl_dump_mconfig(ctx, m_cfg); 466 } 467 468 /* 469 * some modules can have multiple params set from user control and 470 * need to be set after module is 
initialized. If set_param flag is 471 * set module params will be done after module is initialised. 472 */ 473 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w, 474 struct skl_sst *ctx) 475 { 476 int i, ret; 477 struct skl_module_cfg *mconfig = w->priv; 478 const struct snd_kcontrol_new *k; 479 struct soc_bytes_ext *sb; 480 struct skl_algo_data *bc; 481 struct skl_specific_cfg *sp_cfg; 482 483 if (mconfig->formats_config.caps_size > 0 && 484 mconfig->formats_config.set_params == SKL_PARAM_SET) { 485 sp_cfg = &mconfig->formats_config; 486 ret = skl_set_module_params(ctx, sp_cfg->caps, 487 sp_cfg->caps_size, 488 sp_cfg->param_id, mconfig); 489 if (ret < 0) 490 return ret; 491 } 492 493 for (i = 0; i < w->num_kcontrols; i++) { 494 k = &w->kcontrol_news[i]; 495 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 496 sb = (void *) k->private_value; 497 bc = (struct skl_algo_data *)sb->dobj.private; 498 499 if (bc->set_params == SKL_PARAM_SET) { 500 ret = skl_set_module_params(ctx, 501 (u32 *)bc->params, bc->size, 502 bc->param_id, mconfig); 503 if (ret < 0) 504 return ret; 505 } 506 } 507 } 508 509 return 0; 510 } 511 512 /* 513 * some module param can set from user control and this is required as 514 * when module is initailzed. if module param is required in init it is 515 * identifed by set_param flag. if set_param flag is not set, then this 516 * parameter needs to set as part of module init. 
517 */ 518 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w) 519 { 520 const struct snd_kcontrol_new *k; 521 struct soc_bytes_ext *sb; 522 struct skl_algo_data *bc; 523 struct skl_module_cfg *mconfig = w->priv; 524 int i; 525 526 for (i = 0; i < w->num_kcontrols; i++) { 527 k = &w->kcontrol_news[i]; 528 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 529 sb = (struct soc_bytes_ext *)k->private_value; 530 bc = (struct skl_algo_data *)sb->dobj.private; 531 532 if (bc->set_params != SKL_PARAM_INIT) 533 continue; 534 535 mconfig->formats_config.caps = (u32 *)bc->params; 536 mconfig->formats_config.caps_size = bc->size; 537 538 break; 539 } 540 } 541 542 return 0; 543 } 544 545 static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe, 546 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg) 547 { 548 switch (mcfg->dev_type) { 549 case SKL_DEVICE_HDAHOST: 550 return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params); 551 552 case SKL_DEVICE_HDALINK: 553 return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params); 554 } 555 556 return 0; 557 } 558 559 /* 560 * Inside a pipe instance, we can have various modules. 
These modules need 561 * to instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by 562 * skl_init_module() routine, so invoke that for all modules in a pipeline 563 */ 564 static int 565 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) 566 { 567 struct skl_pipe_module *w_module; 568 struct snd_soc_dapm_widget *w; 569 struct skl_module_cfg *mconfig; 570 struct skl_sst *ctx = skl->skl_sst; 571 u8 cfg_idx; 572 int ret = 0; 573 574 list_for_each_entry(w_module, &pipe->w_list, node) { 575 uuid_le *uuid_mod; 576 w = w_module->w; 577 mconfig = w->priv; 578 579 /* check if module ids are populated */ 580 if (mconfig->id.module_id < 0) { 581 dev_err(skl->skl_sst->dev, 582 "module %pUL id not populated\n", 583 (uuid_le *)mconfig->guid); 584 return -EIO; 585 } 586 587 cfg_idx = mconfig->pipe->cur_config_idx; 588 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 589 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 590 591 /* check resource available */ 592 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 593 return -ENOMEM; 594 595 if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) { 596 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 597 mconfig->id.module_id, mconfig->guid); 598 if (ret < 0) 599 return ret; 600 601 mconfig->m_state = SKL_MODULE_LOADED; 602 } 603 604 /* prepare the DMA if the module is gateway cpr */ 605 ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig); 606 if (ret < 0) 607 return ret; 608 609 /* update blob if blob is null for be with default value */ 610 skl_tplg_update_be_blob(w, ctx); 611 612 /* 613 * apply fix/conversion to module params based on 614 * FE/BE params 615 */ 616 skl_tplg_update_module_params(w, ctx); 617 uuid_mod = (uuid_le *)mconfig->guid; 618 mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod, 619 mconfig->id.instance_id); 620 if (mconfig->id.pvt_id < 0) 621 return ret; 622 skl_tplg_set_module_init_data(w); 623 624 ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id); 625 if (ret < 0) { 626 
dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n", 627 mconfig->core_id, ret); 628 return ret; 629 } 630 631 ret = skl_init_module(ctx, mconfig); 632 if (ret < 0) { 633 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id); 634 goto err; 635 } 636 skl_tplg_alloc_pipe_mcps(skl, mconfig); 637 ret = skl_tplg_set_module_params(w, ctx); 638 if (ret < 0) 639 goto err; 640 } 641 642 return 0; 643 err: 644 skl_dsp_put_core(ctx->dsp, mconfig->core_id); 645 return ret; 646 } 647 648 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx, 649 struct skl_pipe *pipe) 650 { 651 int ret = 0; 652 struct skl_pipe_module *w_module = NULL; 653 struct skl_module_cfg *mconfig = NULL; 654 655 list_for_each_entry(w_module, &pipe->w_list, node) { 656 uuid_le *uuid_mod; 657 mconfig = w_module->w->priv; 658 uuid_mod = (uuid_le *)mconfig->guid; 659 660 if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod && 661 mconfig->m_state > SKL_MODULE_UNINIT) { 662 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp, 663 mconfig->id.module_id); 664 if (ret < 0) 665 return -EIO; 666 } 667 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id); 668 669 ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id); 670 if (ret < 0) { 671 /* don't return; continue with other modules */ 672 dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n", 673 mconfig->core_id, ret); 674 } 675 } 676 677 /* no modules to unload in this path, so return */ 678 return ret; 679 } 680 681 /* 682 * Here, we select pipe format based on the pipe type and pipe 683 * direction to determine the current config index for the pipeline. 684 * The config index is then used to select proper module resources. 685 * Intermediate pipes currently have a fixed format hence we select the 686 * 0th configuratation by default for such pipes. 
687 */ 688 static int 689 skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig) 690 { 691 struct skl_sst *ctx = skl->skl_sst; 692 struct skl_pipe *pipe = mconfig->pipe; 693 struct skl_pipe_params *params = pipe->p_params; 694 struct skl_path_config *pconfig = &pipe->configs[0]; 695 struct skl_pipe_fmt *fmt = NULL; 696 bool in_fmt = false; 697 int i; 698 699 if (pipe->nr_cfgs == 0) { 700 pipe->cur_config_idx = 0; 701 return 0; 702 } 703 704 if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) { 705 dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n"); 706 pipe->cur_config_idx = 0; 707 pipe->memory_pages = pconfig->mem_pages; 708 709 return 0; 710 } 711 712 if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE && 713 pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) || 714 (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE && 715 pipe->direction == SNDRV_PCM_STREAM_CAPTURE)) 716 in_fmt = true; 717 718 for (i = 0; i < pipe->nr_cfgs; i++) { 719 pconfig = &pipe->configs[i]; 720 if (in_fmt) 721 fmt = &pconfig->in_fmt; 722 else 723 fmt = &pconfig->out_fmt; 724 725 if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt, 726 fmt->channels, fmt->freq, fmt->bps)) { 727 pipe->cur_config_idx = i; 728 pipe->memory_pages = pconfig->mem_pages; 729 dev_dbg(ctx->dev, "Using pipe config: %d\n", i); 730 731 return 0; 732 } 733 } 734 735 dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n", 736 params->ch, params->s_freq, params->s_fmt, pipe->ppl_id); 737 return -EINVAL; 738 } 739 740 /* 741 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we 742 * need create the pipeline. 
/*
 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
 * need create the pipeline. So we do following:
 *   - check the resources
 *   - Create the pipeline
 *   - Initialize the modules in pipeline
 *   - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	/* select the pipe config (and memory pages) matching the hw params */
	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		/* first iteration only records the source end of the pair */
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			/*
			 * NOTE(review): bind errors are ignored here —
			 * presumably deferred binds are best-effort; confirm.
			 */
			if (modules->dst == module)
				skl_bind_modules(ctx, modules->src,
							modules->dst);
		}
	}

	return 0;
}

/*
 * For KPB modules, rewrite the (mod_id, inst_id) map inside @params so
 * each topology instance id becomes the DSP's private instance id.
 * Other module types are left untouched.
 */
static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(ctx,
						inst->mod_id, inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
/*
 * Some modules require params to be set after the module is bound to
 * all pins connected.
 *
 * The module provider initializes set_param flag for such modules and we
 * send params after binding
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	/* send the format blob first, when flagged for post-bind */
	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	/* then any byte-control payloads flagged for post-bind */
	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				/* work on a copy: ids are rewritten in place */
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(ctx, params, bc->max,
								mconfig);

				ret = skl_set_module_params(ctx, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/* Look up the DSP module id registered for @uuid; -EINVAL if unknown */
static int skl_get_module_id(struct skl_sst *ctx, uuid_le *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &ctx->uuid_list, list) {
		if (uuid_le_cmp(*uuid, module->uuid) == 0)
			return module->id;
	}

	return -EINVAL;
}

/*
 * Rewrite a KPB post-bind byte-control payload in place: the UUID-keyed
 * module map from topology is replaced with a module-id keyed map that
 * the DSP firmware accepts.
 */
static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
			const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		/* new payload: count field plus one id-keyed map entry each */
		size = uuid_params->num_modules *
			sizeof(struct skl_mod_inst_map) +
			sizeof(uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl->skl_sst,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		/* swap the control's payload for the translated one */
		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from UUID mentioned in the
 * post bind params
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post bind params are used for only for KPB
	 * to set copier instances to drain the data
	 * in fast mode
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->skl_sst->dev,
					"%s: invalid kpb post bind params\n",
					__func__);
}

/*
 * Record a (src, dst) pair whose bind must be re-issued once dst is
 * (re)initialized. Duplicate pairs already on skl->bind_list are skipped.
 */
static int skl_tplg_module_add_deferred_bind(struct skl *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for module with static pin connection */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			/* already queued? then nothing to do */
			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

/*
 * Bind @src_mconfig to every connected DSP sink reachable from @w,
 * descending through non-DSP widgets, and start sink pipes as needed.
 */
static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		/* non-DSP widget: descend one level and keep looking */
		if (!is_skl_dsp_widget_type(p->sink, ctx->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * here we will check widgets in sink pipelines, so that
		 * can be any widgets type and we are only interested if
		 * they are ones used for SKL so check that first
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink, ctx->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than PGA leaf can be connected
			 * directly or via switch to a module in another
			 * pipeline. EX: reference path
			 * when the path is enabled, the dst module that needs
			 * to be bound may not be initialized. if the module is
			 * not initialized, add these modules in the deferred
			 * bind list and when the dst module is initialised,
			 * bind this module to the dst_module in deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}


			/* either end not yet initialized: bind later */
			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	/* no DSP sink found at this level: recurse into the last sink seen */
	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}
1126 * - Start sink pipeline, if not running 1127 * - Then run current pipe 1128 */ 1129 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, 1130 struct skl *skl) 1131 { 1132 struct skl_module_cfg *src_mconfig; 1133 struct skl_sst *ctx = skl->skl_sst; 1134 int ret = 0; 1135 1136 src_mconfig = w->priv; 1137 1138 /* 1139 * find which sink it is connected to, bind with the sink, 1140 * if sink is not started, start sink pipe first, then start 1141 * this pipe 1142 */ 1143 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig); 1144 if (ret) 1145 return ret; 1146 1147 /* Start source pipe last after starting all sinks */ 1148 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) 1149 return skl_run_pipe(ctx, src_mconfig->pipe); 1150 1151 return 0; 1152 } 1153 1154 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget( 1155 struct snd_soc_dapm_widget *w, struct skl *skl) 1156 { 1157 struct snd_soc_dapm_path *p; 1158 struct snd_soc_dapm_widget *src_w = NULL; 1159 struct skl_sst *ctx = skl->skl_sst; 1160 1161 snd_soc_dapm_widget_for_each_source_path(w, p) { 1162 src_w = p->source; 1163 if (!p->connect) 1164 continue; 1165 1166 dev_dbg(ctx->dev, "sink widget=%s\n", w->name); 1167 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name); 1168 1169 /* 1170 * here we will check widgets in sink pipelines, so that can 1171 * be any widgets type and we are only interested if they are 1172 * ones used for SKL so check that first 1173 */ 1174 if ((p->source->priv != NULL) && 1175 is_skl_dsp_widget_type(p->source, ctx->dev)) { 1176 return p->source; 1177 } 1178 } 1179 1180 if (src_w != NULL) 1181 return skl_get_src_dsp_widget(src_w, skl); 1182 1183 return NULL; 1184 } 1185 1186 /* 1187 * in the Post-PMU event of mixer we need to do following: 1188 * - Check if this pipe is running 1189 * - if not, then 1190 * - bind this pipeline to its source pipeline 1191 * if source pipe is already running, this means it is a dynamic 1192 * connection and we need to bind 
 *	  only to that pipe
 * - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If source pipe is already started, that means source is driving
	 * one more sink before this sink got connected, Since source is
	 * started, bind this sink to source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check pipe state, then no need to bind or start the
		 * pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		/* FE pipes are run from the PCM trigger path, not here */
		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * in the Pre-PMD event of mixer we need to do following:
 * - Stop the pipe
 * - find the source connections and remove that from dapm_path_list
 * - unbind with source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	/* unbind every input pin that is still bound to a source module */
	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * in the Post-PMD event of mixer we need to do following:
 * - Free the mcps used
 * - Free the mem used
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (modules are not required to be explicitly
 *   deleted, pipeline delete is enough here
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	/* flush deferred-bind entries that reference modules of this pipe */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, Unbind the
			 * modules from deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(ctx, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	/* unbind each consecutive pair of modules within the pipe */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	/* pipe delete tears the modules down too: mark them uninitialized */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}

/*
 * in the Post-PMD event of PGA we need to do following:
 * - Free the mcps used
 * - Stop the pipeline
 * - In source pipe is connected, unbind with source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connecter and if path is found that means
			 * unbind between source and sink has not happened yet
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline.
 * If a
 * second one is required that is created as another pipe entity.
 * The mixer is responsible for pipe management and represent a pipeline
 * instance
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	/* dispatch to the per-phase handler; unknown events are a no-op */
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assumed rest of the modules in pipeline are PGA. But we
 * are interested in last PGA (leaf PGA) in a pipeline to disconnect with
 * the sink when it is running (two FE to one BE or one FE to two BE)
 * scenarios
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * TLV byte-control get: refresh the cached params from the DSP when the
 * widget is powered, then return "param_id, size, payload" to user space.
 */
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/*
	 * decrement size for TLV header
	 * NOTE(review): size < 2*sizeof(u32) would wrap here; the result is
	 * then clamped by bc->max below, but confirm TLV callers always pass
	 * at least a full header.
	 */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

/*
 * TLV byte-control put: copy the payload from user space into the cached
 * params and, if the widget is powered, push them to the DSP immediately.
 */
static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects actual
		 * parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			/* otherwise skip the two-word TLV header */
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

/*
 * Report the currently selected DMIC channel combo for this mic-select
 * enum control; index 0 ("invalid") if the stored combo is for a
 * different channel type.
 */
static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

/*
 * Pack @mic_cfg into the module's formats_config blob so it is sent to
 * the firmware as a set-param.  The caps buffer is allocated once and
 * reused on subsequent calls.
 */
static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

/*
 * Select a DMIC channel combination: map the chosen enum index to an
 * input-channel list for the channel type and build the mic-select gain
 * blob for the firmware.
 */
static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* channel type enum map to number of chanels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the dma id for host and link.
 * In case of passthrough
 * pipeline, this will both host and link in the same
 * pipeline, so need to copy the link and host based on dev_type
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		/* copy only the fields that belong to this end of the pipe */
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		/* common stream parameters are shared by both ends */
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in Gateway module of the FE and we
 * need to calculate the format in DSP module configuration, that
 * conversion is done here
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res = &mconfig->module->resources[0];
	struct skl *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	/* formats come from the manifest when it declares modules */
	if (skl->nr_modules)
		return 0;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[0].inputs[0].fmt;
	else
		format = &mconfig->module->formats[0].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	/* buffer size for one millisecond of audio at the given format */
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI
 * This is used to find the
 * hw_params set for that DAI and apply to FE
 * pipeline
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

/*
 * Walk upstream from @w and return the copier module feeding the AIF-out
 * widget on a playback BE path, or NULL if none is found.
 */
static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

/*
 * Walk downstream from @w and return the copier module fed by the AIF-in
 * widget on a capture BE path, or NULL if none is found.
 */
static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

/* Find the BE copier module for @dai in the given stream direction. */
struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

/* Map a topology device type to the NHLT link type used for blob lookup. */
static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings so pick based on the PCM
 * parameters
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	/* HDA links need no NHLT endpoint blob */
	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id*/
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

/*
 * Walk upstream from @w applying @params to every SKL DSP source widget
 * found; recurse through non-DSP widgets.  Returns -EIO if no path was
 * followed at all.
 */
static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * Sink-direction counterpart of skl_tplg_be_set_src_pipe_params(): walk
 * downstream applying @params to every SKL DSP sink widget found.
 */
static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be a source parameters (capture) or sink parameters
 * (playback). Based on sink and source we need to either find the source
 * list or the sink list and set the pipeline parameters
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;	/* not reached: both branches above return */
}

/* DAPM widget event handlers bound by topology event name */
static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

/* TLV byte-control accessors */
static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

/* enum kcontrol accessors (DMIC mic select) */
static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
};

/*
 * Store one pipe-config token (@tkn/@tkn_val) into the pipe's config
 * slot @conf_idx for direction @dir.
 */
static int skl_tplg_fill_pipe_cfg(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val, int conf_idx, int dir)
{
	struct skl_pipe_fmt *fmt;
	struct skl_path_config *config;

	switch (dir) {
	case SKL_DIR_IN:
		fmt = &pipe->configs[conf_idx].in_fmt;
		break;

	case SKL_DIR_OUT:
		fmt = &pipe->configs[conf_idx].out_fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	config = &pipe->configs[conf_idx];

	switch (tkn) {
	case SKL_TKN_U32_CFG_FREQ:
		fmt->freq = tkn_val;
		break;

	case SKL_TKN_U8_CFG_CHAN:
		fmt->channels = tkn_val;
		break;

	case SKL_TKN_U8_CFG_BPS:
		fmt->bps = tkn_val;
		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
		config->mem_pages = tkn_val;
		break;

	default:
		dev_err(dev, "Invalid token config: %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/* Store one pipe-level token (@tkn/@tkn_val) into @pipe. */
static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{

	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_DIRECTION:
		pipe->direction = tkn_val;
		break;

	case SKL_TKN_U32_NUM_CONFIGS:
		pipe->nr_cfgs = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add pipeline by parsing the relevant tokens
 * Return an existing pipe if the pipe already exists.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	/* -EEXIST tells the caller the pipe was found, not an error */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return -EEXIST;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}

/* Copy a 16-byte UUID out of a topology UUID token element. */
static int skl_tplg_get_uuid(struct device *dev, u8 *guid,
	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
	if (uuid_tkn->token == SKL_TKN_UUID) {
		memcpy(guid, &uuid_tkn->uuid, 16);
		return 0;
	}

	dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);

	return -EINVAL;
}

/* Store one pin token into m_pin[pin_index] (module/instance id or UUID). */
static int skl_tplg_fill_pin(struct device *dev,
			struct snd_soc_tplg_vendor_value_elem *tkn_elem,
			struct skl_module_pin *m_pin,
			int pin_index)
{
	int ret;

	switch (tkn_elem->token) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_UUID:
		ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b,
			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
		if (ret < 0)
			return ret;

		break;

	default:
		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
		struct skl_module_cfg *mconfig,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		int dir, int pin_count)
{
	int ret;
	struct skl_module_pin *m_pin;

	switch (dir) {
	case SKL_DIR_IN:
		m_pin = mconfig->m_in_pin;
		break;

	case SKL_DIR_OUT:
		m_pin = mconfig->m_out_pin;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
	if (ret < 0)
		return ret;

	/* freshly parsed pins start out unbound */
	m_pin[pin_count].in_use = false;
	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

	return 0;
}

/*
 * Fill up input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_fmt *dst_fmt,
		u32 tkn, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
		break;

	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
		break;

	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_CH_CONFIG:
		dst_fmt->ch_cfg = value;
		break;

	case SKL_TKN_U32_FMT_INTERLEAVE:
		dst_fmt->interleaving_style = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
		dst_fmt->sample_type = value;
		break;

	case SKL_TKN_U32_FMT_CH_MAP:
		dst_fmt->ch_map = value;
		break;

	default:
		dev_err(dev, "Invalid token %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Pick the input or output format slot @fmt_idx of @fmt based on @dir,
 * then store the token via skl_tplg_fill_fmt().
 */
static int skl_tplg_widget_fill_fmt(struct device *dev,
		struct skl_module_iface *fmt,
		u32 tkn, u32 val, u32 dir, int fmt_idx)
{
	struct skl_module_fmt *dst_fmt;

	if (!fmt)
		return -EINVAL;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = &fmt->inputs[fmt_idx].fmt;
		break;

	case SKL_DIR_OUT:
		dst_fmt = &fmt->outputs[fmt_idx].fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
}

/* Apply the same is_dynamic flag to the first @pin_count pins. */
static void skl_tplg_fill_pin_dynamic_val(
		struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
	int i;

	for (i = 0; i < pin_count; i++)
		mpin[i].is_dynamic = value;
}

/*
 * Resource table in the manifest has pin specific resources
 * like pin and pin buffer size
 */
static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res, int pin_idx, int dir)
{
	struct skl_module_pin_resources *m_pin;

	switch (dir) {
	case SKL_DIR_IN:
		m_pin = &res->input[pin_idx];
		break;

	case SKL_DIR_OUT:
		m_pin = &res->output[pin_idx];
		break;

	default:
		dev_err(dev, "Invalid pin direction: %d\n", dir);
		return -EINVAL;
	}

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_RES_PIN_ID:
		m_pin->pin_index = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_PIN_BUF:
		m_pin->buf_size = tkn_elem->value;
		break;

	default:
		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}

/*
 * Fill module specific resources from the manifest's resource
 * table like CPS, DMA size, mem_pages.
 */
static int skl_tplg_fill_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res,
		int pin_idx, int dir)
{
	int ret, tkn_count = 0;

	if (!res)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_CPS:
		res->cps = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_DMA_SIZE:
		res->dma_buffer_size = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_CPC:
		res->cpc = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
		res->is_pages = tkn_elem->value;
		break;

	case SKL_TKN_U32_OBS:
		res->obs = tkn_elem->value;
		break;

	case SKL_TKN_U32_IBS:
		res->ibs = tkn_elem->value;
		break;

	case SKL_TKN_U32_MAX_MCPS:
		/* legacy widget token; stored in the same cps field */
		res->cps = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_RES_PIN_ID:
	case SKL_TKN_MM_U32_PIN_BUF:
		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
				pin_idx, dir);
		if (ret < 0)
			return ret;
		break;

	default:
		dev_err(dev, "Not a res type token: %d", tkn_elem->token);
		return -EINVAL;

	}
	/* one element consumed from the vendor array */
	tkn_count++;

	return tkn_count;
}

/*
 * Parse tokens to fill up the module private data
 */
static int skl_tplg_get_token(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl *skl, struct skl_module_cfg *mconfig)
{
	int tkn_count = 0;
	int ret;
	/* parse state carried across consecutive calls for one widget blob */
	static int is_pipe_exists;
	static int pin_index, dir, conf_idx;
	struct skl_module_iface *iface = NULL;
	struct skl_module_res *res = NULL;
	int res_idx = mconfig->res_idx;
	int fmt_idx = mconfig->fmt_idx;

	/*
	 * If the manifest structure contains no modules, fill all
	 * the module data to 0th index.
	 * res_idx and fmt_idx are default set to 0.
2405 */ 2406 if (skl->nr_modules == 0) { 2407 res = &mconfig->module->resources[res_idx]; 2408 iface = &mconfig->module->formats[fmt_idx]; 2409 } 2410 2411 if (tkn_elem->token > SKL_TKN_MAX) 2412 return -EINVAL; 2413 2414 switch (tkn_elem->token) { 2415 case SKL_TKN_U8_IN_QUEUE_COUNT: 2416 mconfig->module->max_input_pins = tkn_elem->value; 2417 break; 2418 2419 case SKL_TKN_U8_OUT_QUEUE_COUNT: 2420 mconfig->module->max_output_pins = tkn_elem->value; 2421 break; 2422 2423 case SKL_TKN_U8_DYN_IN_PIN: 2424 if (!mconfig->m_in_pin) 2425 mconfig->m_in_pin = 2426 devm_kcalloc(dev, MAX_IN_QUEUE, 2427 sizeof(*mconfig->m_in_pin), 2428 GFP_KERNEL); 2429 if (!mconfig->m_in_pin) 2430 return -ENOMEM; 2431 2432 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE, 2433 tkn_elem->value); 2434 break; 2435 2436 case SKL_TKN_U8_DYN_OUT_PIN: 2437 if (!mconfig->m_out_pin) 2438 mconfig->m_out_pin = 2439 devm_kcalloc(dev, MAX_IN_QUEUE, 2440 sizeof(*mconfig->m_in_pin), 2441 GFP_KERNEL); 2442 if (!mconfig->m_out_pin) 2443 return -ENOMEM; 2444 2445 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE, 2446 tkn_elem->value); 2447 break; 2448 2449 case SKL_TKN_U8_TIME_SLOT: 2450 mconfig->time_slot = tkn_elem->value; 2451 break; 2452 2453 case SKL_TKN_U8_CORE_ID: 2454 mconfig->core_id = tkn_elem->value; 2455 break; 2456 2457 case SKL_TKN_U8_MOD_TYPE: 2458 mconfig->m_type = tkn_elem->value; 2459 break; 2460 2461 case SKL_TKN_U8_DEV_TYPE: 2462 mconfig->dev_type = tkn_elem->value; 2463 break; 2464 2465 case SKL_TKN_U8_HW_CONN_TYPE: 2466 mconfig->hw_conn_type = tkn_elem->value; 2467 break; 2468 2469 case SKL_TKN_U16_MOD_INST_ID: 2470 mconfig->id.instance_id = 2471 tkn_elem->value; 2472 break; 2473 2474 case SKL_TKN_U32_MEM_PAGES: 2475 case SKL_TKN_U32_MAX_MCPS: 2476 case SKL_TKN_U32_OBS: 2477 case SKL_TKN_U32_IBS: 2478 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir); 2479 if (ret < 0) 2480 return ret; 2481 2482 break; 2483 2484 case SKL_TKN_U32_VBUS_ID: 2485 
mconfig->vbus_id = tkn_elem->value; 2486 break; 2487 2488 case SKL_TKN_U32_PARAMS_FIXUP: 2489 mconfig->params_fixup = tkn_elem->value; 2490 break; 2491 2492 case SKL_TKN_U32_CONVERTER: 2493 mconfig->converter = tkn_elem->value; 2494 break; 2495 2496 case SKL_TKN_U32_D0I3_CAPS: 2497 mconfig->d0i3_caps = tkn_elem->value; 2498 break; 2499 2500 case SKL_TKN_U32_PIPE_ID: 2501 ret = skl_tplg_add_pipe(dev, 2502 mconfig, skl, tkn_elem); 2503 2504 if (ret < 0) { 2505 if (ret == -EEXIST) { 2506 is_pipe_exists = 1; 2507 break; 2508 } 2509 return is_pipe_exists; 2510 } 2511 2512 break; 2513 2514 case SKL_TKN_U32_PIPE_CONFIG_ID: 2515 conf_idx = tkn_elem->value; 2516 break; 2517 2518 case SKL_TKN_U32_PIPE_CONN_TYPE: 2519 case SKL_TKN_U32_PIPE_PRIORITY: 2520 case SKL_TKN_U32_PIPE_MEM_PGS: 2521 case SKL_TKN_U32_PMODE: 2522 case SKL_TKN_U32_PIPE_DIRECTION: 2523 case SKL_TKN_U32_NUM_CONFIGS: 2524 if (is_pipe_exists) { 2525 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe, 2526 tkn_elem->token, tkn_elem->value); 2527 if (ret < 0) 2528 return ret; 2529 } 2530 2531 break; 2532 2533 case SKL_TKN_U32_PATH_MEM_PGS: 2534 case SKL_TKN_U32_CFG_FREQ: 2535 case SKL_TKN_U8_CFG_CHAN: 2536 case SKL_TKN_U8_CFG_BPS: 2537 if (mconfig->pipe->nr_cfgs) { 2538 ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe, 2539 tkn_elem->token, tkn_elem->value, 2540 conf_idx, dir); 2541 if (ret < 0) 2542 return ret; 2543 } 2544 break; 2545 2546 case SKL_TKN_CFG_MOD_RES_ID: 2547 mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value; 2548 break; 2549 2550 case SKL_TKN_CFG_MOD_FMT_ID: 2551 mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2552 break; 2553 2554 /* 2555 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2556 * direction and the pin count. The first four bits represent 2557 * direction and next four the pin count. 
	 */
	case SKL_TKN_U32_DIR_PIN_COUNT:
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_index = (tkn_elem->value &
			SKL_PIN_COUNT_MASK) >> 4;

		break;

	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
				tkn_elem->value, dir, pin_index);

		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_PIN_MOD_ID:
	case SKL_TKN_U32_PIN_INST_ID:
	case SKL_TKN_UUID:
		ret = skl_tplg_fill_pins_info(dev,
				mconfig, tkn_elem, dir,
				pin_index);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_CAPS_SIZE:
		mconfig->formats_config.caps_size =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_CAPS_SET_PARAMS:
		mconfig->formats_config.set_params =
				tkn_elem->value;
		break;

	case SKL_TKN_U32_CAPS_PARAMS_ID:
		mconfig->formats_config.param_id =
				tkn_elem->value;
		break;

	case SKL_TKN_U32_PROC_DOMAIN:
		mconfig->domain =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_DMA_BUF_SIZE:
		mconfig->dma_buffer_size = tkn_elem->value;
		break;

	/* handled at manifest level; ignore for widgets */
	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_CONN_TYPE:
		break;

	default:
		dev_err(dev, "Token %d not handled\n",
				tkn_elem->token);
		return -EINVAL;
	}

	tkn_count++;

	return tkn_count;
}

/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data,	struct skl *skl,
		struct skl_module_cfg *mconfig, int block_size)
{
	struct
snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	bool is_module_guid = true;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			dev_warn(dev, "no string tokens expected for skl tplg\n");
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			/* first UUID is the module GUID, later ones are pin UUIDs */
			if (is_module_guid) {
				ret = skl_tplg_get_uuid(dev, mconfig->guid,
					array->uuid);
				is_module_guid = false;
			} else {
				ret = skl_tplg_get_token(dev, array->value, skl,
							 mconfig);
			}

			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);

			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
					skl, mconfig);

			if (ret < 0)
				return ret;

			/* each call may consume several elements */
			tkn_count = tkn_count + ret;
			tkn_elem++;
		}

		tuple_size += tkn_count * sizeof(*tkn_elem);
	}

	return off;
}

/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
		struct snd_soc_tplg_vendor_array *array)
{
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	tkn_elem = array->value;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_NUM_BLOCKS:
	case SKL_TKN_U8_BLOCK_TYPE:
	case SKL_TKN_U16_BLOCK_SIZE:
		return tkn_elem->value;

	default:
		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
		break;
	}

	return -EINVAL;
}

/* Functions to parse private
data from configuration file format v4 */

/*
 * Add pipeline from topology binary into driver pipeline list
 *
 * If already added we return that instance
 * Otherwise we create a new instance and add into driver list
 */
static int skl_tplg_add_pipe_v4(struct device *dev,
			struct skl_module_cfg *mconfig, struct skl *skl,
			struct skl_dfw_v4_pipe *dfw_pipe)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	/* reuse a pipe already registered with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
			mconfig->pipe = ppl->pipe;
			return 0;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->ppl_id = dfw_pipe->pipe_id;
	pipe->memory_pages = dfw_pipe->memory_pages;
	pipe->pipe_priority = dfw_pipe->pipe_priority;
	pipe->conn_type = dfw_pipe->conn_type;
	pipe->state = SKL_PIPE_INVALID;
	pipe->p_params = params;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;

	return 0;
}

/* Copy v4 firmware pin descriptors into the runtime pin array */
static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
					struct skl_module_pin *m_pin,
					bool is_dynamic, int max_pin)
{
	int i;

	for (i = 0; i < max_pin; i++) {
		m_pin[i].id.module_id = dfw_pin[i].module_id;
		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
		m_pin[i].in_use = false;
		m_pin[i].is_dynamic = is_dynamic;
		m_pin[i].pin_state = SKL_PIN_UNBIND;
	}
}

/* Convert v4 firmware audio formats into skl_module_pin_fmt entries */
static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
				 struct skl_dfw_v4_module_fmt *src_fmt,
				 int pins)
{
	int i;

	for (i = 0; i < pins; i++) {
		dst_fmt[i].fmt.channels = src_fmt[i].channels;
		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
		dst_fmt[i].fmt.interleaving_style =
				src_fmt[i].interleaving_style;
		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
	}
}

/* Parse a v4-format widget private data blob into mconfig */
static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
				    struct skl *skl, struct device *dev,
				    struct skl_module_cfg *mconfig)
{
	struct skl_dfw_v4_module *dfw =
				(struct skl_dfw_v4_module *)tplg_w->priv.data;
	int ret;

	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");

	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
	if (ret)
		return ret;
	/* the real module id is resolved when the module is loaded */
	mconfig->id.module_id = -1;
	mconfig->id.instance_id = dfw->instance_id;
	/* v4 blobs carry a single resource/format table at index 0 */
	mconfig->module->resources[0].cps = dfw->max_mcps;
	mconfig->module->resources[0].ibs = dfw->ibs;
	mconfig->module->resources[0].obs = dfw->obs;
	mconfig->core_id = dfw->core_id;
	mconfig->module->max_input_pins = dfw->max_in_queue;
	mconfig->module->max_output_pins = dfw->max_out_queue;
	mconfig->module->loadable = dfw->is_loadable;
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
			     MAX_IN_QUEUE);
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
			     MAX_OUT_QUEUE);

	mconfig->params_fixup = dfw->params_fixup;
	mconfig->converter = dfw->converter;
	mconfig->m_type = dfw->module_type;
	mconfig->vbus_id = dfw->vbus_id;
	mconfig->module->resources[0].is_pages = dfw->mem_pages;

	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
	if (ret)
		return ret;

	mconfig->dev_type = dfw->dev_type;
	mconfig->hw_conn_type = dfw->hw_conn_type;
	mconfig->time_slot = dfw->time_slot;
	mconfig->formats_config.caps_size = dfw->caps.caps_size;

	mconfig->m_in_pin = devm_kcalloc(dev,
				MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
				GFP_KERNEL);
	if (!mconfig->m_in_pin)
		return -ENOMEM;

	mconfig->m_out_pin = devm_kcalloc(dev,
				MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
				GFP_KERNEL);
	if (!mconfig->m_out_pin)
		return -ENOMEM;

	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
				    dfw->is_dynamic_in_pin,
				    mconfig->module->max_input_pins);
	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
				    dfw->is_dynamic_out_pin,
				    mconfig->module->max_output_pins);

	if (mconfig->formats_config.caps_size) {
		mconfig->formats_config.set_params = dfw->caps.set_params;
		mconfig->formats_config.param_id = dfw->caps.param_id;
		mconfig->formats_config.caps =
			devm_kzalloc(dev, mconfig->formats_config.caps_size,
				     GFP_KERNEL);
		if (!mconfig->formats_config.caps)
			return -ENOMEM;
		memcpy(mconfig->formats_config.caps, dfw->caps.caps,
		       dfw->caps.caps_size);
	}

	return 0;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				 struct skl *skl, struct device *dev,
				 struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/*
	 * v4 configuration files have a valid UUID at the start of
	 * the widget's private data.
	 */
	if (uuid_is_valid((char *)tplg_w->priv.data))
		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			/* binary block: raw caps payload for the module */
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
			ret = mconfig->formats_config.caps_size;
		}
		off += ret;
	}

	return 0;
}

/* Reset pin and pipe state of a widget that belongs to this component */
static void skl_clear_pin_config(struct snd_soc_component *component,
				struct snd_soc_dapm_widget *w)
{
	int i;
	struct skl_module_cfg *mconfig;
	struct skl_pipe *pipe;

	if (!strncmp(w->dapm->component->name, component->name,
					strlen(component->name))) {
		mconfig = w->priv;
		pipe = mconfig->pipe;
		for (i = 0; i < mconfig->module->max_input_pins; i++) {
			mconfig->m_in_pin[i].in_use = false;
			mconfig->m_in_pin[i].pin_state
					= SKL_PIN_UNBIND;
		}
		for (i = 0; i < mconfig->module->max_output_pins; i++) {
			mconfig->m_out_pin[i].in_use = false;
			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		pipe->state = SKL_PIPE_INVALID;
		mconfig->m_state = SKL_MODULE_UNINIT;
	}
}

/* Drop all DSP resource bookkeeping for every SKL widget of the card */
void skl_cleanup_resources(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct snd_soc_component *soc_component = skl->component;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_component == NULL)
		return;

	card = soc_component->card;
	if (!card || !card->instantiated)
		return;

	skl->resource.mem = 0;
	skl->resource.mcps = 0;

	list_for_each_entry(w, &card->widgets, list) {
		if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL)
			skl_clear_pin_config(soc_component, w);
	}

	skl_clear_module_cnt(ctx->dsp);
}

/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget which gives
 * information to the driver about module and pipeline parameters which DSP
 * FW expects like ids, resource values, formats etc
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl *skl = bus_to_skl(bus);
	struct skl_module_cfg *mconfig;

	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set
it to query when 3050 * module is load for a use case 3051 */ 3052 mconfig->id.module_id = -1; 3053 3054 /* Parse private data for tuples */ 3055 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig); 3056 if (ret < 0) 3057 return ret; 3058 3059 skl_debug_init_module(skl->debugfs, w, mconfig); 3060 3061 bind_event: 3062 if (tplg_w->event_type == 0) { 3063 dev_dbg(bus->dev, "ASoC: No event handler required\n"); 3064 return 0; 3065 } 3066 3067 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops, 3068 ARRAY_SIZE(skl_tplg_widget_ops), 3069 tplg_w->event_type); 3070 3071 if (ret) { 3072 dev_err(bus->dev, "%s: No matching event handlers found for %d\n", 3073 __func__, tplg_w->event_type); 3074 return -EINVAL; 3075 } 3076 3077 return 0; 3078 } 3079 3080 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be, 3081 struct snd_soc_tplg_bytes_control *bc) 3082 { 3083 struct skl_algo_data *ac; 3084 struct skl_dfw_algo_data *dfw_ac = 3085 (struct skl_dfw_algo_data *)bc->priv.data; 3086 3087 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL); 3088 if (!ac) 3089 return -ENOMEM; 3090 3091 /* Fill private data */ 3092 ac->max = dfw_ac->max; 3093 ac->param_id = dfw_ac->param_id; 3094 ac->set_params = dfw_ac->set_params; 3095 ac->size = dfw_ac->max; 3096 3097 if (ac->max) { 3098 ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL); 3099 if (!ac->params) 3100 return -ENOMEM; 3101 3102 memcpy(ac->params, dfw_ac->params, ac->max); 3103 } 3104 3105 be->dobj.private = ac; 3106 return 0; 3107 } 3108 3109 static int skl_init_enum_data(struct device *dev, struct soc_enum *se, 3110 struct snd_soc_tplg_enum_control *ec) 3111 { 3112 3113 void *data; 3114 3115 if (ec->priv.size) { 3116 data = devm_kzalloc(dev, sizeof(ec->priv.size), GFP_KERNEL); 3117 if (!data) 3118 return -ENOMEM; 3119 memcpy(data, ec->priv.data, ec->priv.size); 3120 se->dobj.private = data; 3121 } 3122 3123 return 0; 3124 3125 } 3126 3127 static int skl_tplg_control_load(struct snd_soc_component 
				*cmpnt,
				int index,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
			se = (struct soc_enum *)kctl->private_value;
			if (tplg_ec->priv.size)
				return skl_init_enum_data(bus->dev, se,
						tplg_ec);
		}
		break;

	default:
		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
			hdr->ops.get, hdr->ops.put, hdr->ops.info);
		break;
	}

	return 0;
}

/* Parse a manifest string token (currently only DSP library names) */
static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_string_elem *str_elem,
		struct skl *skl)
{
	int tkn_count = 0;
	/* running library index across consecutive calls */
	static int ref_count;

	switch (str_elem->token) {
	case SKL_TKN_STR_LIB_NAME:
		if (ref_count > skl->skl_sst->lib_count - 1) {
			ref_count = 0;
			return -EINVAL;
		}

		/*
		 * NOTE(review): strncpy() does not NUL-terminate when the
		 * source fills the buffer — verify library names are always
		 * shorter than the name field.
		 */
		strncpy(skl->skl_sst->lib_info[ref_count].name,
			str_elem->string,
			ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
		ref_count++;
		break;

	default:
		dev_err(dev, "Not a string token %d\n", str_elem->token);
		break;
	}
	tkn_count++;

	return tkn_count;
}

/* Walk all string elements of a manifest vendor array */
static int skl_tplg_get_str_tkn(struct device *dev,
		struct
snd_soc_tplg_vendor_array *array,
		struct skl *skl)
{
	int tkn_count = 0, ret;
	struct snd_soc_tplg_vendor_string_elem *str_elem;

	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
	while (tkn_count < array->num_elems) {
		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
		str_elem++;

		if (ret < 0)
			return ret;

		tkn_count = tkn_count + ret;
	}

	return tkn_count;
}

/* Fill one manifest interface format entry (pin id or audio format) */
static int skl_tplg_manifest_fill_fmt(struct device *dev,
		struct skl_module_iface *fmt,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		u32 dir, int fmt_idx)
{
	struct skl_module_pin_fmt *dst_fmt;
	struct skl_module_fmt *mod_fmt;
	int ret;

	if (!fmt)
		return -EINVAL;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = &fmt->inputs[fmt_idx];
		break;

	case SKL_DIR_OUT:
		dst_fmt = &fmt->outputs[fmt_idx];
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	mod_fmt = &dst_fmt->fmt;

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_INTF_PIN_ID:
		dst_fmt->id = tkn_elem->value;
		break;

	default:
		/* everything else is a plain audio format token */
		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
					tkn_elem->value);
		if (ret < 0)
			return ret;
		break;
	}

	return 0;
}

/* Fill top-level module info (pin types, queue counts, table sizes) */
static int skl_tplg_fill_mod_info(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module *mod)
{

	if (!mod)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_PIN_TYPE:
		mod->input_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_PIN_TYPE:
		mod->output_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mod->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mod->max_output_pins =
tkn_elem->value; 3287 break; 3288 3289 case SKL_TKN_MM_U8_NUM_RES: 3290 mod->nr_resources = tkn_elem->value; 3291 break; 3292 3293 case SKL_TKN_MM_U8_NUM_INTF: 3294 mod->nr_interfaces = tkn_elem->value; 3295 break; 3296 3297 default: 3298 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3299 return -EINVAL; 3300 } 3301 3302 return 0; 3303 } 3304 3305 3306 static int skl_tplg_get_int_tkn(struct device *dev, 3307 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3308 struct skl *skl) 3309 { 3310 int tkn_count = 0, ret, size; 3311 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3312 struct skl_module_res *res = NULL; 3313 struct skl_module_iface *fmt = NULL; 3314 struct skl_module *mod = NULL; 3315 static struct skl_astate_param *astate_table; 3316 static int astate_cfg_idx, count; 3317 int i; 3318 3319 if (skl->modules) { 3320 mod = skl->modules[mod_idx]; 3321 res = &mod->resources[res_val_idx]; 3322 fmt = &mod->formats[intf_val_idx]; 3323 } 3324 3325 switch (tkn_elem->token) { 3326 case SKL_TKN_U32_LIB_COUNT: 3327 skl->skl_sst->lib_count = tkn_elem->value; 3328 break; 3329 3330 case SKL_TKN_U8_NUM_MOD: 3331 skl->nr_modules = tkn_elem->value; 3332 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3333 sizeof(*skl->modules), GFP_KERNEL); 3334 if (!skl->modules) 3335 return -ENOMEM; 3336 3337 for (i = 0; i < skl->nr_modules; i++) { 3338 skl->modules[i] = devm_kzalloc(dev, 3339 sizeof(struct skl_module), GFP_KERNEL); 3340 if (!skl->modules[i]) 3341 return -ENOMEM; 3342 } 3343 break; 3344 3345 case SKL_TKN_MM_U8_MOD_IDX: 3346 mod_idx = tkn_elem->value; 3347 break; 3348 3349 case SKL_TKN_U32_ASTATE_COUNT: 3350 if (astate_table != NULL) { 3351 dev_err(dev, "More than one entry for A-State count"); 3352 return -EINVAL; 3353 } 3354 3355 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3356 dev_err(dev, "Invalid A-State count %d\n", 3357 tkn_elem->value); 3358 return -EINVAL; 3359 } 3360 3361 size = tkn_elem->value * sizeof(struct skl_astate_param) + 3362 
sizeof(count); 3363 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3364 if (!skl->cfg.astate_cfg) 3365 return -ENOMEM; 3366 3367 astate_table = skl->cfg.astate_cfg->astate_table; 3368 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3369 break; 3370 3371 case SKL_TKN_U32_ASTATE_IDX: 3372 if (tkn_elem->value >= count) { 3373 dev_err(dev, "Invalid A-State index %d\n", 3374 tkn_elem->value); 3375 return -EINVAL; 3376 } 3377 3378 astate_cfg_idx = tkn_elem->value; 3379 break; 3380 3381 case SKL_TKN_U32_ASTATE_KCPS: 3382 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3383 break; 3384 3385 case SKL_TKN_U32_ASTATE_CLK_SRC: 3386 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3387 break; 3388 3389 case SKL_TKN_U8_IN_PIN_TYPE: 3390 case SKL_TKN_U8_OUT_PIN_TYPE: 3391 case SKL_TKN_U8_IN_QUEUE_COUNT: 3392 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3393 case SKL_TKN_MM_U8_NUM_RES: 3394 case SKL_TKN_MM_U8_NUM_INTF: 3395 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3396 if (ret < 0) 3397 return ret; 3398 break; 3399 3400 case SKL_TKN_U32_DIR_PIN_COUNT: 3401 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3402 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3403 break; 3404 3405 case SKL_TKN_MM_U32_RES_ID: 3406 if (!res) 3407 return -EINVAL; 3408 3409 res->id = tkn_elem->value; 3410 res_val_idx = tkn_elem->value; 3411 break; 3412 3413 case SKL_TKN_MM_U32_FMT_ID: 3414 if (!fmt) 3415 return -EINVAL; 3416 3417 fmt->fmt_idx = tkn_elem->value; 3418 intf_val_idx = tkn_elem->value; 3419 break; 3420 3421 case SKL_TKN_MM_U32_CPS: 3422 case SKL_TKN_MM_U32_DMA_SIZE: 3423 case SKL_TKN_MM_U32_CPC: 3424 case SKL_TKN_U32_MEM_PAGES: 3425 case SKL_TKN_U32_OBS: 3426 case SKL_TKN_U32_IBS: 3427 case SKL_TKN_MM_U32_RES_PIN_ID: 3428 case SKL_TKN_MM_U32_PIN_BUF: 3429 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3430 if (ret < 0) 3431 return ret; 3432 3433 break; 3434 3435 case SKL_TKN_MM_U32_NUM_IN_FMT: 3436 if (!fmt) 3437 return -EINVAL; 3438 3439 
res->nr_input_pins = tkn_elem->value; 3440 break; 3441 3442 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3443 if (!fmt) 3444 return -EINVAL; 3445 3446 res->nr_output_pins = tkn_elem->value; 3447 break; 3448 3449 case SKL_TKN_U32_FMT_CH: 3450 case SKL_TKN_U32_FMT_FREQ: 3451 case SKL_TKN_U32_FMT_BIT_DEPTH: 3452 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3453 case SKL_TKN_U32_FMT_CH_CONFIG: 3454 case SKL_TKN_U32_FMT_INTERLEAVE: 3455 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3456 case SKL_TKN_U32_FMT_CH_MAP: 3457 case SKL_TKN_MM_U32_INTF_PIN_ID: 3458 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3459 dir, pin_idx); 3460 if (ret < 0) 3461 return ret; 3462 break; 3463 3464 default: 3465 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3466 return -EINVAL; 3467 } 3468 tkn_count++; 3469 3470 return tkn_count; 3471 } 3472 3473 static int skl_tplg_get_manifest_uuid(struct device *dev, 3474 struct skl *skl, 3475 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 3476 { 3477 static int ref_count; 3478 struct skl_module *mod; 3479 3480 if (uuid_tkn->token == SKL_TKN_UUID) { 3481 mod = skl->modules[ref_count]; 3482 memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid)); 3483 ref_count++; 3484 } else { 3485 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token); 3486 return -EINVAL; 3487 } 3488 3489 return 0; 3490 } 3491 3492 /* 3493 * Fill the manifest structure by parsing the tokens based on the 3494 * type. 
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);

			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid);
			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, skl);
			if (ret < 0)
				return ret;

			/* each call may consume several elements */
			tkn_count = tkn_count + ret;
			tkn_elem++;
		}
		tuple_size += (tkn_count * sizeof(*tkn_elem));
		tkn_count = 0;
	}

	return off;
}

/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
3556 */ 3557 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3558 struct device *dev, struct skl *skl) 3559 { 3560 struct snd_soc_tplg_vendor_array *array; 3561 int num_blocks, block_size = 0, block_type, off = 0; 3562 char *data; 3563 int ret; 3564 3565 /* Read the NUM_DATA_BLOCKS descriptor */ 3566 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3567 ret = skl_tplg_get_desc_blocks(dev, array); 3568 if (ret < 0) 3569 return ret; 3570 num_blocks = ret; 3571 3572 off += array->size; 3573 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3574 while (num_blocks > 0) { 3575 array = (struct snd_soc_tplg_vendor_array *) 3576 (manifest->priv.data + off); 3577 ret = skl_tplg_get_desc_blocks(dev, array); 3578 3579 if (ret < 0) 3580 return ret; 3581 block_type = ret; 3582 off += array->size; 3583 3584 array = (struct snd_soc_tplg_vendor_array *) 3585 (manifest->priv.data + off); 3586 3587 ret = skl_tplg_get_desc_blocks(dev, array); 3588 3589 if (ret < 0) 3590 return ret; 3591 block_size = ret; 3592 off += array->size; 3593 3594 array = (struct snd_soc_tplg_vendor_array *) 3595 (manifest->priv.data + off); 3596 3597 data = (manifest->priv.data + off); 3598 3599 if (block_type == SKL_TYPE_TUPLE) { 3600 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3601 block_size); 3602 3603 if (ret < 0) 3604 return ret; 3605 3606 --num_blocks; 3607 } else { 3608 return -EINVAL; 3609 } 3610 off += ret; 3611 } 3612 3613 return 0; 3614 } 3615 3616 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, 3617 struct snd_soc_tplg_manifest *manifest) 3618 { 3619 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3620 struct skl *skl = bus_to_skl(bus); 3621 3622 /* proceed only if we have private data defined */ 3623 if (manifest->priv.size == 0) 3624 return 0; 3625 3626 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3627 3628 if (skl->skl_sst->lib_count > SKL_MAX_LIB) { 3629 dev_err(bus->dev, "Exceeding max Library 
count. Got:%d\n", 3630 skl->skl_sst->lib_count); 3631 return -EINVAL; 3632 } 3633 3634 return 0; 3635 } 3636 3637 static struct snd_soc_tplg_ops skl_tplg_ops = { 3638 .widget_load = skl_tplg_widget_load, 3639 .control_load = skl_tplg_control_load, 3640 .bytes_ext_ops = skl_tlv_ops, 3641 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3642 .io_ops = skl_tplg_kcontrol_ops, 3643 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3644 .manifest = skl_manifest_load, 3645 .dai_load = skl_dai_load, 3646 }; 3647 3648 /* 3649 * A pipe can have multiple modules, each of them will be a DAPM widget as 3650 * well. While managing a pipeline we need to get the list of all the 3651 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3652 * helps to get the SKL type widgets in that pipeline 3653 */ 3654 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3655 { 3656 struct snd_soc_dapm_widget *w; 3657 struct skl_module_cfg *mcfg = NULL; 3658 struct skl_pipe_module *p_module = NULL; 3659 struct skl_pipe *pipe; 3660 3661 list_for_each_entry(w, &component->card->widgets, list) { 3662 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3663 mcfg = w->priv; 3664 pipe = mcfg->pipe; 3665 3666 p_module = devm_kzalloc(component->dev, 3667 sizeof(*p_module), GFP_KERNEL); 3668 if (!p_module) 3669 return -ENOMEM; 3670 3671 p_module->w = w; 3672 list_add_tail(&p_module->node, &pipe->w_list); 3673 } 3674 } 3675 3676 return 0; 3677 } 3678 3679 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe) 3680 { 3681 struct skl_pipe_module *w_module; 3682 struct snd_soc_dapm_widget *w; 3683 struct skl_module_cfg *mconfig; 3684 bool host_found = false, link_found = false; 3685 3686 list_for_each_entry(w_module, &pipe->w_list, node) { 3687 w = w_module->w; 3688 mconfig = w->priv; 3689 3690 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3691 host_found = true; 3692 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3693 link_found 
= true; 3694 } 3695 3696 if (host_found && link_found) 3697 pipe->passthru = true; 3698 else 3699 pipe->passthru = false; 3700 } 3701 3702 /* This will be read from topology manifest, currently defined here */ 3703 #define SKL_MAX_MCPS 30000000 3704 #define SKL_FW_MAX_MEM 1000000 3705 3706 /* 3707 * SKL topology init routine 3708 */ 3709 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) 3710 { 3711 int ret; 3712 const struct firmware *fw; 3713 struct skl *skl = bus_to_skl(bus); 3714 struct skl_pipeline *ppl; 3715 3716 ret = request_firmware(&fw, skl->tplg_name, bus->dev); 3717 if (ret < 0) { 3718 dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", 3719 skl->tplg_name, ret); 3720 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3721 if (ret < 0) { 3722 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3723 "dfw_sst.bin", ret); 3724 return ret; 3725 } 3726 } 3727 3728 /* 3729 * The complete tplg for SKL is loaded as index 0, we don't use 3730 * any other index 3731 */ 3732 ret = snd_soc_tplg_component_load(component, 3733 &skl_tplg_ops, fw, 0); 3734 if (ret < 0) { 3735 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3736 release_firmware(fw); 3737 return -EINVAL; 3738 } 3739 3740 skl->resource.max_mcps = SKL_MAX_MCPS; 3741 skl->resource.max_mem = SKL_FW_MAX_MEM; 3742 3743 skl->tplg = fw; 3744 ret = skl_tplg_create_pipe_widget_list(component); 3745 if (ret < 0) 3746 return ret; 3747 3748 list_for_each_entry(ppl, &skl->ppl_list, node) 3749 skl_tplg_set_pipe_type(skl, ppl->pipe); 3750 3751 return 0; 3752 } 3753