// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

/* Bits of the topology-supplied fixup mask selecting which params to fix up */
#define SKL_CH_FIXUP_MASK		(1 << 0)
#define SKL_RATE_FIXUP_MASK		(1 << 1)
#define SKL_FMT_FIXUP_MASK		(1 << 2)
#define SKL_IN_DIR_BIT_MASK		BIT(0)
#define SKL_PIN_COUNT_MASK		GENMASK(7, 4)

/* Candidate DMIC channel selections for mono/stereo/trio/quatro capture */
static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

/* true when (ch, freq, bps) exactly match the given pipe-config params */
#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

/*
 * Account one more user of the given D0i3 capability class, so the power
 * management code knows which low-power state the DSP may enter.
 */
void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

/* Drop the reference taken by skl_tplg_d0i3_get() for this capability class */
void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * SKL DSP driver modelling uses only few DAPM widgets so for rest we will
 * ignore. This helper checks if the SKL driver handles this widget type.
 * Returns true only for widget types the driver models as DSP modules.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	/* the widget must belong to this device's DAPM context */
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:

		return false;
	default:
		return true;
	}
}

/* Dump the pin-0 input/output formats of a module config to the debug log */
static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[0];

	dev_dbg(skl->dev, "Dumping config\n");
	dev_dbg(skl->dev, "Input Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
				iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "Output Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
				iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

/*
 * Rebuild the channel map for the first 'chs' channels: one nibble per
 * channel slot, used slots numbered from 0, unused slots left as 0xF.
 */
static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

/*
 * Apply the requested fixups (rate, channel count, sample format) from the
 * pipe params onto one module pin format; 'fixup' is a SKL_*_FIXUP_MASK set.
 */
static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		/* channel count changed, so the slot map must follow */
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'
 * Topology will tell driver which type of fixup is to be applied by
 * supplying the fixup mask, so based on that we calculate the output
 *
 * Now In FE the pcm hw_params is source/target format. Same is applicable
 * for BE with its hw_params invoked.
186 * here based on FE, BE pipeline and direction we calculate the input and 187 * outfix and then apply that for a module 188 */ 189 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg, 190 struct skl_pipe_params *params, bool is_fe) 191 { 192 int in_fixup, out_fixup; 193 struct skl_module_fmt *in_fmt, *out_fmt; 194 195 /* Fixups will be applied to pin 0 only */ 196 in_fmt = &m_cfg->module->formats[0].inputs[0].fmt; 197 out_fmt = &m_cfg->module->formats[0].outputs[0].fmt; 198 199 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 200 if (is_fe) { 201 in_fixup = m_cfg->params_fixup; 202 out_fixup = (~m_cfg->converter) & 203 m_cfg->params_fixup; 204 } else { 205 out_fixup = m_cfg->params_fixup; 206 in_fixup = (~m_cfg->converter) & 207 m_cfg->params_fixup; 208 } 209 } else { 210 if (is_fe) { 211 out_fixup = m_cfg->params_fixup; 212 in_fixup = (~m_cfg->converter) & 213 m_cfg->params_fixup; 214 } else { 215 in_fixup = m_cfg->params_fixup; 216 out_fixup = (~m_cfg->converter) & 217 m_cfg->params_fixup; 218 } 219 } 220 221 skl_tplg_update_params(in_fmt, params, in_fixup); 222 skl_tplg_update_params(out_fmt, params, out_fixup); 223 } 224 225 /* 226 * A module needs input and output buffers, which are dependent upon pcm 227 * params, so once we have calculate params, we need buffer calculation as 228 * well. 
 */
static void skl_tplg_update_buffer_size(struct skl_dev *skl,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/* Since fixups is applied to pin 0 only, ibs, obs needs
	 * change for pin 0 only
	 */
	res = &mcfg->module->resources[0];
	in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[0].outputs[0].fmt;

	/* NOTE(review): x5 for SRC modules — presumably headroom for rate
	 * conversion; confirm against firmware spec
	 */
	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	/* buffer size = samples per ms * channels * container bytes */
	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}

/* Map a topology SKL_DEVICE_* type to the corresponding NHLT device type */
static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

/*
 * If the topology did not supply a capabilities blob for a BE copier,
 * look one up in the NHLT table using the endpoint's default format.
 * Returns 0 on success (or when a blob already exists), -EINVAL for an
 * unsupported device type, -EIO when no matching blob is found.
 */
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(skl->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		/* DMIC is capture-only: take the format from the input pin */
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		/* direction depends on whether this copier is a source */
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

/*
 * Apply the FE/BE param fixups to a widget's module config and recompute
 * its buffer sizes, dumping the config before and after for debugging.
 * No-op when the topology requested no fixups.
 */
static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE updation\n",
				w->name);

	skl_dump_mconfig(skl, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(skl, m_cfg);

	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER updation\n",
				w->name);

	skl_dump_mconfig(skl, m_cfg);
}

/*
 * some modules can have multiple params set from user control and
 * need to be set after module is initialized.
 * If set_param flag is
 * set module params will be done after module is initialised.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	/* first send the format blob, if it is flagged for SET stage */
	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	/* then any TLV byte controls whose algo data is flagged for SET */
	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(skl,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * some module params can be set from user control and this is required as
 * the module is initialized. If a module param is required in init it is
 * identified by the set_param flag. If the set_param flag is not set, then
 * this parameter needs to be set as part of module init.
421 */ 422 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w) 423 { 424 const struct snd_kcontrol_new *k; 425 struct soc_bytes_ext *sb; 426 struct skl_algo_data *bc; 427 struct skl_module_cfg *mconfig = w->priv; 428 int i; 429 430 for (i = 0; i < w->num_kcontrols; i++) { 431 k = &w->kcontrol_news[i]; 432 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 433 sb = (struct soc_bytes_ext *)k->private_value; 434 bc = (struct skl_algo_data *)sb->dobj.private; 435 436 if (bc->set_params != SKL_PARAM_INIT) 437 continue; 438 439 mconfig->formats_config.caps = (u32 *)bc->params; 440 mconfig->formats_config.caps_size = bc->size; 441 442 break; 443 } 444 } 445 446 return 0; 447 } 448 449 static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe, 450 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg) 451 { 452 switch (mcfg->dev_type) { 453 case SKL_DEVICE_HDAHOST: 454 return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params); 455 456 case SKL_DEVICE_HDALINK: 457 return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params); 458 } 459 460 return 0; 461 } 462 463 /* 464 * Inside a pipe instance, we can have various modules. 
These modules need 465 * to instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by 466 * skl_init_module() routine, so invoke that for all modules in a pipeline 467 */ 468 static int 469 skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe) 470 { 471 struct skl_pipe_module *w_module; 472 struct snd_soc_dapm_widget *w; 473 struct skl_module_cfg *mconfig; 474 u8 cfg_idx; 475 int ret = 0; 476 477 list_for_each_entry(w_module, &pipe->w_list, node) { 478 guid_t *uuid_mod; 479 w = w_module->w; 480 mconfig = w->priv; 481 482 /* check if module ids are populated */ 483 if (mconfig->id.module_id < 0) { 484 dev_err(skl->dev, 485 "module %pUL id not populated\n", 486 (guid_t *)mconfig->guid); 487 return -EIO; 488 } 489 490 cfg_idx = mconfig->pipe->cur_config_idx; 491 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 492 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 493 494 if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) { 495 ret = skl->dsp->fw_ops.load_mod(skl->dsp, 496 mconfig->id.module_id, mconfig->guid); 497 if (ret < 0) 498 return ret; 499 500 mconfig->m_state = SKL_MODULE_LOADED; 501 } 502 503 /* prepare the DMA if the module is gateway cpr */ 504 ret = skl_tplg_module_prepare(skl, pipe, w, mconfig); 505 if (ret < 0) 506 return ret; 507 508 /* update blob if blob is null for be with default value */ 509 skl_tplg_update_be_blob(w, skl); 510 511 /* 512 * apply fix/conversion to module params based on 513 * FE/BE params 514 */ 515 skl_tplg_update_module_params(w, skl); 516 uuid_mod = (guid_t *)mconfig->guid; 517 mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod, 518 mconfig->id.instance_id); 519 if (mconfig->id.pvt_id < 0) 520 return ret; 521 skl_tplg_set_module_init_data(w); 522 523 ret = skl_dsp_get_core(skl->dsp, mconfig->core_id); 524 if (ret < 0) { 525 dev_err(skl->dev, "Failed to wake up core %d ret=%d\n", 526 mconfig->core_id, ret); 527 return ret; 528 } 529 530 ret = skl_init_module(skl, mconfig); 531 if (ret 
< 0) { 532 skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id); 533 goto err; 534 } 535 536 ret = skl_tplg_set_module_params(w, skl); 537 if (ret < 0) 538 goto err; 539 } 540 541 return 0; 542 err: 543 skl_dsp_put_core(skl->dsp, mconfig->core_id); 544 return ret; 545 } 546 547 static int skl_tplg_unload_pipe_modules(struct skl_dev *skl, 548 struct skl_pipe *pipe) 549 { 550 int ret = 0; 551 struct skl_pipe_module *w_module = NULL; 552 struct skl_module_cfg *mconfig = NULL; 553 554 list_for_each_entry(w_module, &pipe->w_list, node) { 555 guid_t *uuid_mod; 556 mconfig = w_module->w->priv; 557 uuid_mod = (guid_t *)mconfig->guid; 558 559 if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod && 560 mconfig->m_state > SKL_MODULE_UNINIT) { 561 ret = skl->dsp->fw_ops.unload_mod(skl->dsp, 562 mconfig->id.module_id); 563 if (ret < 0) 564 return -EIO; 565 } 566 skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id); 567 568 ret = skl_dsp_put_core(skl->dsp, mconfig->core_id); 569 if (ret < 0) { 570 /* don't return; continue with other modules */ 571 dev_err(skl->dev, "Failed to sleep core %d ret=%d\n", 572 mconfig->core_id, ret); 573 } 574 } 575 576 /* no modules to unload in this path, so return */ 577 return ret; 578 } 579 580 /* 581 * Here, we select pipe format based on the pipe type and pipe 582 * direction to determine the current config index for the pipeline. 583 * The config index is then used to select proper module resources. 584 * Intermediate pipes currently have a fixed format hence we select the 585 * 0th configuratation by default for such pipes. 
 */
static int
skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	/* no configs supplied by topology: config 0 by convention */
	if (pipe->nr_cfgs == 0) {
		pipe->cur_config_idx = 0;
		return 0;
	}

	/* intermediate pipe: fixed format, take the 0th config */
	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
		dev_dbg(skl->dev, "No conn_type detected, take 0th config\n");
		pipe->cur_config_idx = 0;
		pipe->memory_pages = pconfig->mem_pages;

		return 0;
	}

	/*
	 * FE playback and BE capture pipes are matched against the input
	 * format of each config; the mirrored cases against the output.
	 */
	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
		pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
		(pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
		pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			pipe->cur_config_idx = i;
			pipe->memory_pages = pconfig->mem_pages;
			dev_dbg(skl->dev, "Using pipe config: %d\n", i);

			return 0;
		}
	}

	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
 * need create the pipeline.
 * So we do following:
 *   - Create the pipeline
 *   - Initialize the modules in pipeline
 *   - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(skl, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink, pairwise along w_list */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(skl, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(skl, modules->src,
							modules->dst);
		}
	}

	return 0;
}

/*
 * For KPB modules, rewrite the instance ids inside a mod-id/instance-id
 * map param blob to the driver's private ids. Other module types are
 * left untouched.
 */
static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
/*
 * Some modules require params to be set after the module is bound to
 * all pins connected.
 *
 * The module provider initializes set_param flag for such modules and we
 * send params after binding
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_dev *skl)
{
	int i, ret;
	/* NOTE(review): mconfig (w->priv) and mcfg appear to refer to the
	 * same widget config at all call sites — confirm before merging them
	 */
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				/* work on a copy: id rewrite mutates blob */
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(skl, params, bc->max,
								mconfig);

				ret = skl_set_module_params(skl, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/* Look up the firmware module id registered for the given module UUID */
static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &skl->uuid_list, list) {
		if (guid_equal(uuid, &module->uuid))
			return module->id;
	}

	return -EINVAL;
}

/*
 * Convert a UUID-keyed KPB bind-param blob attached to a TLV control into
 * a module-id-keyed one, replacing the control's params in place.
 */
static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = struct_size(params, u.map, uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		/* swap the control's blob for the id-keyed version */
		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from UUID mentioned in the
 * post bind params
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post bind params are used for only for KPB
	 * to set copier instances to drain the data
	 * in fast mode
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->dev,
				"%s: invalid kpb post bind params\n",
				__func__);
}

/*
 * Queue a src->dst bind on skl->bind_list so it can be performed later,
 * once dst is initialized. Duplicate entries are skipped.
 */
static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for module with static pin connection */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			/* already queued? then nothing to do */
			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

/*
 * Walk the DAPM graph downstream from 'w', bind src_mconfig to every SKL
 * sink module found (recursing through non-SKL widgets), deferring binds
 * whose sink is not yet initialized, and start sink pipes that are BE.
 */
static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl_dev *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(skl->dev,
			"%s: src widget=%s\n", __func__, w->name);
		dev_dbg(skl->dev,
			"%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		/* not an SKL module widget: recurse through it */
		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * here we will check widgets in sink pipelines, so that
		 * can be any widgets type and we are only interested if
		 * they are ones used for SKL so check that first
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink, skl->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than PGA leaf can be connected
			 * directly or via switch to a module in another
			 * pipeline. EX: reference path
			 * when the path is enabled, the dst module that needs
			 * to be bound may not be initialized. if the module is
			 * not initialized, add these modules in the deferred
			 * bind list and when the dst module is initialised,
			 * bind this module to the dst_module in deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}


			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w,
							src_mconfig, skl);
			skl_tplg_set_module_bind_params(sink,
							sink_mconfig, skl);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(skl,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	/* no SKL sink on this level: continue down the last seen path */
	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
 * we need to do following:
 *   - Bind to sink pipeline
 *     Since the sink pipes can be running and we don't get mixer event on
 *     connect for already running mixer, we need to find the sink pipes
 *     here and bind to them. This way dynamic connect works.
 * - Start sink pipeline, if not running
 * - Then run current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(skl, src_mconfig->pipe);

	return 0;
}

/*
 * Walk the source paths of @w looking for the nearest upstream widget that
 * belongs to a SKL DSP pipeline (i.e. has private module data and passes
 * is_skl_dsp_widget_type()). Recurses upward through non-DSP widgets.
 * Returns the DSP widget, or NULL if none is reachable.
 */
static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		/* remember the last seen source for the recursive fallback */
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);

		/*
		 * here we will check widgets in sink pipelines, so that can
		 * be any widgets type and we are only interested if they are
		 * ones used for SKL so check that first
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source, skl->dev)) {
			return p->source;
		}
	}

	/* no DSP widget at this level; recurse through the last source seen */
	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * in the Post-PMU event of mixer we need to do following:
 * - Check if this pipe is running
 * - if not, then
 *	- bind this pipeline to its source pipeline
 *	  if source pipe is already running, this means it is a dynamic
 *	  connection and we need to bind only to that pipe
 *	- start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If source pipe is already started, that means source is driving
	 * one more sink before this sink got connected, Since source is
	 * started, bind this sink to source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check pipe state, then no need to bind or start the
		 * pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);

		/* FE pipes are run from the PCM ops, not from here */
		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(skl, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * in the Pre-PMD event of mixer we need to do following:
 * - Stop the pipe
 * - find the source connections and remove that from dapm_path_list
 * - unbind with source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
	if (ret)
		return ret;

	/* unbind every input pin that still has a completed bind */
	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			/* last iteration's status is what gets returned */
			ret = skl_unbind_modules(skl,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * in the Post-PMD event of mixer we need to do following:
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (modules are not required to be explicitly
 *   deleted, pipeline delete is enough here
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	/* first pass: clean up any deferred binds touching this pipe */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, Unbind the
			 * modules from deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(skl, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	/*
	 * second pass: unbind adjacent module pairs along the pipe.
	 * NOTE(review): src_module is not reset after the first pass, so if
	 * bind_list was non-empty it enters this loop non-NULL — confirm the
	 * resulting extra first unbind is intended/harmless.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(skl, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(skl, mconfig->pipe);

	/* mark every module of the pipe uninitialised before unloading */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(skl, s_pipe);
}

/*
 * in the Post-PMD event of PGA we need to do following:
 * - Stop the pipeline
 * - In source pipe is connected, unbind with source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(skl, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connecter and if path is found that means
			 * unbind between source and sink has not happened yet
			 */
			ret = skl_unbind_modules(skl, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required that is created as another pipe entity.
 * The mixer is responsible for pipe management and represent a pipeline
 * instance
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	/* dispatch the DAPM power event to the matching mixer handler */
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assumed rest of the modules in pipeline are PGA. But we
 * are interested in last PGA (leaf PGA) in a pipeline to disconnect with
 * the sink when it is running (two FE to one BE or one FE to two BE)
 * scenarios
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	/* PGA widgets only act on power-up start and full power-down */
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * TLV byte-control read: refresh cached params from the DSP when the
 * widget is powered, then copy param_id, size and payload to user space.
 */
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/*
	 * decrement size for TLV header
	 * NOTE(review): no check that size >= 2*sizeof(u32); an undersized
	 * request would wrap the unsigned subtraction — confirm callers
	 * always pass at least the header size.
	 */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

/*
 * TLV byte-control write: copy the payload (sans TLV header) from user
 * space into the cached params and, if the widget is powered, push it to
 * the DSP immediately.
 */
static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		/*
		 * Widget data is expected to be stripped of T and L
		 */
		size -= 2 * sizeof(unsigned int);
		data += 2;

		/* reject payloads larger than the cached buffer */
		if (size > ac->max)
			return -EINVAL;
		ac->size = size;

		if (copy_from_user(ac->params, data, size))
			return -EFAULT;

		if (w->power)
			return skl_set_module_params(skl,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

/*
 * Mic-select enum read: report the stored channel-combo index only when
 * the control's channel type matches the module's current type, else 0
 * (INVALID).
 */
static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

/*
 * Copy the mic-select blob into the module's formats_config so it is sent
 * as a set-param on next module init. Allocates the caps buffer on first
 * use (devm, freed with the device).
 */
static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

/*
 * Mic-select enum write: translate the chosen channel-combo index into a
 * per-channel gain blob via the mic_*_list tables and stage it with
 * skl_fill_mic_sel_params().
 */
static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* channel type enum map to number of chanels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the dma id for host and link. In case of passthrough
 * pipeline, this will both host and link in the same
 * pipeline, so need to copy the link and host based on dev_type
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		/* only copy the fields belonging to this module's side */
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by hw_params of the DAI.
1528 * On hw_params, the params are stored in Gateway module of the FE and we 1529 * need to calculate the format in DSP module configuration, that 1530 * conversion is done here 1531 */ 1532 int skl_tplg_update_pipe_params(struct device *dev, 1533 struct skl_module_cfg *mconfig, 1534 struct skl_pipe_params *params) 1535 { 1536 struct skl_module_res *res = &mconfig->module->resources[0]; 1537 struct skl_dev *skl = get_skl_ctx(dev); 1538 struct skl_module_fmt *format = NULL; 1539 u8 cfg_idx = mconfig->pipe->cur_config_idx; 1540 1541 skl_tplg_fill_dma_id(mconfig, params); 1542 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx; 1543 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx; 1544 1545 if (skl->nr_modules) 1546 return 0; 1547 1548 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) 1549 format = &mconfig->module->formats[0].inputs[0].fmt; 1550 else 1551 format = &mconfig->module->formats[0].outputs[0].fmt; 1552 1553 /* set the hw_params */ 1554 format->s_freq = params->s_freq; 1555 format->channels = params->ch; 1556 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt); 1557 1558 /* 1559 * 16 bit is 16 bit container whereas 24 bit is in 32 bit 1560 * container so update bit depth accordingly 1561 */ 1562 switch (format->valid_bit_depth) { 1563 case SKL_DEPTH_16BIT: 1564 format->bit_depth = format->valid_bit_depth; 1565 break; 1566 1567 case SKL_DEPTH_24BIT: 1568 case SKL_DEPTH_32BIT: 1569 format->bit_depth = SKL_DEPTH_32BIT; 1570 break; 1571 1572 default: 1573 dev_err(dev, "Invalid bit depth %x for pipe\n", 1574 format->valid_bit_depth); 1575 return -EINVAL; 1576 } 1577 1578 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1579 res->ibs = (format->s_freq / 1000) * 1580 (format->channels) * 1581 (format->bit_depth >> 3); 1582 } else { 1583 res->obs = (format->s_freq / 1000) * 1584 (format->channels) * 1585 (format->bit_depth >> 3); 1586 } 1587 1588 return 0; 1589 } 1590 1591 /* 1592 * Query the module config for the FE DAI 1593 * This is used to find 
the hw_params set for that DAI and apply to FE 1594 * pipeline 1595 */ 1596 struct skl_module_cfg * 1597 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream) 1598 { 1599 struct snd_soc_dapm_widget *w; 1600 struct snd_soc_dapm_path *p = NULL; 1601 1602 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1603 w = dai->playback_widget; 1604 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1605 if (p->connect && p->sink->power && 1606 !is_skl_dsp_widget_type(p->sink, dai->dev)) 1607 continue; 1608 1609 if (p->sink->priv) { 1610 dev_dbg(dai->dev, "set params for %s\n", 1611 p->sink->name); 1612 return p->sink->priv; 1613 } 1614 } 1615 } else { 1616 w = dai->capture_widget; 1617 snd_soc_dapm_widget_for_each_source_path(w, p) { 1618 if (p->connect && p->source->power && 1619 !is_skl_dsp_widget_type(p->source, dai->dev)) 1620 continue; 1621 1622 if (p->source->priv) { 1623 dev_dbg(dai->dev, "set params for %s\n", 1624 p->source->name); 1625 return p->source->priv; 1626 } 1627 } 1628 } 1629 1630 return NULL; 1631 } 1632 1633 static struct skl_module_cfg *skl_get_mconfig_pb_cpr( 1634 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1635 { 1636 struct snd_soc_dapm_path *p; 1637 struct skl_module_cfg *mconfig = NULL; 1638 1639 snd_soc_dapm_widget_for_each_source_path(w, p) { 1640 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) { 1641 if (p->connect && 1642 (p->sink->id == snd_soc_dapm_aif_out) && 1643 p->source->priv) { 1644 mconfig = p->source->priv; 1645 return mconfig; 1646 } 1647 mconfig = skl_get_mconfig_pb_cpr(dai, p->source); 1648 if (mconfig) 1649 return mconfig; 1650 } 1651 } 1652 return mconfig; 1653 } 1654 1655 static struct skl_module_cfg *skl_get_mconfig_cap_cpr( 1656 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w) 1657 { 1658 struct snd_soc_dapm_path *p; 1659 struct skl_module_cfg *mconfig = NULL; 1660 1661 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1662 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) { 1663 if (p->connect && 1664 (p->source->id == 
snd_soc_dapm_aif_in) && 1665 p->sink->priv) { 1666 mconfig = p->sink->priv; 1667 return mconfig; 1668 } 1669 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink); 1670 if (mconfig) 1671 return mconfig; 1672 } 1673 } 1674 return mconfig; 1675 } 1676 1677 struct skl_module_cfg * 1678 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream) 1679 { 1680 struct snd_soc_dapm_widget *w; 1681 struct skl_module_cfg *mconfig; 1682 1683 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1684 w = dai->playback_widget; 1685 mconfig = skl_get_mconfig_pb_cpr(dai, w); 1686 } else { 1687 w = dai->capture_widget; 1688 mconfig = skl_get_mconfig_cap_cpr(dai, w); 1689 } 1690 return mconfig; 1691 } 1692 1693 static u8 skl_tplg_be_link_type(int dev_type) 1694 { 1695 int ret; 1696 1697 switch (dev_type) { 1698 case SKL_DEVICE_BT: 1699 ret = NHLT_LINK_SSP; 1700 break; 1701 1702 case SKL_DEVICE_DMIC: 1703 ret = NHLT_LINK_DMIC; 1704 break; 1705 1706 case SKL_DEVICE_I2S: 1707 ret = NHLT_LINK_SSP; 1708 break; 1709 1710 case SKL_DEVICE_HDALINK: 1711 ret = NHLT_LINK_HDA; 1712 break; 1713 1714 default: 1715 ret = NHLT_LINK_INVALID; 1716 break; 1717 } 1718 1719 return ret; 1720 } 1721 1722 /* 1723 * Fill the BE gateway parameters 1724 * The BE gateway expects a blob of parameters which are kept in the ACPI 1725 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance. 
1726 * The port can have multiple settings so pick based on the PCM 1727 * parameters 1728 */ 1729 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, 1730 struct skl_module_cfg *mconfig, 1731 struct skl_pipe_params *params) 1732 { 1733 struct nhlt_specific_cfg *cfg; 1734 struct skl_dev *skl = get_skl_ctx(dai->dev); 1735 int link_type = skl_tplg_be_link_type(mconfig->dev_type); 1736 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type); 1737 1738 skl_tplg_fill_dma_id(mconfig, params); 1739 1740 if (link_type == NHLT_LINK_HDA) 1741 return 0; 1742 1743 /* update the blob based on virtual bus_id*/ 1744 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type, 1745 params->s_fmt, params->ch, 1746 params->s_freq, params->stream, 1747 dev_type); 1748 if (cfg) { 1749 mconfig->formats_config.caps_size = cfg->size; 1750 mconfig->formats_config.caps = (u32 *) &cfg->caps; 1751 } else { 1752 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n", 1753 mconfig->vbus_id, link_type, 1754 params->stream); 1755 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n", 1756 params->ch, params->s_freq, params->s_fmt); 1757 return -EINVAL; 1758 } 1759 1760 return 0; 1761 } 1762 1763 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai, 1764 struct snd_soc_dapm_widget *w, 1765 struct skl_pipe_params *params) 1766 { 1767 struct snd_soc_dapm_path *p; 1768 int ret = -EIO; 1769 1770 snd_soc_dapm_widget_for_each_source_path(w, p) { 1771 if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) && 1772 p->source->priv) { 1773 1774 ret = skl_tplg_be_fill_pipe_params(dai, 1775 p->source->priv, params); 1776 if (ret < 0) 1777 return ret; 1778 } else { 1779 ret = skl_tplg_be_set_src_pipe_params(dai, 1780 p->source, params); 1781 if (ret < 0) 1782 return ret; 1783 } 1784 } 1785 1786 return ret; 1787 } 1788 1789 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai, 1790 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params) 1791 { 1792 struct 
snd_soc_dapm_path *p = NULL; 1793 int ret = -EIO; 1794 1795 snd_soc_dapm_widget_for_each_sink_path(w, p) { 1796 if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) && 1797 p->sink->priv) { 1798 1799 ret = skl_tplg_be_fill_pipe_params(dai, 1800 p->sink->priv, params); 1801 if (ret < 0) 1802 return ret; 1803 } else { 1804 ret = skl_tplg_be_set_sink_pipe_params( 1805 dai, p->sink, params); 1806 if (ret < 0) 1807 return ret; 1808 } 1809 } 1810 1811 return ret; 1812 } 1813 1814 /* 1815 * BE hw_params can be a source parameters (capture) or sink parameters 1816 * (playback). Based on sink and source we need to either find the source 1817 * list or the sink list and set the pipeline parameters 1818 */ 1819 int skl_tplg_be_update_params(struct snd_soc_dai *dai, 1820 struct skl_pipe_params *params) 1821 { 1822 struct snd_soc_dapm_widget *w; 1823 1824 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) { 1825 w = dai->playback_widget; 1826 1827 return skl_tplg_be_set_src_pipe_params(dai, w, params); 1828 1829 } else { 1830 w = dai->capture_widget; 1831 1832 return skl_tplg_be_set_sink_pipe_params(dai, w, params); 1833 } 1834 1835 return 0; 1836 } 1837 1838 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = { 1839 {SKL_MIXER_EVENT, skl_tplg_mixer_event}, 1840 {SKL_VMIXER_EVENT, skl_tplg_mixer_event}, 1841 {SKL_PGA_EVENT, skl_tplg_pga_event}, 1842 }; 1843 1844 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = { 1845 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get, 1846 skl_tplg_tlv_control_set}, 1847 }; 1848 1849 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = { 1850 { 1851 .id = SKL_CONTROL_TYPE_MIC_SELECT, 1852 .get = skl_tplg_mic_control_get, 1853 .put = skl_tplg_mic_control_set, 1854 }, 1855 }; 1856 1857 static int skl_tplg_fill_pipe_cfg(struct device *dev, 1858 struct skl_pipe *pipe, u32 tkn, 1859 u32 tkn_val, int conf_idx, int dir) 1860 { 1861 struct skl_pipe_fmt *fmt; 1862 struct skl_path_config *config; 
1863 1864 switch (dir) { 1865 case SKL_DIR_IN: 1866 fmt = &pipe->configs[conf_idx].in_fmt; 1867 break; 1868 1869 case SKL_DIR_OUT: 1870 fmt = &pipe->configs[conf_idx].out_fmt; 1871 break; 1872 1873 default: 1874 dev_err(dev, "Invalid direction: %d\n", dir); 1875 return -EINVAL; 1876 } 1877 1878 config = &pipe->configs[conf_idx]; 1879 1880 switch (tkn) { 1881 case SKL_TKN_U32_CFG_FREQ: 1882 fmt->freq = tkn_val; 1883 break; 1884 1885 case SKL_TKN_U8_CFG_CHAN: 1886 fmt->channels = tkn_val; 1887 break; 1888 1889 case SKL_TKN_U8_CFG_BPS: 1890 fmt->bps = tkn_val; 1891 break; 1892 1893 case SKL_TKN_U32_PATH_MEM_PGS: 1894 config->mem_pages = tkn_val; 1895 break; 1896 1897 default: 1898 dev_err(dev, "Invalid token config: %d\n", tkn); 1899 return -EINVAL; 1900 } 1901 1902 return 0; 1903 } 1904 1905 static int skl_tplg_fill_pipe_tkn(struct device *dev, 1906 struct skl_pipe *pipe, u32 tkn, 1907 u32 tkn_val) 1908 { 1909 1910 switch (tkn) { 1911 case SKL_TKN_U32_PIPE_CONN_TYPE: 1912 pipe->conn_type = tkn_val; 1913 break; 1914 1915 case SKL_TKN_U32_PIPE_PRIORITY: 1916 pipe->pipe_priority = tkn_val; 1917 break; 1918 1919 case SKL_TKN_U32_PIPE_MEM_PGS: 1920 pipe->memory_pages = tkn_val; 1921 break; 1922 1923 case SKL_TKN_U32_PMODE: 1924 pipe->lp_mode = tkn_val; 1925 break; 1926 1927 case SKL_TKN_U32_PIPE_DIRECTION: 1928 pipe->direction = tkn_val; 1929 break; 1930 1931 case SKL_TKN_U32_NUM_CONFIGS: 1932 pipe->nr_cfgs = tkn_val; 1933 break; 1934 1935 default: 1936 dev_err(dev, "Token not handled %d\n", tkn); 1937 return -EINVAL; 1938 } 1939 1940 return 0; 1941 } 1942 1943 /* 1944 * Add pipeline by parsing the relevant tokens 1945 * Return an existing pipe if the pipe already exists. 
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl_dev *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	/* reuse an already-created pipe with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return -EEXIST;
		}
	}

	/* devm allocations: freed automatically with the device */
	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}

/* Copy a UUID token into @guid; errors if the token is not SKL_TKN_UUID. */
static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
	if (uuid_tkn->token == SKL_TKN_UUID) {
		guid_copy(guid, (guid_t *)&uuid_tkn->uuid);
		return 0;
	}

	dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);

	return -EINVAL;
}

/* Store one pin token into m_pin[pin_index] (module id, instance or UUID). */
static int skl_tplg_fill_pin(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_pin *m_pin,
		int pin_index)
{
	int ret;

	switch (tkn_elem->token) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_UUID:
		ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
		if (ret < 0)
			return ret;

		break;

	default:
		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
		struct skl_module_cfg *mconfig,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		int dir, int pin_count)
{
	int ret;
	struct skl_module_pin *m_pin;

	/* pick the in or out pin array by direction */
	switch (dir) {
	case SKL_DIR_IN:
		m_pin = mconfig->m_in_pin;
		break;

	case SKL_DIR_OUT:
		m_pin = mconfig->m_out_pin;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
	if (ret < 0)
		return ret;

	/* a freshly described pin starts out unbound and unused */
	m_pin[pin_count].in_use = false;
	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

	return 0;
}

/*
 * Fill up input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_fmt *dst_fmt,
		u32 tkn, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
		break;

	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
		break;

	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_CH_CONFIG:
		dst_fmt->ch_cfg = value;
		break;

	case SKL_TKN_U32_FMT_INTERLEAVE:
		dst_fmt->interleaving_style = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
		dst_fmt->sample_type = value;
		break;

	case SKL_TKN_U32_FMT_CH_MAP:
		dst_fmt->ch_map = value;
		break;

	default:
		dev_err(dev, "Invalid token %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/* Route a format token to the in/out format slot @fmt_idx of @fmt. */
static int skl_tplg_widget_fill_fmt(struct device *dev,
		struct skl_module_iface *fmt,
		u32 tkn, u32 val, u32 dir, int fmt_idx)
{
	struct skl_module_fmt *dst_fmt;

	if (!fmt)
		return -EINVAL;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = &fmt->inputs[fmt_idx].fmt;
		break;

	case SKL_DIR_OUT:
		dst_fmt = &fmt->outputs[fmt_idx].fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
}

/* Set the is_dynamic flag on the first @pin_count pins of @mpin. */
static void skl_tplg_fill_pin_dynamic_val(
		struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
	int i;

	for (i = 0; i < pin_count; i++)
		mpin[i].is_dynamic = value;
}

/*
 * Resource table in the manifest has pin specific resources
 * like pin and pin buffer size
 */
static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res, int pin_idx, int dir)
{
	struct skl_module_pin_resources *m_pin;

	switch (dir) {
	case SKL_DIR_IN:
		m_pin = &res->input[pin_idx];
		break;

	case SKL_DIR_OUT:
		m_pin = &res->output[pin_idx];
		break;

	default:
		dev_err(dev, "Invalid pin direction: %d\n", dir);
		return -EINVAL;
	}

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_RES_PIN_ID:
		m_pin->pin_index = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_PIN_BUF:
		m_pin->buf_size = tkn_elem->value;
		break;

	default:
		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}

/*
 * Fill module specific resources from the manifest's resource
 * table like CPS, DMA size, mem_pages.
 */
static int skl_tplg_fill_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res,
		int pin_idx, int dir)
{
	int ret, tkn_count = 0;

	if (!res)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_DMA_SIZE:
		res->dma_buffer_size = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_CPC:
		res->cpc = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
		res->is_pages = tkn_elem->value;
		break;

	case SKL_TKN_U32_OBS:
		res->obs = tkn_elem->value;
		break;

	case SKL_TKN_U32_IBS:
		res->ibs = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_RES_PIN_ID:
	case SKL_TKN_MM_U32_PIN_BUF:
		/* per-pin resources are delegated to the pin-res parser */
		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
						    pin_idx, dir);
		if (ret < 0)
			return ret;
		break;

	case SKL_TKN_MM_U32_CPS:
	case SKL_TKN_U32_MAX_MCPS:
		/* ignore unused tokens */
		break;

	default:
		dev_err(dev, "Not a res type token: %d", tkn_elem->token);
		return -EINVAL;

	}
	/* returns the number of tokens consumed (always 1 on success) */
	tkn_count++;

	return tkn_count;
}

/*
 * Parse tokens to fill up the module private data
 */
static int skl_tplg_get_token(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	int tkn_count = 0;
	int ret;
	/*
	 * NOTE(review): these statics carry parser state (current pipe,
	 * direction, pin index, config index) across successive calls while
	 * one widget's token array is walked — this relies on single-threaded
	 * topology loading; confirm before reusing concurrently.
	 */
	static int is_pipe_exists;
	static int pin_index, dir, conf_idx;
	struct skl_module_iface *iface = NULL;
	struct skl_module_res *res = NULL;
	int res_idx = mconfig->res_idx;
	int fmt_idx = mconfig->fmt_idx;

	/*
	 * If the manifest structure contains no modules, fill all
	 * the module data to 0th index.
	 * res_idx and fmt_idx are default set to 0.
	 */
	if (skl->nr_modules == 0) {
		res = &mconfig->module->resources[res_idx];
		iface = &mconfig->module->formats[fmt_idx];
	}

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->module->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->module->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			mconfig->m_in_pin =
				devm_kcalloc(dev, MAX_IN_QUEUE,
					     sizeof(*mconfig->m_in_pin),
					     GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		/*
		 * NOTE(review): allocation uses MAX_IN_QUEUE and
		 * sizeof(*mconfig->m_in_pin) for the OUT pin array; both
		 * element types are struct skl_module_pin and the queue
		 * limits are presumably equal, but this is inconsistent —
		 * confirm against MAX_OUT_QUEUE.
		 */
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_IN_QUEUE,
					     sizeof(*mconfig->m_in_pin),
					     GFP_KERNEL);
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id =
		tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_MAX_MCPS:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
		/* res may be NULL when nr_modules != 0; fill_res_tkn rejects it */
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0) {
			if (ret == -EEXIST) {
				is_pipe_exists = 1;
				break;
			}
			/*
			 * NOTE(review): this returns is_pipe_exists (0 or 1)
			 * rather than the negative error code in ret, so
			 * e.g. -ENOMEM from skl_tplg_add_pipe is silently
			 * swallowed — looks like it should be "return ret".
			 */
			return is_pipe_exists;
		}

		break;

	case SKL_TKN_U32_PIPE_CONFIG_ID:
		conf_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
	case SKL_TKN_U32_PIPE_DIRECTION:
	case SKL_TKN_U32_NUM_CONFIGS:
		/* pipe tokens only apply to the pipe's first occurrence */
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
	case SKL_TKN_U32_CFG_FREQ:
	case SKL_TKN_U8_CFG_CHAN:
	case SKL_TKN_U8_CFG_BPS:
		if (mconfig->pipe->nr_cfgs) {
			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value,
					conf_idx, dir);
			if (ret < 0)
				return ret;
		}
		break;

	case SKL_TKN_CFG_MOD_RES_ID:
		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
		break;

	case SKL_TKN_CFG_MOD_FMT_ID:
		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
		break;

	/*
	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
	 * direction and the pin count. The first four bits represent
	 * direction and next four the pin count.
	 */
	case SKL_TKN_U32_DIR_PIN_COUNT:
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_index = (tkn_elem->value &
			SKL_PIN_COUNT_MASK) >> 4;

		break;

	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
				tkn_elem->value, dir, pin_index);

		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_PIN_MOD_ID:
	case SKL_TKN_U32_PIN_INST_ID:
	case SKL_TKN_UUID:
		ret = skl_tplg_fill_pins_info(dev,
				mconfig, tkn_elem, dir,
				pin_index);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_CAPS_SIZE:
		mconfig->formats_config.caps_size =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_CAPS_SET_PARAMS:
		mconfig->formats_config.set_params =
				tkn_elem->value;
		break;

	case SKL_TKN_U32_CAPS_PARAMS_ID:
		mconfig->formats_config.param_id =
				tkn_elem->value;
		break;

	case SKL_TKN_U32_PROC_DOMAIN:
		mconfig->domain =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_DMA_BUF_SIZE:
		mconfig->dma_buffer_size = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_CONN_TYPE:
		/* recognised but intentionally unused */
		break;

	default:
		dev_err(dev, "Token %d not handled\n",
				tkn_elem->token);
		return -EINVAL;
	}

	tkn_count++;

	return tkn_count;
}

/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data, struct skl_dev *skl,
		struct skl_module_cfg *mconfig, int block_size)
{
	struct
snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	bool is_module_guid = true;

	if (block_size <= 0)
		return -EINVAL;

	/* walk the vendor arrays until the declared block size is consumed */
	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			dev_warn(dev, "no string tokens expected for skl tplg\n");
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			/* the first UUID is the module GUID; later ones are
			 * pin UUIDs handled by the generic token parser
			 */
			if (is_module_guid) {
				ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
					array->uuid);
				is_module_guid = false;
			} else {
				ret = skl_tplg_get_token(dev, array->value, skl,
						mconfig);
			}

			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);

			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		/* consume every value element in this array */
		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
					skl, mconfig);

			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}

		tuple_size += tkn_count * sizeof(*tkn_elem);
	}

	return off;
}

/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
		struct snd_soc_tplg_vendor_array *array)
{
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	tkn_elem = array->value;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_NUM_BLOCKS:
	case SKL_TKN_U8_BLOCK_TYPE:
	case SKL_TKN_U16_BLOCK_SIZE:
		/* all three descriptor tokens just carry a plain value */
		return tkn_elem->value;

	default:
		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
		break;
	}

	return -EINVAL;
}

/* Functions to
parse private data from configuration file format v4 */

/*
 * Add pipeline from topology binary into driver pipeline list
 *
 * If already added we return that instance
 * Otherwise we create a new instance and add into driver list
 */
static int skl_tplg_add_pipe_v4(struct device *dev,
			struct skl_module_cfg *mconfig, struct skl_dev *skl,
			struct skl_dfw_v4_pipe *dfw_pipe)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	/* reuse an already-registered pipe with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
			mconfig->pipe = ppl->pipe;
			return 0;
		}
	}

	/* device-managed allocations: freed automatically on driver detach */
	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->ppl_id = dfw_pipe->pipe_id;
	pipe->memory_pages = dfw_pipe->memory_pages;
	pipe->pipe_priority = dfw_pipe->pipe_priority;
	pipe->conn_type = dfw_pipe->conn_type;
	pipe->state = SKL_PIPE_INVALID;
	pipe->p_params = params;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;

	return 0;
}

/*
 * Copy v4 firmware pin descriptors into the driver's pin array and
 * reset each pin's runtime state to unbound/unused.
 */
static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
					struct skl_module_pin *m_pin,
					bool is_dynamic, int max_pin)
{
	int i;

	for (i = 0; i < max_pin; i++) {
		m_pin[i].id.module_id = dfw_pin[i].module_id;
		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
		m_pin[i].in_use = false;
		m_pin[i].is_dynamic = is_dynamic;
		m_pin[i].pin_state = SKL_PIN_UNBIND;
	}
}

/* Convert v4 firmware audio formats into the driver's pin format array */
static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
				 struct skl_dfw_v4_module_fmt *src_fmt,
				 int pins)
{
	int i;

	for (i = 0; i < pins; i++) {
		dst_fmt[i].fmt.channels = src_fmt[i].channels;
		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
		dst_fmt[i].fmt.interleaving_style =
				src_fmt[i].interleaving_style;
		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
	}
}

/*
 * Parse a legacy (v4) widget private-data blob and populate @mconfig.
 * v4 blobs carry a fixed struct skl_dfw_v4_module instead of vendor
 * token arrays; all data lands in resource/format index 0.
 */
static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
				    struct skl_dev *skl, struct device *dev,
				    struct skl_module_cfg *mconfig)
{
	struct skl_dfw_v4_module *dfw =
				(struct skl_dfw_v4_module *)tplg_w->priv.data;
	int ret;

	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");

	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
	if (ret)
		return ret;
	/* module id resolved later, when the module is loaded for a stream */
	mconfig->id.module_id = -1;
	mconfig->id.instance_id = dfw->instance_id;
	/* v4 expressed load in MCPS; newer firmware wants cycles per chunk */
	mconfig->module->resources[0].cpc = dfw->max_mcps / 1000;
	mconfig->module->resources[0].ibs = dfw->ibs;
	mconfig->module->resources[0].obs = dfw->obs;
	mconfig->core_id = dfw->core_id;
	mconfig->module->max_input_pins = dfw->max_in_queue;
	mconfig->module->max_output_pins = dfw->max_out_queue;
	mconfig->module->loadable = dfw->is_loadable;
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
			     MAX_IN_QUEUE);
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
			     MAX_OUT_QUEUE);

	mconfig->params_fixup = dfw->params_fixup;
	mconfig->converter = dfw->converter;
	mconfig->m_type = dfw->module_type;
	mconfig->vbus_id = dfw->vbus_id;
	mconfig->module->resources[0].is_pages = dfw->mem_pages;

	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
	if (ret)
		return ret;

	mconfig->dev_type = dfw->dev_type;
	mconfig->hw_conn_type = dfw->hw_conn_type;
	mconfig->time_slot = dfw->time_slot;
	mconfig->formats_config.caps_size = dfw->caps.caps_size;

	mconfig->m_in_pin = devm_kcalloc(dev,
				MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
				GFP_KERNEL);
	if (!mconfig->m_in_pin)
		return -ENOMEM;

	mconfig->m_out_pin = devm_kcalloc(dev,
				MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
				GFP_KERNEL);
	if (!mconfig->m_out_pin)
		return -ENOMEM;

	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
				    dfw->is_dynamic_in_pin,
				    mconfig->module->max_input_pins);
	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
				    dfw->is_dynamic_out_pin,
				    mconfig->module->max_output_pins);

	/* copy the opaque DSP parameter blob, if the widget carries one */
	if (mconfig->formats_config.caps_size) {
		mconfig->formats_config.set_params = dfw->caps.set_params;
		mconfig->formats_config.param_id = dfw->caps.param_id;
		mconfig->formats_config.caps =
		devm_kzalloc(dev, mconfig->formats_config.caps_size,
			     GFP_KERNEL);
		if (!mconfig->formats_config.caps)
			return -ENOMEM;
		memcpy(mconfig->formats_config.caps, dfw->caps.caps,
		       dfw->caps.caps_size);
	}

	return 0;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				struct skl_dev *skl, struct device *dev,
				struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/*
	 * v4 configuration files have a valid UUID at the start of
	 * the widget's private data.
	 */
	if (uuid_is_valid((char *)tplg_w->priv.data))
		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			/* raw binary block: copied verbatim into the caps
			 * buffer sized by a previously-seen CAPS_SIZE token
			 */
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
			ret = mconfig->formats_config.caps_size;
		}
		off += ret;
	}

	return 0;
}

/*
 * Reset the runtime pin/pipe/module state of one widget belonging to
 * @component, so a subsequent use case starts from a clean slate.
 */
static void skl_clear_pin_config(struct snd_soc_component *component,
				struct snd_soc_dapm_widget *w)
{
	int i;
	struct skl_module_cfg *mconfig;
	struct skl_pipe *pipe;

	/* only touch widgets owned by this component */
	if (!strncmp(w->dapm->component->name, component->name,
					strlen(component->name))) {
		mconfig = w->priv;
		pipe = mconfig->pipe;
		for (i = 0; i < mconfig->module->max_input_pins; i++) {
			mconfig->m_in_pin[i].in_use = false;
			mconfig->m_in_pin[i].pin_state
				= SKL_PIN_UNBIND;
		}
		for (i = 0; i < mconfig->module->max_output_pins; i++) {
			mconfig->m_out_pin[i].in_use = false;
			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		pipe->state = SKL_PIPE_INVALID;
		mconfig->m_state = SKL_MODULE_UNINIT;
	}
}

/*
 * Walk every DSP widget on the card and reset its pin/pipe state,
 * then clear the DSP's module use counts. Used when the firmware
 * context is torn down.
 */
void skl_cleanup_resources(struct skl_dev *skl)
{
	struct snd_soc_component *soc_component = skl->component;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_component == NULL)
		return;

	card = soc_component->card;
	if (!card || !card->instantiated)
		return;

	list_for_each_entry(w, &card->widgets, list) {
		if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL)
			skl_clear_pin_config(soc_component, w);
	}

	skl_clear_module_cnt(skl->dsp);
}

/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget which gives
 * information to the driver about module and pipeline parameters which DSP
 * FW expects like ids, resource values, formats etc
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_module_cfg *mconfig;

	/* widgets without private data only need event binding */
	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	/* with no manifest-declared modules, each widget owns its module */
	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set it to query when
	 * module is load for a use case
	 */
	mconfig->id.module_id = -1;
2915 2916 /* Parse private data for tuples */ 2917 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig); 2918 if (ret < 0) 2919 return ret; 2920 2921 skl_debug_init_module(skl->debugfs, w, mconfig); 2922 2923 bind_event: 2924 if (tplg_w->event_type == 0) { 2925 dev_dbg(bus->dev, "ASoC: No event handler required\n"); 2926 return 0; 2927 } 2928 2929 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops, 2930 ARRAY_SIZE(skl_tplg_widget_ops), 2931 tplg_w->event_type); 2932 2933 if (ret) { 2934 dev_err(bus->dev, "%s: No matching event handlers found for %d\n", 2935 __func__, tplg_w->event_type); 2936 return -EINVAL; 2937 } 2938 2939 return 0; 2940 } 2941 2942 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be, 2943 struct snd_soc_tplg_bytes_control *bc) 2944 { 2945 struct skl_algo_data *ac; 2946 struct skl_dfw_algo_data *dfw_ac = 2947 (struct skl_dfw_algo_data *)bc->priv.data; 2948 2949 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL); 2950 if (!ac) 2951 return -ENOMEM; 2952 2953 /* Fill private data */ 2954 ac->max = dfw_ac->max; 2955 ac->param_id = dfw_ac->param_id; 2956 ac->set_params = dfw_ac->set_params; 2957 ac->size = dfw_ac->max; 2958 2959 if (ac->max) { 2960 ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL); 2961 if (!ac->params) 2962 return -ENOMEM; 2963 2964 memcpy(ac->params, dfw_ac->params, ac->max); 2965 } 2966 2967 be->dobj.private = ac; 2968 return 0; 2969 } 2970 2971 static int skl_init_enum_data(struct device *dev, struct soc_enum *se, 2972 struct snd_soc_tplg_enum_control *ec) 2973 { 2974 2975 void *data; 2976 2977 if (ec->priv.size) { 2978 data = devm_kzalloc(dev, sizeof(ec->priv.size), GFP_KERNEL); 2979 if (!data) 2980 return -ENOMEM; 2981 memcpy(data, ec->priv.data, ec->priv.size); 2982 se->dobj.private = data; 2983 } 2984 2985 return 0; 2986 2987 } 2988 2989 static int skl_tplg_control_load(struct snd_soc_component *cmpnt, 2990 int index, 2991 struct snd_kcontrol_new *kctl, 2992 struct snd_soc_tplg_ctl_hdr 
*hdr) 2993 { 2994 struct soc_bytes_ext *sb; 2995 struct snd_soc_tplg_bytes_control *tplg_bc; 2996 struct snd_soc_tplg_enum_control *tplg_ec; 2997 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 2998 struct soc_enum *se; 2999 3000 switch (hdr->ops.info) { 3001 case SND_SOC_TPLG_CTL_BYTES: 3002 tplg_bc = container_of(hdr, 3003 struct snd_soc_tplg_bytes_control, hdr); 3004 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 3005 sb = (struct soc_bytes_ext *)kctl->private_value; 3006 if (tplg_bc->priv.size) 3007 return skl_init_algo_data( 3008 bus->dev, sb, tplg_bc); 3009 } 3010 break; 3011 3012 case SND_SOC_TPLG_CTL_ENUM: 3013 tplg_ec = container_of(hdr, 3014 struct snd_soc_tplg_enum_control, hdr); 3015 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) { 3016 se = (struct soc_enum *)kctl->private_value; 3017 if (tplg_ec->priv.size) 3018 return skl_init_enum_data(bus->dev, se, 3019 tplg_ec); 3020 } 3021 break; 3022 3023 default: 3024 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n", 3025 hdr->ops.get, hdr->ops.put, hdr->ops.info); 3026 break; 3027 } 3028 3029 return 0; 3030 } 3031 3032 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 3033 struct snd_soc_tplg_vendor_string_elem *str_elem, 3034 struct skl_dev *skl) 3035 { 3036 int tkn_count = 0; 3037 static int ref_count; 3038 3039 switch (str_elem->token) { 3040 case SKL_TKN_STR_LIB_NAME: 3041 if (ref_count > skl->lib_count - 1) { 3042 ref_count = 0; 3043 return -EINVAL; 3044 } 3045 3046 strncpy(skl->lib_info[ref_count].name, 3047 str_elem->string, 3048 ARRAY_SIZE(skl->lib_info[ref_count].name)); 3049 ref_count++; 3050 break; 3051 3052 default: 3053 dev_err(dev, "Not a string token %d\n", str_elem->token); 3054 break; 3055 } 3056 tkn_count++; 3057 3058 return tkn_count; 3059 } 3060 3061 static int skl_tplg_get_str_tkn(struct device *dev, 3062 struct snd_soc_tplg_vendor_array *array, 3063 struct skl_dev *skl) 3064 { 3065 int tkn_count = 0, ret; 3066 struct 
snd_soc_tplg_vendor_string_elem *str_elem;

	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
	while (tkn_count < array->num_elems) {
		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
		str_elem++;

		if (ret < 0)
			return ret;

		/* ret is the number of tokens the helper consumed */
		tkn_count = tkn_count + ret;
	}

	return tkn_count;
}

/*
 * Store one manifest format token in the input or output pin format
 * of @fmt selected by @dir/@fmt_idx; unknown tokens are forwarded to
 * the generic format filler.
 */
static int skl_tplg_manifest_fill_fmt(struct device *dev,
		struct skl_module_iface *fmt,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		u32 dir, int fmt_idx)
{
	struct skl_module_pin_fmt *dst_fmt;
	struct skl_module_fmt *mod_fmt;
	int ret;

	if (!fmt)
		return -EINVAL;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = &fmt->inputs[fmt_idx];
		break;

	case SKL_DIR_OUT:
		dst_fmt = &fmt->outputs[fmt_idx];
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	mod_fmt = &dst_fmt->fmt;

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_INTF_PIN_ID:
		dst_fmt->id = tkn_elem->value;
		break;

	default:
		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
					tkn_elem->value);
		if (ret < 0)
			return ret;
		break;
	}

	return 0;
}

/* Store one manifest module-info token (pin types/counts, array sizes) */
static int skl_tplg_fill_mod_info(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module *mod)
{

	if (!mod)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_PIN_TYPE:
		mod->input_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_PIN_TYPE:
		mod->output_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mod->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mod->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_MM_U8_NUM_RES:
		mod->nr_resources = tkn_elem->value;
3153 break; 3154 3155 case SKL_TKN_MM_U8_NUM_INTF: 3156 mod->nr_interfaces = tkn_elem->value; 3157 break; 3158 3159 default: 3160 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3161 return -EINVAL; 3162 } 3163 3164 return 0; 3165 } 3166 3167 3168 static int skl_tplg_get_int_tkn(struct device *dev, 3169 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3170 struct skl_dev *skl) 3171 { 3172 int tkn_count = 0, ret; 3173 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3174 struct skl_module_res *res = NULL; 3175 struct skl_module_iface *fmt = NULL; 3176 struct skl_module *mod = NULL; 3177 static struct skl_astate_param *astate_table; 3178 static int astate_cfg_idx, count; 3179 int i; 3180 size_t size; 3181 3182 if (skl->modules) { 3183 mod = skl->modules[mod_idx]; 3184 res = &mod->resources[res_val_idx]; 3185 fmt = &mod->formats[intf_val_idx]; 3186 } 3187 3188 switch (tkn_elem->token) { 3189 case SKL_TKN_U32_LIB_COUNT: 3190 skl->lib_count = tkn_elem->value; 3191 break; 3192 3193 case SKL_TKN_U8_NUM_MOD: 3194 skl->nr_modules = tkn_elem->value; 3195 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3196 sizeof(*skl->modules), GFP_KERNEL); 3197 if (!skl->modules) 3198 return -ENOMEM; 3199 3200 for (i = 0; i < skl->nr_modules; i++) { 3201 skl->modules[i] = devm_kzalloc(dev, 3202 sizeof(struct skl_module), GFP_KERNEL); 3203 if (!skl->modules[i]) 3204 return -ENOMEM; 3205 } 3206 break; 3207 3208 case SKL_TKN_MM_U8_MOD_IDX: 3209 mod_idx = tkn_elem->value; 3210 break; 3211 3212 case SKL_TKN_U32_ASTATE_COUNT: 3213 if (astate_table != NULL) { 3214 dev_err(dev, "More than one entry for A-State count"); 3215 return -EINVAL; 3216 } 3217 3218 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3219 dev_err(dev, "Invalid A-State count %d\n", 3220 tkn_elem->value); 3221 return -EINVAL; 3222 } 3223 3224 size = struct_size(skl->cfg.astate_cfg, astate_table, 3225 tkn_elem->value); 3226 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3227 if 
(!skl->cfg.astate_cfg) 3228 return -ENOMEM; 3229 3230 astate_table = skl->cfg.astate_cfg->astate_table; 3231 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3232 break; 3233 3234 case SKL_TKN_U32_ASTATE_IDX: 3235 if (tkn_elem->value >= count) { 3236 dev_err(dev, "Invalid A-State index %d\n", 3237 tkn_elem->value); 3238 return -EINVAL; 3239 } 3240 3241 astate_cfg_idx = tkn_elem->value; 3242 break; 3243 3244 case SKL_TKN_U32_ASTATE_KCPS: 3245 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3246 break; 3247 3248 case SKL_TKN_U32_ASTATE_CLK_SRC: 3249 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3250 break; 3251 3252 case SKL_TKN_U8_IN_PIN_TYPE: 3253 case SKL_TKN_U8_OUT_PIN_TYPE: 3254 case SKL_TKN_U8_IN_QUEUE_COUNT: 3255 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3256 case SKL_TKN_MM_U8_NUM_RES: 3257 case SKL_TKN_MM_U8_NUM_INTF: 3258 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3259 if (ret < 0) 3260 return ret; 3261 break; 3262 3263 case SKL_TKN_U32_DIR_PIN_COUNT: 3264 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3265 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3266 break; 3267 3268 case SKL_TKN_MM_U32_RES_ID: 3269 if (!res) 3270 return -EINVAL; 3271 3272 res->id = tkn_elem->value; 3273 res_val_idx = tkn_elem->value; 3274 break; 3275 3276 case SKL_TKN_MM_U32_FMT_ID: 3277 if (!fmt) 3278 return -EINVAL; 3279 3280 fmt->fmt_idx = tkn_elem->value; 3281 intf_val_idx = tkn_elem->value; 3282 break; 3283 3284 case SKL_TKN_MM_U32_CPS: 3285 case SKL_TKN_MM_U32_DMA_SIZE: 3286 case SKL_TKN_MM_U32_CPC: 3287 case SKL_TKN_U32_MEM_PAGES: 3288 case SKL_TKN_U32_OBS: 3289 case SKL_TKN_U32_IBS: 3290 case SKL_TKN_MM_U32_RES_PIN_ID: 3291 case SKL_TKN_MM_U32_PIN_BUF: 3292 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3293 if (ret < 0) 3294 return ret; 3295 3296 break; 3297 3298 case SKL_TKN_MM_U32_NUM_IN_FMT: 3299 if (!fmt) 3300 return -EINVAL; 3301 3302 res->nr_input_pins = tkn_elem->value; 3303 break; 3304 3305 case SKL_TKN_MM_U32_NUM_OUT_FMT: 
3306 if (!fmt) 3307 return -EINVAL; 3308 3309 res->nr_output_pins = tkn_elem->value; 3310 break; 3311 3312 case SKL_TKN_U32_FMT_CH: 3313 case SKL_TKN_U32_FMT_FREQ: 3314 case SKL_TKN_U32_FMT_BIT_DEPTH: 3315 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3316 case SKL_TKN_U32_FMT_CH_CONFIG: 3317 case SKL_TKN_U32_FMT_INTERLEAVE: 3318 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3319 case SKL_TKN_U32_FMT_CH_MAP: 3320 case SKL_TKN_MM_U32_INTF_PIN_ID: 3321 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3322 dir, pin_idx); 3323 if (ret < 0) 3324 return ret; 3325 break; 3326 3327 default: 3328 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3329 return -EINVAL; 3330 } 3331 tkn_count++; 3332 3333 return tkn_count; 3334 } 3335 3336 static int skl_tplg_get_manifest_uuid(struct device *dev, 3337 struct skl_dev *skl, 3338 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 3339 { 3340 static int ref_count; 3341 struct skl_module *mod; 3342 3343 if (uuid_tkn->token == SKL_TKN_UUID) { 3344 mod = skl->modules[ref_count]; 3345 guid_copy(&mod->uuid, (guid_t *)&uuid_tkn->uuid); 3346 ref_count++; 3347 } else { 3348 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token); 3349 return -EINVAL; 3350 } 3351 3352 return 0; 3353 } 3354 3355 /* 3356 * Fill the manifest structure by parsing the tokens based on the 3357 * type. 
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl_dev *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	/* walk the vendor arrays until the declared block size is consumed */
	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);

			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid);
			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		/* consume every integer element in this array */
		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, skl);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}
		tuple_size += (tkn_count * sizeof(*tkn_elem));
		tkn_count = 0;
	}

	return off;
}

/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
3419 */ 3420 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3421 struct device *dev, struct skl_dev *skl) 3422 { 3423 struct snd_soc_tplg_vendor_array *array; 3424 int num_blocks, block_size = 0, block_type, off = 0; 3425 char *data; 3426 int ret; 3427 3428 /* Read the NUM_DATA_BLOCKS descriptor */ 3429 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3430 ret = skl_tplg_get_desc_blocks(dev, array); 3431 if (ret < 0) 3432 return ret; 3433 num_blocks = ret; 3434 3435 off += array->size; 3436 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3437 while (num_blocks > 0) { 3438 array = (struct snd_soc_tplg_vendor_array *) 3439 (manifest->priv.data + off); 3440 ret = skl_tplg_get_desc_blocks(dev, array); 3441 3442 if (ret < 0) 3443 return ret; 3444 block_type = ret; 3445 off += array->size; 3446 3447 array = (struct snd_soc_tplg_vendor_array *) 3448 (manifest->priv.data + off); 3449 3450 ret = skl_tplg_get_desc_blocks(dev, array); 3451 3452 if (ret < 0) 3453 return ret; 3454 block_size = ret; 3455 off += array->size; 3456 3457 array = (struct snd_soc_tplg_vendor_array *) 3458 (manifest->priv.data + off); 3459 3460 data = (manifest->priv.data + off); 3461 3462 if (block_type == SKL_TYPE_TUPLE) { 3463 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3464 block_size); 3465 3466 if (ret < 0) 3467 return ret; 3468 3469 --num_blocks; 3470 } else { 3471 return -EINVAL; 3472 } 3473 off += ret; 3474 } 3475 3476 return 0; 3477 } 3478 3479 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, 3480 struct snd_soc_tplg_manifest *manifest) 3481 { 3482 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3483 struct skl_dev *skl = bus_to_skl(bus); 3484 3485 /* proceed only if we have private data defined */ 3486 if (manifest->priv.size == 0) 3487 return 0; 3488 3489 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3490 3491 if (skl->lib_count > SKL_MAX_LIB) { 3492 dev_err(bus->dev, "Exceeding max Library 
count. Got:%d\n", 3493 skl->lib_count); 3494 return -EINVAL; 3495 } 3496 3497 return 0; 3498 } 3499 3500 static struct snd_soc_tplg_ops skl_tplg_ops = { 3501 .widget_load = skl_tplg_widget_load, 3502 .control_load = skl_tplg_control_load, 3503 .bytes_ext_ops = skl_tlv_ops, 3504 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3505 .io_ops = skl_tplg_kcontrol_ops, 3506 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3507 .manifest = skl_manifest_load, 3508 .dai_load = skl_dai_load, 3509 }; 3510 3511 /* 3512 * A pipe can have multiple modules, each of them will be a DAPM widget as 3513 * well. While managing a pipeline we need to get the list of all the 3514 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3515 * helps to get the SKL type widgets in that pipeline 3516 */ 3517 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3518 { 3519 struct snd_soc_dapm_widget *w; 3520 struct skl_module_cfg *mcfg = NULL; 3521 struct skl_pipe_module *p_module = NULL; 3522 struct skl_pipe *pipe; 3523 3524 list_for_each_entry(w, &component->card->widgets, list) { 3525 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3526 mcfg = w->priv; 3527 pipe = mcfg->pipe; 3528 3529 p_module = devm_kzalloc(component->dev, 3530 sizeof(*p_module), GFP_KERNEL); 3531 if (!p_module) 3532 return -ENOMEM; 3533 3534 p_module->w = w; 3535 list_add_tail(&p_module->node, &pipe->w_list); 3536 } 3537 } 3538 3539 return 0; 3540 } 3541 3542 static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe) 3543 { 3544 struct skl_pipe_module *w_module; 3545 struct snd_soc_dapm_widget *w; 3546 struct skl_module_cfg *mconfig; 3547 bool host_found = false, link_found = false; 3548 3549 list_for_each_entry(w_module, &pipe->w_list, node) { 3550 w = w_module->w; 3551 mconfig = w->priv; 3552 3553 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3554 host_found = true; 3555 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3556 link_found = 
true; 3557 } 3558 3559 if (host_found && link_found) 3560 pipe->passthru = true; 3561 else 3562 pipe->passthru = false; 3563 } 3564 3565 /* 3566 * SKL topology init routine 3567 */ 3568 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) 3569 { 3570 int ret; 3571 const struct firmware *fw; 3572 struct skl_dev *skl = bus_to_skl(bus); 3573 struct skl_pipeline *ppl; 3574 3575 ret = request_firmware(&fw, skl->tplg_name, bus->dev); 3576 if (ret < 0) { 3577 dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", 3578 skl->tplg_name, ret); 3579 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3580 if (ret < 0) { 3581 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3582 "dfw_sst.bin", ret); 3583 return ret; 3584 } 3585 } 3586 3587 /* 3588 * The complete tplg for SKL is loaded as index 0, we don't use 3589 * any other index 3590 */ 3591 ret = snd_soc_tplg_component_load(component, 3592 &skl_tplg_ops, fw, 0); 3593 if (ret < 0) { 3594 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3595 release_firmware(fw); 3596 return -EINVAL; 3597 } 3598 3599 skl->tplg = fw; 3600 ret = skl_tplg_create_pipe_widget_list(component); 3601 if (ret < 0) 3602 return ret; 3603 3604 list_for_each_entry(ppl, &skl->ppl_list, node) 3605 skl_tplg_set_pipe_type(skl, ppl->pipe); 3606 3607 return 0; 3608 } 3609 3610 void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus) 3611 { 3612 struct skl_dev *skl = bus_to_skl(bus); 3613 struct skl_pipeline *ppl, *tmp; 3614 3615 if (!list_empty(&skl->ppl_list)) 3616 list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node) 3617 list_del(&ppl->node); 3618 3619 /* clean up topology */ 3620 snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL); 3621 3622 release_firmware(skl->tplg); 3623 } 3624