// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/intel-nhlt.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * SKL DSP driver modelling uses only a few DAPM widget types; the rest are
 * ignored.
 * This helper checks whether the SKL driver handles a given widget type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:
		return false;
	default:
		return true;
	}
}

static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[0];

	dev_dbg(skl->dev, "Dumping config\n");
	dev_dbg(skl->dev, "Input Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
			iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "Output Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
			iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}
}

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup to apply by supplying the
 * fixup mask, so based on that we calculate the output.
 *
 * For an FE, the pcm hw_params is the source/target format. The same is
 * applicable for a BE when its hw_params is invoked.
 * Here, based on the FE/BE pipeline and the direction, we calculate the
 * input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
	out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which are dependent upon pcm
 * params, so once we have calculated the params, we need to calculate the
 * buffer sizes as well.
 */
static void skl_tplg_update_buffer_size(struct skl_dev *skl,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/*
	 * Since fixups are applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only.
	 */
	res = &mcfg->module->resources[0];
	in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[0].outputs[0].fmt;

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}

/* Map the topology BE device type to the NHLT device type */
static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(skl->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt =
				m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE updation\n",
			w->name);

	skl_dump_mconfig(skl, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(skl, m_cfg);

	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER updation\n",
			w->name);

	skl_dump_mconfig(skl, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls, which need
 * to be applied after the module is initialized. If the set_params flag is
 * SKL_PARAM_SET, the module params are sent after the module is initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(skl,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls but are required when
 * the module is initialized. Such params are identified by the
 * SKL_PARAM_INIT value of the set_params flag and are sent as part of the
 * module init data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	u8 cfg_idx;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->dev,
					"module %pUL id not populated\n",
					(guid_t *)mconfig->guid);
			return -EIO;
		}

		cfg_idx = mconfig->pipe->cur_config_idx;
		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

		if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
			ret = skl->dsp->fw_ops.load_mod(skl->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, skl);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, skl);
		uuid_mod = (guid_t *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);

		ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
						mconfig->core_id, ret);
			return ret;
		}

		ret = skl_init_module(skl, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
			goto err;
		}

		ret = skl_tplg_set_module_params(w, skl);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	skl_dsp_put_core(skl->dsp, mconfig->core_id);
	return ret;
}

static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
						struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (guid_t *)mconfig->guid;

		if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);

		ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}

/*
 * Here, we select the pipe format based on the pipe type and pipe
 * direction to determine the current config index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		pipe->cur_config_idx = 0;
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
		dev_dbg(skl->dev, "No conn_type detected, take 0th config\n");
		pipe->cur_config_idx = 0;
		pipe->memory_pages = pconfig->mem_pages;

		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
	    (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			pipe->cur_config_idx = i;
			pipe->memory_pages = pconfig->mem_pages;
			dev_dbg(skl->dev, "Using pipe config: %d\n", i);

			return 0;
		}
	}

	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 *   - Create the pipeline
 *   - Initialize the modules in the pipeline
 *   - Finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/*
	 * Create a list of modules for the pipe.
	 * This list contains modules from source to sink.
	 */
	ret = skl_create_pipeline(skl, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(skl, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in the deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(skl, modules->src,
							modules->dst);
		}
	}

	return 0;
}

static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}

/*
 * Some modules require params to be set after the module is bound to
 * all connected pins.
 *
 * The module provider initializes the set_params flag for such modules and
 * we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * Check that all out/in pins are in the bind state.
	 * If so, set the module params.
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(skl, params, bc->max,
								mconfig);

				ret = skl_set_module_params(skl, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/* Look up the module id for a UUID in the driver's uuid list */
static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &skl->uuid_list, list) {
		if (guid_equal(uuid, &module->uuid))
			return module->id;
	}

	return -EINVAL;
}

static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
			const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = struct_size(params, u.map, uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from the UUID mentioned in the
 * post bind params.
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post bind params are used only for KPB
	 * to set copier instances to drain the data
	 * in fast mode.
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
				&w->kcontrol_news[i]) < 0))
			dev_err(skl->dev,
				"%s: invalid kpb post bind params\n",
				__func__);
}

static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for modules with static pin connections */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl_dev *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(skl->dev,
			"%s: src widget=%s\n", __func__, w->name);
		dev_dbg(skl->dev,
			"%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check the widgets in the sink pipelines. They can
		 * be of any widget type and we are only interested in the
		 * ones used by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink, skl->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. a reference path.
			 * When the path is enabled, the dst module that needs
			 * to be bound may not be initialized. If the module is
			 * not initialized, add these modules to the deferred
			 * bind list and, when the dst module is initialized,
			 * bind this module to the dst module in the deferred
			 * list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;
			}

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w,
						src_mconfig, skl);
			skl_tplg_set_module_bind_params(sink,
						sink_mconfig, skl);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(skl,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline.
 * So in the Pre-PMU event of the PGA we need to do the following:
 *   - Bind to the sink pipeline.
 *     Since the sink pipes can be running and we don't get a mixer event on
 *     connect for an already running mixer, we need to find the sink pipes
 *     here and bind to them. This way dynamic connect works.
 *   - Start the sink pipeline, if not running.
 *   - Then run the current pipe.
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(skl, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check the widgets in the sink pipelines. They can
		 * be of any widget type and we are only interested in the
		 * ones used by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source, skl->dev)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 *   - Check if this pipe is running
 *   - If not, then:
 *     - bind this pipeline to its source pipeline; if the source pipe is
 *       already running, this means it is a dynamic connection and we need
 *       to bind only to that pipe
 *     - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, that means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * Check the pipe state: if it is not started, there is no
		 * need to bind or start the pipe.
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(skl, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 *   - Stop the pipe
 *   - Find the source connections and remove them from the dapm_path_list
 *   - Unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(skl,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, pipeline delete is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, unbind the
			 * modules from the deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(skl, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(skl, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(skl, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(skl, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 *   - Stop the pipeline
 *   - If still connected, unbind from the sink pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(skl, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector and if a path is found, that
			 * means the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(skl, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * But we are interested in the last PGA (leaf PGA) in a pipeline, to
 * disconnect from the sink while it is running (two FE to one BE or one FE
 * to two BE scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		/*
		 * Widget data is expected to be stripped of T and L
		 */
		size -= 2 * sizeof(unsigned int);
		data += 2;

		if (size > ac->max)
			return -EINVAL;
		ac->size = size;

		if (copy_from_user(ac->params, data, size))
			return -EFAULT;

		if (w->power)
			return skl_set_module_params(skl,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;
	}

	/* The channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the dma id for host and link.
 * In case of a passthrough pipeline, both host and link are in the same
 * pipeline, so copy the link and host params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res = &mconfig->module->resources[0];
	struct skl_dev *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	if (skl->nr_modules)
		return 0;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[0].inputs[0].fmt;
	else
		format = &mconfig->module->formats[0].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to the
 * FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect &&
			    p->sink->power &&
			    !is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
			    !is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

/* Map the topology BE device type to the NHLT link type */
static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings, so pick based on the PCM
 * parameters.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl_dev *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id */
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback).
 * Based on sink and source, we need to either find the source list or the
 * sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
};

static int skl_tplg_fill_pipe_cfg(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val, int conf_idx, int dir)
{
	struct skl_pipe_fmt *fmt;
	struct skl_path_config *config;

	switch (dir) {
	case SKL_DIR_IN:
		fmt = &pipe->configs[conf_idx].in_fmt;
		break;

	case SKL_DIR_OUT:
		fmt = &pipe->configs[conf_idx].out_fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	config = &pipe->configs[conf_idx];

	switch (tkn) {
	case SKL_TKN_U32_CFG_FREQ:
		fmt->freq = tkn_val;
		break;

	case SKL_TKN_U8_CFG_CHAN:
		fmt->channels = tkn_val;
		break;

	case SKL_TKN_U8_CFG_BPS:
		fmt->bps = tkn_val;
		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
		config->mem_pages = tkn_val;
		break;

	default:
		dev_err(dev, "Invalid token config: %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{
	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_DIRECTION:
		pipe->direction = tkn_val;
		break;

	case SKL_TKN_U32_NUM_CONFIGS:
		pipe->nr_cfgs = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add a pipeline by parsing the relevant tokens.
 * Return an existing pipe if the pipe already exists.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl_dev *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return -EEXIST;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}

static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
	if (uuid_tkn->token == SKL_TKN_UUID) {
		guid_copy(guid, (guid_t *)&uuid_tkn->uuid);
		return 0;
	}

	dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);

	return -EINVAL;
}

static int skl_tplg_fill_pin(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_pin *m_pin,
		int pin_index)
{
	int ret;

	switch (tkn_elem->token) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_UUID:
		ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
		if (ret < 0)
			return ret;

		break;

	default:
		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
		struct skl_module_cfg *mconfig,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		int dir, int pin_count)
{
	int ret;
	struct skl_module_pin *m_pin;

	switch (dir) {
	case SKL_DIR_IN:
		m_pin = mconfig->m_in_pin;
		break;

	case SKL_DIR_OUT:
		m_pin = mconfig->m_out_pin;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
	if (ret < 0)
		return ret;

	m_pin[pin_count].in_use = false;
	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

	return 0;
}

/*
 * Fill up input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_fmt *dst_fmt,
		u32 tkn, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
		break;

	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
		break;

	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
break; 2093 2094 case SKL_TKN_U32_FMT_CH_CONFIG: 2095 dst_fmt->ch_cfg = value; 2096 break; 2097 2098 case SKL_TKN_U32_FMT_INTERLEAVE: 2099 dst_fmt->interleaving_style = value; 2100 break; 2101 2102 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2103 dst_fmt->sample_type = value; 2104 break; 2105 2106 case SKL_TKN_U32_FMT_CH_MAP: 2107 dst_fmt->ch_map = value; 2108 break; 2109 2110 default: 2111 dev_err(dev, "Invalid token %d\n", tkn); 2112 return -EINVAL; 2113 } 2114 2115 return 0; 2116 } 2117 2118 static int skl_tplg_widget_fill_fmt(struct device *dev, 2119 struct skl_module_iface *fmt, 2120 u32 tkn, u32 val, u32 dir, int fmt_idx) 2121 { 2122 struct skl_module_fmt *dst_fmt; 2123 2124 if (!fmt) 2125 return -EINVAL; 2126 2127 switch (dir) { 2128 case SKL_DIR_IN: 2129 dst_fmt = &fmt->inputs[fmt_idx].fmt; 2130 break; 2131 2132 case SKL_DIR_OUT: 2133 dst_fmt = &fmt->outputs[fmt_idx].fmt; 2134 break; 2135 2136 default: 2137 dev_err(dev, "Invalid direction: %d\n", dir); 2138 return -EINVAL; 2139 } 2140 2141 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val); 2142 } 2143 2144 static void skl_tplg_fill_pin_dynamic_val( 2145 struct skl_module_pin *mpin, u32 pin_count, u32 value) 2146 { 2147 int i; 2148 2149 for (i = 0; i < pin_count; i++) 2150 mpin[i].is_dynamic = value; 2151 } 2152 2153 /* 2154 * Resource table in the manifest has pin specific resources 2155 * like pin and pin buffer size 2156 */ 2157 static int skl_tplg_manifest_pin_res_tkn(struct device *dev, 2158 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2159 struct skl_module_res *res, int pin_idx, int dir) 2160 { 2161 struct skl_module_pin_resources *m_pin; 2162 2163 switch (dir) { 2164 case SKL_DIR_IN: 2165 m_pin = &res->input[pin_idx]; 2166 break; 2167 2168 case SKL_DIR_OUT: 2169 m_pin = &res->output[pin_idx]; 2170 break; 2171 2172 default: 2173 dev_err(dev, "Invalid pin direction: %d\n", dir); 2174 return -EINVAL; 2175 } 2176 2177 switch (tkn_elem->token) { 2178 case SKL_TKN_MM_U32_RES_PIN_ID: 2179 m_pin->pin_index = tkn_elem->value; 2180 break; 2181 2182 case SKL_TKN_MM_U32_PIN_BUF: 2183 m_pin->buf_size = tkn_elem->value; 2184 break; 2185 2186 default: 2187 dev_err(dev, "Invalid token: %d\n", tkn_elem->token); 2188 return -EINVAL; 2189 } 2190 2191 return 0; 2192 } 2193 2194 /* 2195 * Fill module specific resources from the manifest's resource 2196 * table like CPS, DMA size, mem_pages. 
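 * Pin-scoped tokens (SKL_TKN_MM_U32_RES_PIN_ID, SKL_TKN_MM_U32_PIN_BUF) are
 * forwarded to skl_tplg_manifest_pin_res_tkn(); the unused CPS/MCPS tokens
 * are accepted but ignored.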
2197 */ 2198 static int skl_tplg_fill_res_tkn(struct device *dev, 2199 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2200 struct skl_module_res *res, 2201 int pin_idx, int dir) 2202 { 2203 int ret, tkn_count = 0; 2204 2205 if (!res) 2206 return -EINVAL; 2207 2208 switch (tkn_elem->token) { 2209 case SKL_TKN_MM_U32_DMA_SIZE: 2210 res->dma_buffer_size = tkn_elem->value; 2211 break; 2212 2213 case SKL_TKN_MM_U32_CPC: 2214 res->cpc = tkn_elem->value; 2215 break; 2216 2217 case SKL_TKN_U32_MEM_PAGES: 2218 res->is_pages = tkn_elem->value; 2219 break; 2220 2221 case SKL_TKN_U32_OBS: 2222 res->obs = tkn_elem->value; 2223 break; 2224 2225 case SKL_TKN_U32_IBS: 2226 res->ibs = tkn_elem->value; 2227 break; 2228 2229 case SKL_TKN_MM_U32_RES_PIN_ID: 2230 case SKL_TKN_MM_U32_PIN_BUF: 2231 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res, 2232 pin_idx, dir); 2233 if (ret < 0) 2234 return ret; 2235 break; 2236 2237 case SKL_TKN_MM_U32_CPS: 2238 case SKL_TKN_U32_MAX_MCPS: 2239 /* ignore unused tokens */ 2240 break; 2241 2242 default: 2243 dev_err(dev, "Not a res type token: %d", tkn_elem->token); 2244 return -EINVAL; 2245 2246 } 2247 tkn_count++; 2248 2249 return tkn_count; 2250 } 2251 2252 /* 2253 * Parse tokens to fill up the module private data 2254 */ 2255 static int skl_tplg_get_token(struct device *dev, 2256 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2257 struct skl_dev *skl, struct skl_module_cfg *mconfig) 2258 { 2259 int tkn_count = 0; 2260 int ret; 2261 static int is_pipe_exists; 2262 static int pin_index, dir, conf_idx; 2263 struct skl_module_iface *iface = NULL; 2264 struct skl_module_res *res = NULL; 2265 int res_idx = mconfig->res_idx; 2266 int fmt_idx = mconfig->fmt_idx; 2267 2268 /* 2269 * If the manifest structure contains no modules, fill all 2270 * the module data to 0th index. 2271 * res_idx and fmt_idx are default set to 0. 
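	 * When the manifest does describe modules, res and iface are left NULL
	 * here and the per-module resource/interface tables parsed from the
	 * manifest are used instead.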
2272 */ 2273 if (skl->nr_modules == 0) { 2274 res = &mconfig->module->resources[res_idx]; 2275 iface = &mconfig->module->formats[fmt_idx]; 2276 } 2277 2278 if (tkn_elem->token > SKL_TKN_MAX) 2279 return -EINVAL; 2280 2281 switch (tkn_elem->token) { 2282 case SKL_TKN_U8_IN_QUEUE_COUNT: 2283 mconfig->module->max_input_pins = tkn_elem->value; 2284 break; 2285 2286 case SKL_TKN_U8_OUT_QUEUE_COUNT: 2287 mconfig->module->max_output_pins = tkn_elem->value; 2288 break; 2289 2290 case SKL_TKN_U8_DYN_IN_PIN: 2291 if (!mconfig->m_in_pin) 2292 mconfig->m_in_pin = 2293 devm_kcalloc(dev, MAX_IN_QUEUE, 2294 sizeof(*mconfig->m_in_pin), 2295 GFP_KERNEL); 2296 if (!mconfig->m_in_pin) 2297 return -ENOMEM; 2298 2299 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE, 2300 tkn_elem->value); 2301 break; 2302 2303 case SKL_TKN_U8_DYN_OUT_PIN: 2304 if (!mconfig->m_out_pin) 2305 mconfig->m_out_pin = 2306 devm_kcalloc(dev, MAX_IN_QUEUE, 2307 sizeof(*mconfig->m_in_pin), 2308 GFP_KERNEL); 2309 if (!mconfig->m_out_pin) 2310 return -ENOMEM; 2311 2312 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE, 2313 tkn_elem->value); 2314 break; 2315 2316 case SKL_TKN_U8_TIME_SLOT: 2317 mconfig->time_slot = tkn_elem->value; 2318 break; 2319 2320 case SKL_TKN_U8_CORE_ID: 2321 mconfig->core_id = tkn_elem->value; 2322 break; 2323 2324 case SKL_TKN_U8_MOD_TYPE: 2325 mconfig->m_type = tkn_elem->value; 2326 break; 2327 2328 case SKL_TKN_U8_DEV_TYPE: 2329 mconfig->dev_type = tkn_elem->value; 2330 break; 2331 2332 case SKL_TKN_U8_HW_CONN_TYPE: 2333 mconfig->hw_conn_type = tkn_elem->value; 2334 break; 2335 2336 case SKL_TKN_U16_MOD_INST_ID: 2337 mconfig->id.instance_id = 2338 tkn_elem->value; 2339 break; 2340 2341 case SKL_TKN_U32_MEM_PAGES: 2342 case SKL_TKN_U32_MAX_MCPS: 2343 case SKL_TKN_U32_OBS: 2344 case SKL_TKN_U32_IBS: 2345 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir); 2346 if (ret < 0) 2347 return ret; 2348 2349 break; 2350 2351 case SKL_TKN_U32_VBUS_ID: 2352 mconfig->vbus_id = tkn_elem->value; 2353 break; 2354 2355 case SKL_TKN_U32_PARAMS_FIXUP: 2356 mconfig->params_fixup = tkn_elem->value; 2357 break; 2358 2359 case SKL_TKN_U32_CONVERTER: 2360 mconfig->converter = tkn_elem->value; 2361 break; 2362 2363 case SKL_TKN_U32_D0I3_CAPS: 2364 mconfig->d0i3_caps = tkn_elem->value; 2365 break; 2366 2367 case SKL_TKN_U32_PIPE_ID: 2368 ret = skl_tplg_add_pipe(dev, 2369 mconfig, skl, tkn_elem); 2370 2371 if (ret < 0) { 2372 if (ret == -EEXIST) { 2373 is_pipe_exists = 1; 2374 break; 2375 } 2376 return is_pipe_exists; 2377 } 2378 2379 break; 2380 2381 case SKL_TKN_U32_PIPE_CONFIG_ID: 2382 conf_idx = tkn_elem->value; 2383 break; 2384 2385 case SKL_TKN_U32_PIPE_CONN_TYPE: 2386 case SKL_TKN_U32_PIPE_PRIORITY: 2387 case SKL_TKN_U32_PIPE_MEM_PGS: 2388 case SKL_TKN_U32_PMODE: 2389 case SKL_TKN_U32_PIPE_DIRECTION: 2390 case SKL_TKN_U32_NUM_CONFIGS: 2391 if (is_pipe_exists) { 2392 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe, 2393 tkn_elem->token, tkn_elem->value); 2394 if (ret < 0) 2395 return ret; 2396 } 2397 2398 break; 2399 2400 case SKL_TKN_U32_PATH_MEM_PGS: 2401 case SKL_TKN_U32_CFG_FREQ: 2402 case SKL_TKN_U8_CFG_CHAN: 2403 case SKL_TKN_U8_CFG_BPS: 2404 if (mconfig->pipe->nr_cfgs) { 2405 ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe, 2406 tkn_elem->token, tkn_elem->value, 2407 conf_idx, dir); 2408 if (ret < 0) 2409 return ret; 2410 } 2411 break; 2412 2413 case SKL_TKN_CFG_MOD_RES_ID: 2414 mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value; 2415 break; 2416 2417 case SKL_TKN_CFG_MOD_FMT_ID: 2418 
mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2419 break; 2420 2421 /* 2422 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2423 * direction and the pin count. The first four bits represent 2424 * direction and next four the pin count. 2425 */ 2426 case SKL_TKN_U32_DIR_PIN_COUNT: 2427 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 2428 pin_index = (tkn_elem->value & 2429 SKL_PIN_COUNT_MASK) >> 4; 2430 2431 break; 2432 2433 case SKL_TKN_U32_FMT_CH: 2434 case SKL_TKN_U32_FMT_FREQ: 2435 case SKL_TKN_U32_FMT_BIT_DEPTH: 2436 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2437 case SKL_TKN_U32_FMT_CH_CONFIG: 2438 case SKL_TKN_U32_FMT_INTERLEAVE: 2439 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2440 case SKL_TKN_U32_FMT_CH_MAP: 2441 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token, 2442 tkn_elem->value, dir, pin_index); 2443 2444 if (ret < 0) 2445 return ret; 2446 2447 break; 2448 2449 case SKL_TKN_U32_PIN_MOD_ID: 2450 case SKL_TKN_U32_PIN_INST_ID: 2451 case SKL_TKN_UUID: 2452 ret = skl_tplg_fill_pins_info(dev, 2453 mconfig, tkn_elem, dir, 2454 pin_index); 2455 if (ret < 0) 2456 return ret; 2457 2458 break; 2459 2460 case SKL_TKN_U32_CAPS_SIZE: 2461 mconfig->formats_config.caps_size = 2462 tkn_elem->value; 2463 2464 break; 2465 2466 case SKL_TKN_U32_CAPS_SET_PARAMS: 2467 mconfig->formats_config.set_params = 2468 tkn_elem->value; 2469 break; 2470 2471 case SKL_TKN_U32_CAPS_PARAMS_ID: 2472 mconfig->formats_config.param_id = 2473 tkn_elem->value; 2474 break; 2475 2476 case SKL_TKN_U32_PROC_DOMAIN: 2477 mconfig->domain = 2478 tkn_elem->value; 2479 2480 break; 2481 2482 case SKL_TKN_U32_DMA_BUF_SIZE: 2483 mconfig->dma_buffer_size = tkn_elem->value; 2484 break; 2485 2486 case SKL_TKN_U8_IN_PIN_TYPE: 2487 case SKL_TKN_U8_OUT_PIN_TYPE: 2488 case SKL_TKN_U8_CONN_TYPE: 2489 break; 2490 2491 default: 2492 dev_err(dev, "Token %d not handled\n", 2493 tkn_elem->token); 2494 return -EINVAL; 2495 } 2496 2497 tkn_count++; 2498 2499 return tkn_count; 2500 } 2501 2502 /* 2503 * Parse the vendor array for specific tokens to construct 2504 * module private data 2505 */ 2506 static int skl_tplg_get_tokens(struct device *dev, 2507 char *pvt_data, struct skl_dev *skl, 2508 struct skl_module_cfg *mconfig, int block_size) 2509 { 2510 struct snd_soc_tplg_vendor_array *array; 2511 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2512 int tkn_count = 0, ret; 2513 int off = 0, tuple_size = 0; 2514 bool is_module_guid = true; 2515 2516 if (block_size <= 0) 2517 return -EINVAL; 2518 2519 while (tuple_size < block_size) { 2520 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 2521 2522 off += array->size; 2523 2524 switch (array->type) { 2525 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 2526 dev_warn(dev, "no string tokens expected for skl tplg\n"); 2527 continue; 2528 2529 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 2530 if (is_module_guid) { 2531 ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid, 2532 array->uuid); 2533 is_module_guid = false; 2534 } else { 2535 ret = skl_tplg_get_token(dev, array->value, skl, 2536 mconfig); 2537 } 2538 2539 if (ret < 0) 2540 return ret; 2541 2542 tuple_size += sizeof(*array->uuid); 2543 2544 continue; 2545 2546 default: 2547 tkn_elem = array->value; 2548 tkn_count = 0; 2549 break; 2550 } 2551 2552 while (tkn_count <= (array->num_elems - 1)) { 2553 ret = skl_tplg_get_token(dev, tkn_elem, 2554 skl, mconfig); 2555 2556 if (ret < 0) 2557 return ret; 2558 2559 tkn_count = tkn_count + ret; 2560 tkn_elem++; 2561 } 2562 2563 tuple_size += tkn_count * sizeof(*tkn_elem); 2564 } 2565 2566 return off; 
2567 } 2568 2569 /* 2570 * Every data block is preceded by a descriptor to read the number 2571 * of data blocks, they type of the block and it's size 2572 */ 2573 static int skl_tplg_get_desc_blocks(struct device *dev, 2574 struct snd_soc_tplg_vendor_array *array) 2575 { 2576 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2577 2578 tkn_elem = array->value; 2579 2580 switch (tkn_elem->token) { 2581 case SKL_TKN_U8_NUM_BLOCKS: 2582 case SKL_TKN_U8_BLOCK_TYPE: 2583 case SKL_TKN_U16_BLOCK_SIZE: 2584 return tkn_elem->value; 2585 2586 default: 2587 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token); 2588 break; 2589 } 2590 2591 return -EINVAL; 2592 } 2593 2594 /* Functions to parse private data from configuration file format v4 */ 2595 2596 /* 2597 * Add pipeline from topology binary into driver pipeline list 2598 * 2599 * If already added we return that instance 2600 * Otherwise we create a new instance and add into driver list 2601 */ 2602 static int skl_tplg_add_pipe_v4(struct device *dev, 2603 struct skl_module_cfg *mconfig, struct skl_dev *skl, 2604 struct skl_dfw_v4_pipe *dfw_pipe) 2605 { 2606 struct skl_pipeline *ppl; 2607 struct skl_pipe *pipe; 2608 struct skl_pipe_params *params; 2609 2610 list_for_each_entry(ppl, &skl->ppl_list, node) { 2611 if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) { 2612 mconfig->pipe = ppl->pipe; 2613 return 0; 2614 } 2615 } 2616 2617 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2618 if (!ppl) 2619 return -ENOMEM; 2620 2621 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2622 if (!pipe) 2623 return -ENOMEM; 2624 2625 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2626 if (!params) 2627 return -ENOMEM; 2628 2629 pipe->ppl_id = dfw_pipe->pipe_id; 2630 pipe->memory_pages = dfw_pipe->memory_pages; 2631 pipe->pipe_priority = dfw_pipe->pipe_priority; 2632 pipe->conn_type = dfw_pipe->conn_type; 2633 pipe->state = SKL_PIPE_INVALID; 2634 pipe->p_params = params; 2635 INIT_LIST_HEAD(&pipe->w_list); 2636 2637 ppl->pipe = pipe; 2638 list_add(&ppl->node, &skl->ppl_list); 2639 2640 mconfig->pipe = pipe; 2641 2642 return 0; 2643 } 2644 2645 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin, 2646 struct skl_module_pin *m_pin, 2647 bool is_dynamic, int max_pin) 2648 { 2649 int i; 2650 2651 for (i = 0; i < max_pin; i++) { 2652 m_pin[i].id.module_id = dfw_pin[i].module_id; 2653 m_pin[i].id.instance_id = dfw_pin[i].instance_id; 2654 m_pin[i].in_use = false; 2655 m_pin[i].is_dynamic = is_dynamic; 2656 m_pin[i].pin_state = SKL_PIN_UNBIND; 2657 } 2658 } 2659 2660 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt, 2661 struct skl_dfw_v4_module_fmt *src_fmt, 2662 int pins) 2663 { 2664 int i; 2665 2666 for (i = 0; i < pins; i++) { 2667 dst_fmt[i].fmt.channels = src_fmt[i].channels; 2668 dst_fmt[i].fmt.s_freq = src_fmt[i].freq; 2669 dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth; 2670 dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth; 2671 dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg; 2672 dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map; 2673 dst_fmt[i].fmt.interleaving_style = 2674 src_fmt[i].interleaving_style; 2675 dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type; 2676 } 2677 } 2678 2679 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w, 2680 struct skl_dev *skl, struct device *dev, 2681 struct skl_module_cfg *mconfig) 2682 { 2683 struct skl_dfw_v4_module *dfw = 2684 (struct skl_dfw_v4_module *)tplg_w->priv.data; 2685 int ret; 2686 2687 dev_dbg(dev, "Parsing Skylake v4 widget 
topology data\n"); 2688 2689 ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid); 2690 if (ret) 2691 return ret; 2692 mconfig->id.module_id = -1; 2693 mconfig->id.instance_id = dfw->instance_id; 2694 mconfig->module->resources[0].cpc = dfw->max_mcps / 1000; 2695 mconfig->module->resources[0].ibs = dfw->ibs; 2696 mconfig->module->resources[0].obs = dfw->obs; 2697 mconfig->core_id = dfw->core_id; 2698 mconfig->module->max_input_pins = dfw->max_in_queue; 2699 mconfig->module->max_output_pins = dfw->max_out_queue; 2700 mconfig->module->loadable = dfw->is_loadable; 2701 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt, 2702 MAX_IN_QUEUE); 2703 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt, 2704 MAX_OUT_QUEUE); 2705 2706 mconfig->params_fixup = dfw->params_fixup; 2707 mconfig->converter = dfw->converter; 2708 mconfig->m_type = dfw->module_type; 2709 mconfig->vbus_id = dfw->vbus_id; 2710 mconfig->module->resources[0].is_pages = dfw->mem_pages; 2711 2712 ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe); 2713 if (ret) 2714 return ret; 2715 2716 mconfig->dev_type = dfw->dev_type; 2717 mconfig->hw_conn_type = dfw->hw_conn_type; 2718 mconfig->time_slot = dfw->time_slot; 2719 mconfig->formats_config.caps_size = dfw->caps.caps_size; 2720 2721 mconfig->m_in_pin = devm_kcalloc(dev, 2722 MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin), 2723 GFP_KERNEL); 2724 if (!mconfig->m_in_pin) 2725 return -ENOMEM; 2726 2727 mconfig->m_out_pin = devm_kcalloc(dev, 2728 MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin), 2729 GFP_KERNEL); 2730 if (!mconfig->m_out_pin) 2731 return -ENOMEM; 2732 2733 skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin, 2734 dfw->is_dynamic_in_pin, 2735 mconfig->module->max_input_pins); 2736 skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin, 2737 dfw->is_dynamic_out_pin, 2738 mconfig->module->max_output_pins); 2739 2740 if (mconfig->formats_config.caps_size) { 2741 mconfig->formats_config.set_params = dfw->caps.set_params; 2742 mconfig->formats_config.param_id = dfw->caps.param_id; 2743 mconfig->formats_config.caps = 2744 devm_kzalloc(dev, mconfig->formats_config.caps_size, 2745 GFP_KERNEL); 2746 if (!mconfig->formats_config.caps) 2747 return -ENOMEM; 2748 memcpy(mconfig->formats_config.caps, dfw->caps.caps, 2749 dfw->caps.caps_size); 2750 } 2751 2752 return 0; 2753 } 2754 2755 /* 2756 * Parse the private data for the token and corresponding value. 2757 * The private data can have multiple data blocks. So, a data block 2758 * is preceded by a descriptor for number of blocks and a descriptor 2759 * for the type and size of the suceeding data block. 2760 */ 2761 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w, 2762 struct skl_dev *skl, struct device *dev, 2763 struct skl_module_cfg *mconfig) 2764 { 2765 struct snd_soc_tplg_vendor_array *array; 2766 int num_blocks, block_size = 0, block_type, off = 0; 2767 char *data; 2768 int ret; 2769 2770 /* 2771 * v4 configuration files have a valid UUID at the start of 2772 * the widget's private data. 
2773 */ 2774 if (uuid_is_valid((char *)tplg_w->priv.data)) 2775 return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig); 2776 2777 /* Read the NUM_DATA_BLOCKS descriptor */ 2778 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data; 2779 ret = skl_tplg_get_desc_blocks(dev, array); 2780 if (ret < 0) 2781 return ret; 2782 num_blocks = ret; 2783 2784 off += array->size; 2785 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 2786 while (num_blocks > 0) { 2787 array = (struct snd_soc_tplg_vendor_array *) 2788 (tplg_w->priv.data + off); 2789 2790 ret = skl_tplg_get_desc_blocks(dev, array); 2791 2792 if (ret < 0) 2793 return ret; 2794 block_type = ret; 2795 off += array->size; 2796 2797 array = (struct snd_soc_tplg_vendor_array *) 2798 (tplg_w->priv.data + off); 2799 2800 ret = skl_tplg_get_desc_blocks(dev, array); 2801 2802 if (ret < 0) 2803 return ret; 2804 block_size = ret; 2805 off += array->size; 2806 2807 array = (struct snd_soc_tplg_vendor_array *) 2808 (tplg_w->priv.data + off); 2809 2810 data = (tplg_w->priv.data + off); 2811 2812 if (block_type == SKL_TYPE_TUPLE) { 2813 ret = skl_tplg_get_tokens(dev, data, 2814 skl, mconfig, block_size); 2815 2816 if (ret < 0) 2817 return ret; 2818 2819 --num_blocks; 2820 } else { 2821 if (mconfig->formats_config.caps_size > 0) 2822 memcpy(mconfig->formats_config.caps, data, 2823 mconfig->formats_config.caps_size); 2824 --num_blocks; 2825 ret = mconfig->formats_config.caps_size; 2826 } 2827 off += ret; 2828 } 2829 2830 return 0; 2831 } 2832 2833 static void skl_clear_pin_config(struct snd_soc_component *component, 2834 struct snd_soc_dapm_widget *w) 2835 { 2836 int i; 2837 struct skl_module_cfg *mconfig; 2838 struct skl_pipe *pipe; 2839 2840 if (!strncmp(w->dapm->component->name, component->name, 2841 strlen(component->name))) { 2842 mconfig = w->priv; 2843 pipe = mconfig->pipe; 2844 for (i = 0; i < mconfig->module->max_input_pins; i++) { 2845 mconfig->m_in_pin[i].in_use = false; 2846 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND; 2847 } 2848 for (i = 0; i < mconfig->module->max_output_pins; i++) { 2849 mconfig->m_out_pin[i].in_use = false; 2850 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND; 2851 } 2852 pipe->state = SKL_PIPE_INVALID; 2853 mconfig->m_state = SKL_MODULE_UNINIT; 2854 } 2855 } 2856 2857 void skl_cleanup_resources(struct skl_dev *skl) 2858 { 2859 struct snd_soc_component *soc_component = skl->component; 2860 struct snd_soc_dapm_widget *w; 2861 struct snd_soc_card *card; 2862 2863 if (soc_component == NULL) 2864 return; 2865 2866 card = soc_component->card; 2867 if (!card || !card->instantiated) 2868 return; 2869 2870 list_for_each_entry(w, &card->widgets, list) { 2871 if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL) 2872 skl_clear_pin_config(soc_component, w); 2873 } 2874 2875 skl_clear_module_cnt(skl->dsp); 2876 } 2877 2878 /* 2879 * Topology core widget load callback 2880 * 2881 * This is used to save the private data for each widget which gives 2882 * information to the driver about module and pipeline parameters which DSP 2883 * FW expects like ids, resource values, formats etc 2884 */ 2885 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index, 2886 struct snd_soc_dapm_widget *w, 2887 struct snd_soc_tplg_dapm_widget *tplg_w) 2888 { 2889 int ret; 2890 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 2891 struct skl_dev *skl = bus_to_skl(bus); 2892 struct skl_module_cfg *mconfig; 2893 2894 if (!tplg_w->priv.size) 2895 goto bind_event; 2896 2897 mconfig = devm_kzalloc(bus->dev, 
sizeof(*mconfig), GFP_KERNEL);
2898
2899 if (!mconfig)
2900 return -ENOMEM;
2901
2902 if (skl->nr_modules == 0) {
2903 mconfig->module = devm_kzalloc(bus->dev,
2904 sizeof(*mconfig->module), GFP_KERNEL);
2905 if (!mconfig->module)
2906 return -ENOMEM;
2907 }
2908
2909 w->priv = mconfig;
2910
2911 /*
2912 * module binary can be loaded later, so set it to query when
2913 * the module is loaded for a use case
2914 */
2915 mconfig->id.module_id = -1;
2916
2917 /* Parse private data for tuples */
2918 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2919 if (ret < 0)
2920 return ret;
2921
2922 skl_debug_init_module(skl->debugfs, w, mconfig);
2923
2924 bind_event:
2925 if (tplg_w->event_type == 0) {
2926 dev_dbg(bus->dev, "ASoC: No event handler required\n");
2927 return 0;
2928 }
2929
2930 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2931 ARRAY_SIZE(skl_tplg_widget_ops),
2932 tplg_w->event_type);
2933
2934 if (ret) {
2935 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2936 __func__, tplg_w->event_type);
2937 return -EINVAL;
2938 }
2939
2940 return 0;
2941 }
2942
2943 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2944 struct snd_soc_tplg_bytes_control *bc)
2945 {
2946 struct skl_algo_data *ac;
2947 struct skl_dfw_algo_data *dfw_ac =
2948 (struct skl_dfw_algo_data *)bc->priv.data;
2949
2950 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2951 if (!ac)
2952 return -ENOMEM;
2953
2954 /* Fill private data */
2955 ac->max = dfw_ac->max;
2956 ac->param_id = dfw_ac->param_id;
2957 ac->set_params = dfw_ac->set_params;
2958 ac->size = dfw_ac->max;
2959
2960 if (ac->max) {
2961 ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
2962 if (!ac->params)
2963 return -ENOMEM;
2964
2965 memcpy(ac->params, dfw_ac->params, ac->max);
2966 }
2967
2968 be->dobj.private = ac;
2969 return 0;
2970 }
2971
2972 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
2973 struct snd_soc_tplg_enum_control *ec)
2974 {
2975
2976 void *data;
2977
2978 if (ec->priv.size) {
2979 data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
2980 if (!data)
2981 return -ENOMEM;
2982 memcpy(data, ec->priv.data, ec->priv.size);
2983 se->dobj.private = data;
2984 }
2985
2986 return 0;
2987
2988 }
2989
2990 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2991 int index,
2992 struct snd_kcontrol_new *kctl,
2993 struct snd_soc_tplg_ctl_hdr *hdr)
2994 {
2995 struct soc_bytes_ext *sb;
2996 struct snd_soc_tplg_bytes_control *tplg_bc;
2997 struct snd_soc_tplg_enum_control *tplg_ec;
2998 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
2999 struct soc_enum *se;
3000
3001 switch (hdr->ops.info) {
3002 case SND_SOC_TPLG_CTL_BYTES:
3003 tplg_bc = container_of(hdr,
3004 struct snd_soc_tplg_bytes_control, hdr);
3005 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
3006 sb = (struct soc_bytes_ext *)kctl->private_value;
3007 if (tplg_bc->priv.size)
3008 return skl_init_algo_data(
3009 bus->dev, sb, tplg_bc);
3010 }
3011 break;
3012
3013 case SND_SOC_TPLG_CTL_ENUM:
3014 tplg_ec = container_of(hdr,
3015 struct snd_soc_tplg_enum_control, hdr);
3016 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
3017 se = (struct soc_enum *)kctl->private_value;
3018 if (tplg_ec->priv.size)
3019 return skl_init_enum_data(bus->dev, se,
3020 tplg_ec);
3021 }
3022 break;
3023
3024 default:
3025 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
3026 hdr->ops.get, hdr->ops.put, hdr->ops.info);
3027 break;
3028 }
3029
3030 return 0;
3031 } 3032 3033 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 3034 struct snd_soc_tplg_vendor_string_elem *str_elem, 3035 struct skl_dev *skl) 3036 { 3037 int tkn_count = 0; 3038 static int ref_count; 3039 3040 switch (str_elem->token) { 3041 case SKL_TKN_STR_LIB_NAME: 3042 if (ref_count > skl->lib_count - 1) { 3043 ref_count = 0; 3044 return -EINVAL; 3045 } 3046 3047 strncpy(skl->lib_info[ref_count].name, 3048 str_elem->string, 3049 ARRAY_SIZE(skl->lib_info[ref_count].name)); 3050 ref_count++; 3051 break; 3052 3053 default: 3054 dev_err(dev, "Not a string token %d\n", str_elem->token); 3055 break; 3056 } 3057 tkn_count++; 3058 3059 return tkn_count; 3060 } 3061 3062 static int skl_tplg_get_str_tkn(struct device *dev, 3063 struct snd_soc_tplg_vendor_array *array, 3064 struct skl_dev *skl) 3065 { 3066 int tkn_count = 0, ret; 3067 struct snd_soc_tplg_vendor_string_elem *str_elem; 3068 3069 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value; 3070 while (tkn_count < array->num_elems) { 3071 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl); 3072 str_elem++; 3073 3074 if (ret < 0) 3075 return ret; 3076 3077 tkn_count = tkn_count + ret; 3078 } 3079 3080 return tkn_count; 3081 } 3082 3083 static int skl_tplg_manifest_fill_fmt(struct device *dev, 3084 struct skl_module_iface *fmt, 3085 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3086 u32 dir, int fmt_idx) 3087 { 3088 struct skl_module_pin_fmt *dst_fmt; 3089 struct skl_module_fmt *mod_fmt; 3090 int ret; 3091 3092 if (!fmt) 3093 return -EINVAL; 3094 3095 switch (dir) { 3096 case SKL_DIR_IN: 3097 dst_fmt = &fmt->inputs[fmt_idx]; 3098 break; 3099 3100 case SKL_DIR_OUT: 3101 dst_fmt = &fmt->outputs[fmt_idx]; 3102 break; 3103 3104 default: 3105 dev_err(dev, "Invalid direction: %d\n", dir); 3106 return -EINVAL; 3107 } 3108 3109 mod_fmt = &dst_fmt->fmt; 3110 3111 switch (tkn_elem->token) { 3112 case SKL_TKN_MM_U32_INTF_PIN_ID: 3113 dst_fmt->id = tkn_elem->value; 3114 break; 3115 3116 default: 3117 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token, 3118 tkn_elem->value); 3119 if (ret < 0) 3120 return ret; 3121 break; 3122 } 3123 3124 return 0; 3125 } 3126 3127 static int skl_tplg_fill_mod_info(struct device *dev, 3128 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3129 struct skl_module *mod) 3130 { 3131 3132 if (!mod) 3133 return -EINVAL; 3134 3135 switch (tkn_elem->token) { 3136 case SKL_TKN_U8_IN_PIN_TYPE: 3137 mod->input_pin_type = tkn_elem->value; 3138 break; 3139 3140 case SKL_TKN_U8_OUT_PIN_TYPE: 3141 mod->output_pin_type = tkn_elem->value; 3142 break; 3143 3144 case SKL_TKN_U8_IN_QUEUE_COUNT: 3145 mod->max_input_pins = tkn_elem->value; 3146 break; 3147 3148 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3149 mod->max_output_pins = tkn_elem->value; 3150 break; 3151 3152 case SKL_TKN_MM_U8_NUM_RES: 3153 mod->nr_resources = tkn_elem->value; 3154 break; 3155 3156 case SKL_TKN_MM_U8_NUM_INTF: 3157 mod->nr_interfaces = tkn_elem->value; 3158 break; 3159 3160 default: 3161 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3162 return -EINVAL; 3163 } 3164 3165 return 0; 3166 } 3167 3168 3169 static int skl_tplg_get_int_tkn(struct device *dev, 3170 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3171 struct skl_dev *skl) 3172 { 3173 int tkn_count = 0, ret; 3174 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3175 struct skl_module_res *res = NULL; 3176 struct skl_module_iface *fmt = NULL; 3177 struct skl_module *mod = NULL; 3178 static struct skl_astate_param *astate_table; 3179 static int astate_cfg_idx, 
count; 3180 int i; 3181 size_t size; 3182 3183 if (skl->modules) { 3184 mod = skl->modules[mod_idx]; 3185 res = &mod->resources[res_val_idx]; 3186 fmt = &mod->formats[intf_val_idx]; 3187 } 3188 3189 switch (tkn_elem->token) { 3190 case SKL_TKN_U32_LIB_COUNT: 3191 skl->lib_count = tkn_elem->value; 3192 break; 3193 3194 case SKL_TKN_U8_NUM_MOD: 3195 skl->nr_modules = tkn_elem->value; 3196 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3197 sizeof(*skl->modules), GFP_KERNEL); 3198 if (!skl->modules) 3199 return -ENOMEM; 3200 3201 for (i = 0; i < skl->nr_modules; i++) { 3202 skl->modules[i] = devm_kzalloc(dev, 3203 sizeof(struct skl_module), GFP_KERNEL); 3204 if (!skl->modules[i]) 3205 return -ENOMEM; 3206 } 3207 break; 3208 3209 case SKL_TKN_MM_U8_MOD_IDX: 3210 mod_idx = tkn_elem->value; 3211 break; 3212 3213 case SKL_TKN_U32_ASTATE_COUNT: 3214 if (astate_table != NULL) { 3215 dev_err(dev, "More than one entry for A-State count"); 3216 return -EINVAL; 3217 } 3218 3219 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3220 dev_err(dev, "Invalid A-State count %d\n", 3221 tkn_elem->value); 3222 return -EINVAL; 3223 } 3224 3225 size = struct_size(skl->cfg.astate_cfg, astate_table, 3226 tkn_elem->value); 3227 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3228 if (!skl->cfg.astate_cfg) 3229 return -ENOMEM; 3230 3231 astate_table = skl->cfg.astate_cfg->astate_table; 3232 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3233 break; 3234 3235 case SKL_TKN_U32_ASTATE_IDX: 3236 if (tkn_elem->value >= count) { 3237 dev_err(dev, "Invalid A-State index %d\n", 3238 tkn_elem->value); 3239 return -EINVAL; 3240 } 3241 3242 astate_cfg_idx = tkn_elem->value; 3243 break; 3244 3245 case SKL_TKN_U32_ASTATE_KCPS: 3246 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3247 break; 3248 3249 case SKL_TKN_U32_ASTATE_CLK_SRC: 3250 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3251 break; 3252 3253 case SKL_TKN_U8_IN_PIN_TYPE: 3254 case SKL_TKN_U8_OUT_PIN_TYPE: 3255 case SKL_TKN_U8_IN_QUEUE_COUNT: 3256 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3257 case SKL_TKN_MM_U8_NUM_RES: 3258 case SKL_TKN_MM_U8_NUM_INTF: 3259 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3260 if (ret < 0) 3261 return ret; 3262 break; 3263 3264 case SKL_TKN_U32_DIR_PIN_COUNT: 3265 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3266 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3267 break; 3268 3269 case SKL_TKN_MM_U32_RES_ID: 3270 if (!res) 3271 return -EINVAL; 3272 3273 res->id = tkn_elem->value; 3274 res_val_idx = tkn_elem->value; 3275 break; 3276 3277 case SKL_TKN_MM_U32_FMT_ID: 3278 if (!fmt) 3279 return -EINVAL; 3280 3281 fmt->fmt_idx = tkn_elem->value; 3282 intf_val_idx = tkn_elem->value; 3283 break; 3284 3285 case SKL_TKN_MM_U32_CPS: 3286 case SKL_TKN_MM_U32_DMA_SIZE: 3287 case SKL_TKN_MM_U32_CPC: 3288 case SKL_TKN_U32_MEM_PAGES: 3289 case SKL_TKN_U32_OBS: 3290 case SKL_TKN_U32_IBS: 3291 case SKL_TKN_MM_U32_RES_PIN_ID: 3292 case SKL_TKN_MM_U32_PIN_BUF: 3293 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3294 if (ret < 0) 3295 return ret; 3296 3297 break; 3298 3299 case SKL_TKN_MM_U32_NUM_IN_FMT: 3300 if (!fmt) 3301 return -EINVAL; 3302 3303 res->nr_input_pins = tkn_elem->value; 3304 break; 3305 3306 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3307 if (!fmt) 3308 return -EINVAL; 3309 3310 res->nr_output_pins = tkn_elem->value; 3311 break; 3312 3313 case SKL_TKN_U32_FMT_CH: 3314 case SKL_TKN_U32_FMT_FREQ: 3315 case SKL_TKN_U32_FMT_BIT_DEPTH: 3316 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3317 case 
SKL_TKN_U32_FMT_CH_CONFIG: 3318 case SKL_TKN_U32_FMT_INTERLEAVE: 3319 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3320 case SKL_TKN_U32_FMT_CH_MAP: 3321 case SKL_TKN_MM_U32_INTF_PIN_ID: 3322 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3323 dir, pin_idx); 3324 if (ret < 0) 3325 return ret; 3326 break; 3327 3328 default: 3329 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3330 return -EINVAL; 3331 } 3332 tkn_count++; 3333 3334 return tkn_count; 3335 } 3336 3337 /* 3338 * Fill the manifest structure by parsing the tokens based on the 3339 * type. 3340 */ 3341 static int skl_tplg_get_manifest_tkn(struct device *dev, 3342 char *pvt_data, struct skl_dev *skl, 3343 int block_size) 3344 { 3345 int tkn_count = 0, ret; 3346 int off = 0, tuple_size = 0; 3347 u8 uuid_index = 0; 3348 struct snd_soc_tplg_vendor_array *array; 3349 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 3350 3351 if (block_size <= 0) 3352 return -EINVAL; 3353 3354 while (tuple_size < block_size) { 3355 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 3356 off += array->size; 3357 switch (array->type) { 3358 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 3359 ret = skl_tplg_get_str_tkn(dev, array, skl); 3360 3361 if (ret < 0) 3362 return ret; 3363 tkn_count = ret; 3364 3365 tuple_size += tkn_count * 3366 sizeof(struct snd_soc_tplg_vendor_string_elem); 3367 continue; 3368 3369 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 3370 if (array->uuid->token != SKL_TKN_UUID) { 3371 dev_err(dev, "Not an UUID token: %d\n", 3372 array->uuid->token); 3373 return -EINVAL; 3374 } 3375 if (uuid_index >= skl->nr_modules) { 3376 dev_err(dev, "Too many UUID tokens\n"); 3377 return -EINVAL; 3378 } 3379 guid_copy(&skl->modules[uuid_index++]->uuid, 3380 (guid_t *)&array->uuid->uuid); 3381 3382 tuple_size += sizeof(*array->uuid); 3383 continue; 3384 3385 default: 3386 tkn_elem = array->value; 3387 tkn_count = 0; 3388 break; 3389 } 3390 3391 while (tkn_count <= array->num_elems - 1) { 3392 ret = skl_tplg_get_int_tkn(dev, 3393 tkn_elem, skl); 3394 if (ret < 0) 3395 return ret; 3396 3397 tkn_count = tkn_count + ret; 3398 tkn_elem++; 3399 } 3400 tuple_size += (tkn_count * sizeof(*tkn_elem)); 3401 tkn_count = 0; 3402 } 3403 3404 return off; 3405 } 3406 3407 /* 3408 * Parse manifest private data for tokens. The private data block is 3409 * preceded by descriptors for type and size of data block. 
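 * Unlike widget private data, only SKL_TYPE_TUPLE blocks are accepted here;
 * any other block type fails the load with -EINVAL.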
3410 */ 3411 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3412 struct device *dev, struct skl_dev *skl) 3413 { 3414 struct snd_soc_tplg_vendor_array *array; 3415 int num_blocks, block_size = 0, block_type, off = 0; 3416 char *data; 3417 int ret; 3418 3419 /* Read the NUM_DATA_BLOCKS descriptor */ 3420 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3421 ret = skl_tplg_get_desc_blocks(dev, array); 3422 if (ret < 0) 3423 return ret; 3424 num_blocks = ret; 3425 3426 off += array->size; 3427 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3428 while (num_blocks > 0) { 3429 array = (struct snd_soc_tplg_vendor_array *) 3430 (manifest->priv.data + off); 3431 ret = skl_tplg_get_desc_blocks(dev, array); 3432 3433 if (ret < 0) 3434 return ret; 3435 block_type = ret; 3436 off += array->size; 3437 3438 array = (struct snd_soc_tplg_vendor_array *) 3439 (manifest->priv.data + off); 3440 3441 ret = skl_tplg_get_desc_blocks(dev, array); 3442 3443 if (ret < 0) 3444 return ret; 3445 block_size = ret; 3446 off += array->size; 3447 3448 array = (struct snd_soc_tplg_vendor_array *) 3449 (manifest->priv.data + off); 3450 3451 data = (manifest->priv.data + off); 3452 3453 if (block_type == SKL_TYPE_TUPLE) { 3454 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3455 block_size); 3456 3457 if (ret < 0) 3458 return ret; 3459 3460 --num_blocks; 3461 } else { 3462 return -EINVAL; 3463 } 3464 off += ret; 3465 } 3466 3467 return 0; 3468 } 3469 3470 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, 3471 struct snd_soc_tplg_manifest *manifest) 3472 { 3473 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3474 struct skl_dev *skl = bus_to_skl(bus); 3475 3476 /* proceed only if we have private data defined */ 3477 if (manifest->priv.size == 0) 3478 return 0; 3479 3480 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3481 3482 if (skl->lib_count > SKL_MAX_LIB) { 3483 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n", 3484 skl->lib_count); 3485 return -EINVAL; 3486 } 3487 3488 return 0; 3489 } 3490 3491 static struct snd_soc_tplg_ops skl_tplg_ops = { 3492 .widget_load = skl_tplg_widget_load, 3493 .control_load = skl_tplg_control_load, 3494 .bytes_ext_ops = skl_tlv_ops, 3495 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3496 .io_ops = skl_tplg_kcontrol_ops, 3497 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3498 .manifest = skl_manifest_load, 3499 .dai_load = skl_dai_load, 3500 }; 3501 3502 /* 3503 * A pipe can have multiple modules, each of them will be a DAPM widget as 3504 * well. 
While managing a pipeline we need to get the list of all the 3505 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3506 * helps to get the SKL type widgets in that pipeline 3507 */ 3508 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3509 { 3510 struct snd_soc_dapm_widget *w; 3511 struct skl_module_cfg *mcfg = NULL; 3512 struct skl_pipe_module *p_module = NULL; 3513 struct skl_pipe *pipe; 3514 3515 list_for_each_entry(w, &component->card->widgets, list) { 3516 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3517 mcfg = w->priv; 3518 pipe = mcfg->pipe; 3519 3520 p_module = devm_kzalloc(component->dev, 3521 sizeof(*p_module), GFP_KERNEL); 3522 if (!p_module) 3523 return -ENOMEM; 3524 3525 p_module->w = w; 3526 list_add_tail(&p_module->node, &pipe->w_list); 3527 } 3528 } 3529 3530 return 0; 3531 } 3532 3533 static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe) 3534 { 3535 struct skl_pipe_module *w_module; 3536 struct snd_soc_dapm_widget *w; 3537 struct skl_module_cfg *mconfig; 3538 bool host_found = false, link_found = false; 3539 3540 list_for_each_entry(w_module, &pipe->w_list, node) { 3541 w = w_module->w; 3542 mconfig = w->priv; 3543 3544 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3545 host_found = true; 3546 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3547 link_found = true; 3548 } 3549 3550 if (host_found && link_found) 3551 pipe->passthru = true; 3552 else 3553 pipe->passthru = false; 3554 } 3555 3556 /* 3557 * SKL topology init routine 3558 */ 3559 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) 3560 { 3561 int ret; 3562 const struct firmware *fw; 3563 struct skl_dev *skl = bus_to_skl(bus); 3564 struct skl_pipeline *ppl; 3565 3566 ret = request_firmware(&fw, skl->tplg_name, bus->dev); 3567 if (ret < 0) { 3568 dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", 3569 skl->tplg_name, ret); 3570 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3571 if (ret < 0) { 3572 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3573 "dfw_sst.bin", ret); 3574 return ret; 3575 } 3576 } 3577 3578 /* 3579 * The complete tplg for SKL is loaded as index 0, we don't use 3580 * any other index 3581 */ 3582 ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw, 0); 3583 if (ret < 0) { 3584 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3585 goto err; 3586 } 3587 3588 ret = skl_tplg_create_pipe_widget_list(component); 3589 if (ret < 0) { 3590 dev_err(bus->dev, "tplg create pipe widget list failed%d\n", 3591 ret); 3592 goto err; 3593 } 3594 3595 list_for_each_entry(ppl, &skl->ppl_list, node) 3596 skl_tplg_set_pipe_type(skl, ppl->pipe); 3597 3598 err: 3599 release_firmware(fw); 3600 return ret; 3601 } 3602 3603 void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus) 3604 { 3605 struct skl_dev *skl = bus_to_skl(bus); 3606 struct skl_pipeline *ppl, *tmp; 3607 3608 if (!list_empty(&skl->ppl_list)) 3609 list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node) 3610 list_del(&ppl->node); 3611 3612 /* clean up topology */ 3613 snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL); 3614 } 3615