// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/intel-nhlt.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}
/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles a given widget.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:

		return false;
	default:
		return true;
	}
}

static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];

	dev_dbg(skl->dev, "Dumping config\n");
	dev_dbg(skl->dev, "Input Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
		iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "Output Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
		iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}
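
/*
 * Illustrative example (not part of the original sources): with
 * SKL_RATE_FIXUP_MASK | SKL_CH_FIXUP_MASK set and hw_params of
 * 48000 Hz, 2 channels, skl_tplg_update_params() above would leave the
 * topology-provided format with s_freq = 48000, channels = 2 and
 * ch_map = 0xFFFFFF10; had SKL_FMT_FIXUP_MASK also been set for an S24_LE
 * stream, valid_bit_depth would become 24 in a 32-bit container.
 */
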
/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup to apply by supplying the
 * fixup mask, and based on that we calculate the output.
 *
 * For an FE, the pcm hw_params is the source/target format. The same is
 * applicable for a BE when its hw_params is invoked.
 * Here, based on the FE/BE pipeline and the direction, we calculate the
 * in_fixup and out_fixup and then apply them to a module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
	out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which are dependent upon pcm
 * params, so once we have calculated the params, we need the buffer
 * calculation as well.
 */
static void skl_tplg_update_buffer_size(struct skl_dev *skl,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/* Since fixups are applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only
	 */
	res = &mcfg->module->resources[mcfg->res_idx];
	in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}
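
/*
 * Worked example (illustrative only): for a 48 kHz, 2-channel stream in a
 * 32-bit container, ibs = obs = 48 * 2 * 4 = 384 bytes per 1 ms of audio;
 * an SRC module (SKL_MODULE_TYPE_SRCINT) multiplies this by 5.
 */
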
static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt, s_cont;
	struct nhlt_specific_cfg *cfg;
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
		return 0;

	dev_dbg(skl->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
		s_cont = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = intel_nhlt_get_endpoint_blob(skl->dev, skl->nhlt, m_cfg->vbus_id,
					   link_type, s_fmt, s_cont, ch,
					   s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
			m_cfg->vbus_id, link_type, dir);
		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d/%d\n",
			ch, s_freq, s_fmt, s_cont);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE update\n",
		w->name);

	skl_dump_mconfig(skl, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(skl, m_cfg);

	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER update\n",
		w->name);

	skl_dump_mconfig(skl, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls, and those
 * need to be set after the module is initialized. If the set_param flag is
 * set, the module params are sent once the module has been initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(skl,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
/*
 * Some module params can be set from user controls and are required when
 * the module is initialized. Such params are identified by the
 * SKL_PARAM_INIT value of the set_params flag and are copied into the
 * module's init data here, so they are sent as part of module init.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config[SKL_PARAM_INIT].caps =
							(u32 *)bc->params;
			mconfig->formats_config[SKL_PARAM_INIT].caps_size =
								bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	u8 cfg_idx;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->dev,
					"module %pUL id not populated\n",
					(guid_t *)mconfig->guid);
			return -EIO;
		}

		cfg_idx = mconfig->pipe->cur_config_idx;
		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

		if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
			ret = skl->dsp->fw_ops.load_mod(skl->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, skl);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, skl);
		uuid_mod = (guid_t *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);

		ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
						mconfig->core_id, ret);
			return ret;
		}

		ret = skl_init_module(skl, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
			goto err;
		}

		ret = skl_tplg_set_module_params(w, skl);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	skl_dsp_put_core(skl->dsp, mconfig->core_id);
	return ret;
}
static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
						struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *mconfig;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (guid_t *)mconfig->guid;

		if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
			ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);

		ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}

static void skl_tplg_set_pipe_config_idx(struct skl_pipe *pipe, int idx)
{
	pipe->cur_config_idx = idx;
	pipe->memory_pages = pipe->configs[idx].mem_pages;
}

/*
 * Here, we select the pipe format based on the pipe type and pipe
 * direction to determine the current config index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		skl_tplg_set_pipe_config_idx(pipe, 0);
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
		dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
			pipe->ppl_id);
		skl_tplg_set_pipe_config_idx(pipe, 0);
		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
	    (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			skl_tplg_set_pipe_config_idx(pipe, i);
			dev_dbg(skl->dev, "Using pipe config: %d\n", i);
			return 0;
		}
	}

	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}
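
/*
 * Illustrative example (not from the original sources): for an FE playback
 * pipe carrying 48000 Hz, 2-channel, 16-bit hw_params, the loop above picks
 * the first config whose in_fmt matches channels == 2, freq == 48000 and
 * bps == 16; if no config matches, -EINVAL is returned and the stream
 * setup fails.
 */
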
/*
 * A mixer module represents a pipeline, so in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - Create the pipeline
 * - Initialize the modules in the pipeline
 * - Finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/*
	 * Create a list of modules for the pipe.
	 * This list contains modules from source to sink.
	 */
	ret = skl_create_pipeline(skl, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(skl, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in the deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(skl, modules->src,
							modules->dst);
		}
	}

	return 0;
}

static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
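
/*
 * Illustrative note (not from the original sources): for a KPB module the
 * bind params carry (mod_id, inst_id) pairs taken from the topology;
 * skl_fill_sink_instance_id() rewrites each inst_id with the private
 * instance id allocated at init time, e.g. (copier, 0) -> (copier, pvt 3),
 * so the firmware can address the correct sink instance in fast mode.
 */
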
/*
 * Some modules require params to be set after the module is bound to
 * all of its connected pins.
 *
 * The module provider initializes the set_param flag for such modules and
 * we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check that all out/in pins are in the bind state;
	 * if so, set the module params
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_BIND].set_params ==
								SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(skl, params, bc->max,
								mconfig);

				ret = skl_set_module_params(skl, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &skl->uuid_list, list) {
		if (guid_equal(uuid, &module->uuid))
			return module->id;
	}

	return -EINVAL;
}

static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = struct_size(params, u.map, uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from the UUID mentioned in the
 * post bind params
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post bind params are used only for KPB
	 * to set copier instances to drain the data
	 * in fast mode
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->dev,
				"%s: invalid kpb post bind params\n",
				__func__);
}
static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for modules with static pin connections */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl_dev *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(skl->dev,
			"%s: src widget=%s\n", __func__, w->name);
		dev_dbg(skl->dev,
			"%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check the widgets in the sink pipelines. They can
		 * be of any widget type; we are only interested in the ones
		 * used by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
		    is_skl_dsp_widget_type(p->sink, skl->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. a reference path.
			 * When the path is enabled, the dst module that needs
			 * to be bound may not be initialized yet. If the
			 * module is not initialized, add it to the deferred
			 * bind list and, once the dst module is initialized,
			 * bind this module to the dst module from the
			 * deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}


			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w,
					src_mconfig, skl);
			skl_tplg_set_module_bind_params(sink,
					sink_mconfig, skl);

			/* Start the sink's pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(skl,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}
/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of the
 * PGA we need to do the following:
 * - Bind to the sink pipeline
 *   Since the sink pipes can be running and we don't get a mixer event on
 *   connect for an already running mixer, we need to find the sink pipes
 *   here and bind to them. This way dynamic connect works.
 * - Start the sink pipeline, if not running
 * - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(skl, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check the widgets in the source pipelines. They can
		 * be of any widget type; we are only interested in the ones
		 * used by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
		    is_skl_dsp_widget_type(p->source, skl->dev)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}
/*
 * In the Post-PMU event of the mixer we need to do the following:
 * - Check if this pipe is running
 * - if not, then
 *	- bind this pipeline to its source pipeline
 *	  if the source pipe is already running, this means it is a dynamic
 *	  connection and we need to bind only to that pipe
 *	- start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, that means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this
	 * pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * if the source pipe is not started, there is no need to
		 * bind or start this pipe here
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(skl, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 * - Stop the pipe
 * - find the source connections and remove them from the dapm_path_list
 * - unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(skl,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}
/*
 * In the Post-PMD event of the mixer we need to do the following:
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (modules are not required to be explicitly
 *   deleted, pipeline delete is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, unbind the
			 * modules from the deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(skl, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(skl, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(skl, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(skl, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 * - Stop the pipeline
 * - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(skl, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if a path is found, it means
			 * the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(skl, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}
/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are interested in the last PGA (leaf PGA) in a pipeline, to disconnect
 * from the sink while it is running (two FEs to one BE, or one FE to two
 * BEs scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol,
					 bool is_set)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe = NULL;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 *pipe_id;

	if (!ec)
		return -EINVAL;

	if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
		return -EINVAL;

	pipe_id = ec->dobj.private;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == *pipe_id) {
			pipe = ppl->pipe;
			break;
		}
	}
	if (!pipe)
		return -EIO;

	if (is_set)
		skl_tplg_set_pipe_config_idx(pipe, ucontrol->value.enumerated.item[0]);
	else
		ucontrol->value.enumerated.item[0] = pipe->cur_config_idx;

	return 0;
}

static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}

static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}

static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}

static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}
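
/*
 * Illustrative note (not from the original sources): skl_tplg_tlv_control_get()
 * below returns data to user space as { u32 param_id, u32 size, payload },
 * which is why it subtracts the two-word TLV header from the requested size
 * and writes the payload at offset data + 2.
 */
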
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;
		ac->size = size;

		if (copy_from_user(ac->params, data, size))
			return -EFAULT;

		if (w->power)
			return skl_set_module_params(skl,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg =
				&mconfig->formats_config[SKL_PARAM_INIT];

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}
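
/*
 * Illustrative example (not from the original sources): with ch_type
 * SKL_CH_STEREO and enum index 2, the offset-by-one lookup selects
 * mic_stereo_list[1] = {0, 2}, so skl_tplg_mic_control_set() below programs
 * SKL_DEFAULT_MIC_SEL_GAIN for physical mics 0 and 2 in the mic_select blob.
 */
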
static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* The channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the dma id for host and link. In the case of a passthrough
 * pipeline, both host and link are part of the same pipeline, so we need
 * to copy the link or host params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}
/*
 * The FE params are passed by the hw_params of the DAI.
 * On hw_params, the params are stored in the gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res;
	struct skl_dev *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	res = &mconfig->module->resources[mconfig->res_idx];
	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	if (skl->nr_modules)
		return 0;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
	else
		format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
			    !is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
			    !is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}
static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
			    (p->sink->id == snd_soc_dapm_aif_out) &&
			    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
			    (p->source->id == snd_soc_dapm_aif_in) &&
			    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}
/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for the interface type (i2s/pdm) and
 * instance. The port can have multiple settings, so pick one based on the
 * pipeline parameters.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params save = *pipe->p_params;
	struct skl_pipe_fmt *pipe_fmt;
	struct skl_dev *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
	int ret;

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	*pipe->p_params = *params;
	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret)
		goto err;

	dev_dbg(skl->dev, "%s using pipe config: %d\n", __func__, pipe->cur_config_idx);
	if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
		pipe_fmt = &pipe->configs[pipe->cur_config_idx].out_fmt;
	else
		pipe_fmt = &pipe->configs[pipe->cur_config_idx].in_fmt;

	/* update the blob based on virtual bus_id */
	cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
					mconfig->vbus_id, link_type,
					pipe_fmt->bps, params->s_cont,
					pipe_fmt->channels, pipe_fmt->freq,
					pipe->direction, dev_type);
	if (cfg) {
		mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
			mconfig->vbus_id, link_type, params->stream,
			params->ch, params->s_freq, params->s_fmt);
		ret = -EINVAL;
		goto err;
	}

	return 0;

err:
	*pipe->p_params = save;
	return ret;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
		    p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
		    p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}
/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on the direction we need to find either the source
 * list or the sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
	{
		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
		.get = skl_tplg_multi_config_get,
		.put = skl_tplg_multi_config_set,
	},
	{
		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
		.get = skl_tplg_multi_config_get_dmic,
		.put = skl_tplg_multi_config_set_dmic,
	}
};

static int skl_tplg_fill_pipe_cfg(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val, int conf_idx, int dir)
{
	struct skl_pipe_fmt *fmt;
	struct skl_path_config *config;

	switch (dir) {
	case SKL_DIR_IN:
		fmt = &pipe->configs[conf_idx].in_fmt;
		break;

	case SKL_DIR_OUT:
		fmt = &pipe->configs[conf_idx].out_fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	config = &pipe->configs[conf_idx];

	switch (tkn) {
	case SKL_TKN_U32_CFG_FREQ:
		fmt->freq = tkn_val;
		break;

	case SKL_TKN_U8_CFG_CHAN:
		fmt->channels = tkn_val;
		break;

	case SKL_TKN_U8_CFG_BPS:
		fmt->bps = tkn_val;
		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
		config->mem_pages = tkn_val;
		break;

	default:
		dev_err(dev, "Invalid token config: %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{

	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_DIRECTION:
		pipe->direction = tkn_val;
		break;

	case SKL_TKN_U32_NUM_CONFIGS:
		pipe->nr_cfgs = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}
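
/*
 * Illustrative example (not from the original sources): a topology pipe
 * descriptor typically carries token/value pairs such as
 * SKL_TKN_U32_PIPE_CONN_TYPE, SKL_TKN_U32_PIPE_DIRECTION and
 * SKL_TKN_U32_NUM_CONFIGS, which the helpers above write into the matching
 * struct skl_pipe fields one token at a time.
 */
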
2042 */ 2043 static int skl_tplg_add_pipe(struct device *dev, 2044 struct skl_module_cfg *mconfig, struct skl_dev *skl, 2045 struct snd_soc_tplg_vendor_value_elem *tkn_elem) 2046 { 2047 struct skl_pipeline *ppl; 2048 struct skl_pipe *pipe; 2049 struct skl_pipe_params *params; 2050 2051 list_for_each_entry(ppl, &skl->ppl_list, node) { 2052 if (ppl->pipe->ppl_id == tkn_elem->value) { 2053 mconfig->pipe = ppl->pipe; 2054 return -EEXIST; 2055 } 2056 } 2057 2058 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2059 if (!ppl) 2060 return -ENOMEM; 2061 2062 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2063 if (!pipe) 2064 return -ENOMEM; 2065 2066 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2067 if (!params) 2068 return -ENOMEM; 2069 2070 pipe->p_params = params; 2071 pipe->ppl_id = tkn_elem->value; 2072 INIT_LIST_HEAD(&pipe->w_list); 2073 2074 ppl->pipe = pipe; 2075 list_add(&ppl->node, &skl->ppl_list); 2076 2077 mconfig->pipe = pipe; 2078 mconfig->pipe->state = SKL_PIPE_INVALID; 2079 2080 return 0; 2081 } 2082 2083 static int skl_tplg_get_uuid(struct device *dev, guid_t *guid, 2084 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 2085 { 2086 if (uuid_tkn->token == SKL_TKN_UUID) { 2087 import_guid(guid, uuid_tkn->uuid); 2088 return 0; 2089 } 2090 2091 dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token); 2092 2093 return -EINVAL; 2094 } 2095 2096 static int skl_tplg_fill_pin(struct device *dev, 2097 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2098 struct skl_module_pin *m_pin, 2099 int pin_index) 2100 { 2101 int ret; 2102 2103 switch (tkn_elem->token) { 2104 case SKL_TKN_U32_PIN_MOD_ID: 2105 m_pin[pin_index].id.module_id = tkn_elem->value; 2106 break; 2107 2108 case SKL_TKN_U32_PIN_INST_ID: 2109 m_pin[pin_index].id.instance_id = tkn_elem->value; 2110 break; 2111 2112 case SKL_TKN_UUID: 2113 ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid, 2114 (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem); 2115 if (ret < 0) 2116 return ret; 2117 2118 break; 2119 2120 default: 2121 dev_err(dev, "%d Not a pin token\n", tkn_elem->token); 2122 return -EINVAL; 2123 } 2124 2125 return 0; 2126 } 2127 2128 /* 2129 * Parse for pin config specific tokens to fill up the 2130 * module private data 2131 */ 2132 static int skl_tplg_fill_pins_info(struct device *dev, 2133 struct skl_module_cfg *mconfig, 2134 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2135 int dir, int pin_count) 2136 { 2137 int ret; 2138 struct skl_module_pin *m_pin; 2139 2140 switch (dir) { 2141 case SKL_DIR_IN: 2142 m_pin = mconfig->m_in_pin; 2143 break; 2144 2145 case SKL_DIR_OUT: 2146 m_pin = mconfig->m_out_pin; 2147 break; 2148 2149 default: 2150 dev_err(dev, "Invalid direction value\n"); 2151 return -EINVAL; 2152 } 2153 2154 ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count); 2155 if (ret < 0) 2156 return ret; 2157 2158 m_pin[pin_count].in_use = false; 2159 m_pin[pin_count].pin_state = SKL_PIN_UNBIND; 2160 2161 return 0; 2162 } 2163 2164 /* 2165 * Fill up input/output module config format based 2166 * on the direction 2167 */ 2168 static int skl_tplg_fill_fmt(struct device *dev, 2169 struct skl_module_fmt *dst_fmt, 2170 u32 tkn, u32 value) 2171 { 2172 switch (tkn) { 2173 case SKL_TKN_U32_FMT_CH: 2174 dst_fmt->channels = value; 2175 break; 2176 2177 case SKL_TKN_U32_FMT_FREQ: 2178 dst_fmt->s_freq = value; 2179 break; 2180 2181 case SKL_TKN_U32_FMT_BIT_DEPTH: 2182 dst_fmt->bit_depth = value; 2183 break; 2184 2185 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2186 dst_fmt->valid_bit_depth = value; 2187 break; 
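
	/*
	 * Note: FMT_SAMPLE_SIZE above is the count of valid bits per sample,
	 * while FMT_BIT_DEPTH is the width of the container the sample is
	 * carried in (e.g. 24 valid bits inside a 32 bit container).
	 */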
2188 2189 case SKL_TKN_U32_FMT_CH_CONFIG: 2190 dst_fmt->ch_cfg = value; 2191 break; 2192 2193 case SKL_TKN_U32_FMT_INTERLEAVE: 2194 dst_fmt->interleaving_style = value; 2195 break; 2196 2197 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2198 dst_fmt->sample_type = value; 2199 break; 2200 2201 case SKL_TKN_U32_FMT_CH_MAP: 2202 dst_fmt->ch_map = value; 2203 break; 2204 2205 default: 2206 dev_err(dev, "Invalid token %d\n", tkn); 2207 return -EINVAL; 2208 } 2209 2210 return 0; 2211 } 2212 2213 static int skl_tplg_widget_fill_fmt(struct device *dev, 2214 struct skl_module_iface *fmt, 2215 u32 tkn, u32 val, u32 dir, int fmt_idx) 2216 { 2217 struct skl_module_fmt *dst_fmt; 2218 2219 if (!fmt) 2220 return -EINVAL; 2221 2222 switch (dir) { 2223 case SKL_DIR_IN: 2224 dst_fmt = &fmt->inputs[fmt_idx].fmt; 2225 break; 2226 2227 case SKL_DIR_OUT: 2228 dst_fmt = &fmt->outputs[fmt_idx].fmt; 2229 break; 2230 2231 default: 2232 dev_err(dev, "Invalid direction: %d\n", dir); 2233 return -EINVAL; 2234 } 2235 2236 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val); 2237 } 2238 2239 static void skl_tplg_fill_pin_dynamic_val( 2240 struct skl_module_pin *mpin, u32 pin_count, u32 value) 2241 { 2242 int i; 2243 2244 for (i = 0; i < pin_count; i++) 2245 mpin[i].is_dynamic = value; 2246 } 2247 2248 /* 2249 * Resource table in the manifest has pin specific resources 2250 * like pin and pin buffer size 2251 */ 2252 static int skl_tplg_manifest_pin_res_tkn(struct device *dev, 2253 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2254 struct skl_module_res *res, int pin_idx, int dir) 2255 { 2256 struct skl_module_pin_resources *m_pin; 2257 2258 switch (dir) { 2259 case SKL_DIR_IN: 2260 m_pin = &res->input[pin_idx]; 2261 break; 2262 2263 case SKL_DIR_OUT: 2264 m_pin = &res->output[pin_idx]; 2265 break; 2266 2267 default: 2268 dev_err(dev, "Invalid pin direction: %d\n", dir); 2269 return -EINVAL; 2270 } 2271 2272 switch (tkn_elem->token) { 2273 case SKL_TKN_MM_U32_RES_PIN_ID: 2274 m_pin->pin_index = tkn_elem->value; 2275 break; 2276 2277 case SKL_TKN_MM_U32_PIN_BUF: 2278 m_pin->buf_size = tkn_elem->value; 2279 break; 2280 2281 default: 2282 dev_err(dev, "Invalid token: %d\n", tkn_elem->token); 2283 return -EINVAL; 2284 } 2285 2286 return 0; 2287 } 2288 2289 /* 2290 * Fill module specific resources from the manifest's resource 2291 * table like CPS, DMA size, mem_pages. 
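 * Pin specific resource tokens are handed off to
 * skl_tplg_manifest_pin_res_tkn() above.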
2292 */ 2293 static int skl_tplg_fill_res_tkn(struct device *dev, 2294 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2295 struct skl_module_res *res, 2296 int pin_idx, int dir) 2297 { 2298 int ret, tkn_count = 0; 2299 2300 if (!res) 2301 return -EINVAL; 2302 2303 switch (tkn_elem->token) { 2304 case SKL_TKN_MM_U32_DMA_SIZE: 2305 res->dma_buffer_size = tkn_elem->value; 2306 break; 2307 2308 case SKL_TKN_MM_U32_CPC: 2309 res->cpc = tkn_elem->value; 2310 break; 2311 2312 case SKL_TKN_U32_MEM_PAGES: 2313 res->is_pages = tkn_elem->value; 2314 break; 2315 2316 case SKL_TKN_U32_OBS: 2317 res->obs = tkn_elem->value; 2318 break; 2319 2320 case SKL_TKN_U32_IBS: 2321 res->ibs = tkn_elem->value; 2322 break; 2323 2324 case SKL_TKN_MM_U32_RES_PIN_ID: 2325 case SKL_TKN_MM_U32_PIN_BUF: 2326 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res, 2327 pin_idx, dir); 2328 if (ret < 0) 2329 return ret; 2330 break; 2331 2332 case SKL_TKN_MM_U32_CPS: 2333 case SKL_TKN_U32_MAX_MCPS: 2334 /* ignore unused tokens */ 2335 break; 2336 2337 default: 2338 dev_err(dev, "Not a res type token: %d", tkn_elem->token); 2339 return -EINVAL; 2340 2341 } 2342 tkn_count++; 2343 2344 return tkn_count; 2345 } 2346 2347 /* 2348 * Parse tokens to fill up the module private data 2349 */ 2350 static int skl_tplg_get_token(struct device *dev, 2351 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2352 struct skl_dev *skl, struct skl_module_cfg *mconfig) 2353 { 2354 int tkn_count = 0; 2355 int ret; 2356 static int is_pipe_exists; 2357 static int pin_index, dir, conf_idx; 2358 struct skl_module_iface *iface = NULL; 2359 struct skl_module_res *res = NULL; 2360 int res_idx = mconfig->res_idx; 2361 int fmt_idx = mconfig->fmt_idx; 2362 2363 /* 2364 * If the manifest structure contains no modules, fill all 2365 * the module data to 0th index. 2366 * res_idx and fmt_idx are default set to 0. 
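 * When the manifest does describe modules, the per-module resource and
 * format tables are filled while parsing the manifest, so res and iface
 * stay NULL here.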
 */
	if (skl->nr_modules == 0) {
		res = &mconfig->module->resources[res_idx];
		iface = &mconfig->module->formats[fmt_idx];
	}

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->module->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->module->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			mconfig->m_in_pin =
				devm_kcalloc(dev, MAX_IN_QUEUE,
					     sizeof(*mconfig->m_in_pin),
					     GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_OUT_QUEUE,
					     sizeof(*mconfig->m_out_pin),
					     GFP_KERNEL);
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
					      tkn_elem->value);
		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_MAX_MCPS:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0) {
			if (ret == -EEXIST) {
				is_pipe_exists = 1;
				break;
			}
			return ret;
		}

		break;

	case SKL_TKN_U32_PIPE_CONFIG_ID:
		conf_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
	case SKL_TKN_U32_PIPE_DIRECTION:
	case SKL_TKN_U32_NUM_CONFIGS:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
	case SKL_TKN_U32_CFG_FREQ:
	case SKL_TKN_U8_CFG_CHAN:
	case SKL_TKN_U8_CFG_BPS:
		if (mconfig->pipe->nr_cfgs) {
			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value,
					conf_idx, dir);
			if (ret < 0)
				return ret;
		}
		break;

	case SKL_TKN_CFG_MOD_RES_ID:
		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
		break;

	case SKL_TKN_CFG_MOD_FMT_ID:
mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2514 break; 2515 2516 /* 2517 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2518 * direction and the pin count. The first four bits represent 2519 * direction and next four the pin count. 2520 */ 2521 case SKL_TKN_U32_DIR_PIN_COUNT: 2522 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 2523 pin_index = (tkn_elem->value & 2524 SKL_PIN_COUNT_MASK) >> 4; 2525 2526 break; 2527 2528 case SKL_TKN_U32_FMT_CH: 2529 case SKL_TKN_U32_FMT_FREQ: 2530 case SKL_TKN_U32_FMT_BIT_DEPTH: 2531 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2532 case SKL_TKN_U32_FMT_CH_CONFIG: 2533 case SKL_TKN_U32_FMT_INTERLEAVE: 2534 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2535 case SKL_TKN_U32_FMT_CH_MAP: 2536 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token, 2537 tkn_elem->value, dir, pin_index); 2538 2539 if (ret < 0) 2540 return ret; 2541 2542 break; 2543 2544 case SKL_TKN_U32_PIN_MOD_ID: 2545 case SKL_TKN_U32_PIN_INST_ID: 2546 case SKL_TKN_UUID: 2547 ret = skl_tplg_fill_pins_info(dev, 2548 mconfig, tkn_elem, dir, 2549 pin_index); 2550 if (ret < 0) 2551 return ret; 2552 2553 break; 2554 2555 case SKL_TKN_U32_FMT_CFG_IDX: 2556 if (tkn_elem->value > SKL_MAX_PARAMS_TYPES) 2557 return -EINVAL; 2558 2559 mconfig->fmt_cfg_idx = tkn_elem->value; 2560 break; 2561 2562 case SKL_TKN_U32_CAPS_SIZE: 2563 mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size = 2564 tkn_elem->value; 2565 2566 break; 2567 2568 case SKL_TKN_U32_CAPS_SET_PARAMS: 2569 mconfig->formats_config[mconfig->fmt_cfg_idx].set_params = 2570 tkn_elem->value; 2571 break; 2572 2573 case SKL_TKN_U32_CAPS_PARAMS_ID: 2574 mconfig->formats_config[mconfig->fmt_cfg_idx].param_id = 2575 tkn_elem->value; 2576 break; 2577 2578 case SKL_TKN_U32_PROC_DOMAIN: 2579 mconfig->domain = 2580 tkn_elem->value; 2581 2582 break; 2583 2584 case SKL_TKN_U32_DMA_BUF_SIZE: 2585 mconfig->dma_buffer_size = tkn_elem->value; 2586 break; 2587 2588 case SKL_TKN_U8_IN_PIN_TYPE: 2589 case SKL_TKN_U8_OUT_PIN_TYPE: 2590 case SKL_TKN_U8_CONN_TYPE: 2591 break; 2592 2593 default: 2594 dev_err(dev, "Token %d not handled\n", 2595 tkn_elem->token); 2596 return -EINVAL; 2597 } 2598 2599 tkn_count++; 2600 2601 return tkn_count; 2602 } 2603 2604 /* 2605 * Parse the vendor array for specific tokens to construct 2606 * module private data 2607 */ 2608 static int skl_tplg_get_tokens(struct device *dev, 2609 char *pvt_data, struct skl_dev *skl, 2610 struct skl_module_cfg *mconfig, int block_size) 2611 { 2612 struct snd_soc_tplg_vendor_array *array; 2613 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2614 int tkn_count = 0, ret; 2615 int off = 0, tuple_size = 0; 2616 bool is_module_guid = true; 2617 2618 if (block_size <= 0) 2619 return -EINVAL; 2620 2621 while (tuple_size < block_size) { 2622 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 2623 2624 off += array->size; 2625 2626 switch (array->type) { 2627 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 2628 dev_warn(dev, "no string tokens expected for skl tplg\n"); 2629 continue; 2630 2631 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 2632 if (is_module_guid) { 2633 ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid, 2634 array->uuid); 2635 is_module_guid = false; 2636 } else { 2637 ret = skl_tplg_get_token(dev, array->value, skl, 2638 mconfig); 2639 } 2640 2641 if (ret < 0) 2642 return ret; 2643 2644 tuple_size += sizeof(*array->uuid); 2645 2646 continue; 2647 2648 default: 2649 tkn_elem = array->value; 2650 tkn_count = 0; 2651 break; 2652 } 2653 2654 while (tkn_count <= (array->num_elems - 1)) { 2655 ret = 
skl_tplg_get_token(dev, tkn_elem, 2656 skl, mconfig); 2657 2658 if (ret < 0) 2659 return ret; 2660 2661 tkn_count = tkn_count + ret; 2662 tkn_elem++; 2663 } 2664 2665 tuple_size += tkn_count * sizeof(*tkn_elem); 2666 } 2667 2668 return off; 2669 } 2670 2671 /* 2672 * Every data block is preceded by a descriptor to read the number 2673 * of data blocks, they type of the block and it's size 2674 */ 2675 static int skl_tplg_get_desc_blocks(struct device *dev, 2676 struct snd_soc_tplg_vendor_array *array) 2677 { 2678 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2679 2680 tkn_elem = array->value; 2681 2682 switch (tkn_elem->token) { 2683 case SKL_TKN_U8_NUM_BLOCKS: 2684 case SKL_TKN_U8_BLOCK_TYPE: 2685 case SKL_TKN_U16_BLOCK_SIZE: 2686 return tkn_elem->value; 2687 2688 default: 2689 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token); 2690 break; 2691 } 2692 2693 return -EINVAL; 2694 } 2695 2696 /* Functions to parse private data from configuration file format v4 */ 2697 2698 /* 2699 * Add pipeline from topology binary into driver pipeline list 2700 * 2701 * If already added we return that instance 2702 * Otherwise we create a new instance and add into driver list 2703 */ 2704 static int skl_tplg_add_pipe_v4(struct device *dev, 2705 struct skl_module_cfg *mconfig, struct skl_dev *skl, 2706 struct skl_dfw_v4_pipe *dfw_pipe) 2707 { 2708 struct skl_pipeline *ppl; 2709 struct skl_pipe *pipe; 2710 struct skl_pipe_params *params; 2711 2712 list_for_each_entry(ppl, &skl->ppl_list, node) { 2713 if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) { 2714 mconfig->pipe = ppl->pipe; 2715 return 0; 2716 } 2717 } 2718 2719 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2720 if (!ppl) 2721 return -ENOMEM; 2722 2723 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2724 if (!pipe) 2725 return -ENOMEM; 2726 2727 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2728 if (!params) 2729 return -ENOMEM; 2730 2731 pipe->ppl_id = dfw_pipe->pipe_id; 2732 pipe->memory_pages = dfw_pipe->memory_pages; 2733 pipe->pipe_priority = dfw_pipe->pipe_priority; 2734 pipe->conn_type = dfw_pipe->conn_type; 2735 pipe->state = SKL_PIPE_INVALID; 2736 pipe->p_params = params; 2737 INIT_LIST_HEAD(&pipe->w_list); 2738 2739 ppl->pipe = pipe; 2740 list_add(&ppl->node, &skl->ppl_list); 2741 2742 mconfig->pipe = pipe; 2743 2744 return 0; 2745 } 2746 2747 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin, 2748 struct skl_module_pin *m_pin, 2749 bool is_dynamic, int max_pin) 2750 { 2751 int i; 2752 2753 for (i = 0; i < max_pin; i++) { 2754 m_pin[i].id.module_id = dfw_pin[i].module_id; 2755 m_pin[i].id.instance_id = dfw_pin[i].instance_id; 2756 m_pin[i].in_use = false; 2757 m_pin[i].is_dynamic = is_dynamic; 2758 m_pin[i].pin_state = SKL_PIN_UNBIND; 2759 } 2760 } 2761 2762 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt, 2763 struct skl_dfw_v4_module_fmt *src_fmt, 2764 int pins) 2765 { 2766 int i; 2767 2768 for (i = 0; i < pins; i++) { 2769 dst_fmt[i].fmt.channels = src_fmt[i].channels; 2770 dst_fmt[i].fmt.s_freq = src_fmt[i].freq; 2771 dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth; 2772 dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth; 2773 dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg; 2774 dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map; 2775 dst_fmt[i].fmt.interleaving_style = 2776 src_fmt[i].interleaving_style; 2777 dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type; 2778 } 2779 } 2780 2781 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w, 
2782 struct skl_dev *skl, struct device *dev, 2783 struct skl_module_cfg *mconfig) 2784 { 2785 struct skl_dfw_v4_module *dfw = 2786 (struct skl_dfw_v4_module *)tplg_w->priv.data; 2787 int ret; 2788 int idx = mconfig->fmt_cfg_idx; 2789 2790 dev_dbg(dev, "Parsing Skylake v4 widget topology data\n"); 2791 2792 ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid); 2793 if (ret) 2794 return ret; 2795 mconfig->id.module_id = -1; 2796 mconfig->id.instance_id = dfw->instance_id; 2797 mconfig->module->resources[0].cpc = dfw->max_mcps / 1000; 2798 mconfig->module->resources[0].ibs = dfw->ibs; 2799 mconfig->module->resources[0].obs = dfw->obs; 2800 mconfig->core_id = dfw->core_id; 2801 mconfig->module->max_input_pins = dfw->max_in_queue; 2802 mconfig->module->max_output_pins = dfw->max_out_queue; 2803 mconfig->module->loadable = dfw->is_loadable; 2804 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt, 2805 MAX_IN_QUEUE); 2806 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt, 2807 MAX_OUT_QUEUE); 2808 2809 mconfig->params_fixup = dfw->params_fixup; 2810 mconfig->converter = dfw->converter; 2811 mconfig->m_type = dfw->module_type; 2812 mconfig->vbus_id = dfw->vbus_id; 2813 mconfig->module->resources[0].is_pages = dfw->mem_pages; 2814 2815 ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe); 2816 if (ret) 2817 return ret; 2818 2819 mconfig->dev_type = dfw->dev_type; 2820 mconfig->hw_conn_type = dfw->hw_conn_type; 2821 mconfig->time_slot = dfw->time_slot; 2822 mconfig->formats_config[idx].caps_size = dfw->caps.caps_size; 2823 2824 mconfig->m_in_pin = devm_kcalloc(dev, 2825 MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin), 2826 GFP_KERNEL); 2827 if (!mconfig->m_in_pin) 2828 return -ENOMEM; 2829 2830 mconfig->m_out_pin = devm_kcalloc(dev, 2831 MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin), 2832 GFP_KERNEL); 2833 if (!mconfig->m_out_pin) 2834 return -ENOMEM; 2835 2836 skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin, 2837 dfw->is_dynamic_in_pin, 2838 mconfig->module->max_input_pins); 2839 skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin, 2840 dfw->is_dynamic_out_pin, 2841 mconfig->module->max_output_pins); 2842 2843 if (mconfig->formats_config[idx].caps_size) { 2844 mconfig->formats_config[idx].set_params = dfw->caps.set_params; 2845 mconfig->formats_config[idx].param_id = dfw->caps.param_id; 2846 mconfig->formats_config[idx].caps = 2847 devm_kzalloc(dev, mconfig->formats_config[idx].caps_size, 2848 GFP_KERNEL); 2849 if (!mconfig->formats_config[idx].caps) 2850 return -ENOMEM; 2851 memcpy(mconfig->formats_config[idx].caps, dfw->caps.caps, 2852 dfw->caps.caps_size); 2853 } 2854 2855 return 0; 2856 } 2857 2858 static int skl_tplg_get_caps_data(struct device *dev, char *data, 2859 struct skl_module_cfg *mconfig) 2860 { 2861 int idx = mconfig->fmt_cfg_idx; 2862 2863 if (mconfig->formats_config[idx].caps_size > 0) { 2864 mconfig->formats_config[idx].caps = 2865 devm_kzalloc(dev, mconfig->formats_config[idx].caps_size, 2866 GFP_KERNEL); 2867 if (!mconfig->formats_config[idx].caps) 2868 return -ENOMEM; 2869 memcpy(mconfig->formats_config[idx].caps, data, 2870 mconfig->formats_config[idx].caps_size); 2871 } 2872 2873 return mconfig->formats_config[idx].caps_size; 2874 } 2875 2876 /* 2877 * Parse the private data for the token and corresponding value. 2878 * The private data can have multiple data blocks. So, a data block 2879 * is preceded by a descriptor for number of blocks and a descriptor 2880 * for the type and size of the suceeding data block. 
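 *
 * Roughly, the layout parsed below is:
 *
 *   [NUM_DATA_BLOCKS descriptor]
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block 0 data]
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block 1 data]
 *   ...
 *
 * A tuple block is parsed for tokens; any other block type is copied
 * as raw caps data.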
2881 */ 2882 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w, 2883 struct skl_dev *skl, struct device *dev, 2884 struct skl_module_cfg *mconfig) 2885 { 2886 struct snd_soc_tplg_vendor_array *array; 2887 int num_blocks, block_size, block_type, off = 0; 2888 char *data; 2889 int ret; 2890 2891 /* 2892 * v4 configuration files have a valid UUID at the start of 2893 * the widget's private data. 2894 */ 2895 if (uuid_is_valid((char *)tplg_w->priv.data)) 2896 return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig); 2897 2898 /* Read the NUM_DATA_BLOCKS descriptor */ 2899 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data; 2900 ret = skl_tplg_get_desc_blocks(dev, array); 2901 if (ret < 0) 2902 return ret; 2903 num_blocks = ret; 2904 2905 off += array->size; 2906 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 2907 while (num_blocks > 0) { 2908 array = (struct snd_soc_tplg_vendor_array *) 2909 (tplg_w->priv.data + off); 2910 2911 ret = skl_tplg_get_desc_blocks(dev, array); 2912 2913 if (ret < 0) 2914 return ret; 2915 block_type = ret; 2916 off += array->size; 2917 2918 array = (struct snd_soc_tplg_vendor_array *) 2919 (tplg_w->priv.data + off); 2920 2921 ret = skl_tplg_get_desc_blocks(dev, array); 2922 2923 if (ret < 0) 2924 return ret; 2925 block_size = ret; 2926 off += array->size; 2927 2928 data = (tplg_w->priv.data + off); 2929 2930 if (block_type == SKL_TYPE_TUPLE) { 2931 ret = skl_tplg_get_tokens(dev, data, 2932 skl, mconfig, block_size); 2933 } else { 2934 ret = skl_tplg_get_caps_data(dev, data, mconfig); 2935 } 2936 2937 if (ret < 0) 2938 return ret; 2939 2940 --num_blocks; 2941 off += ret; 2942 } 2943 2944 return 0; 2945 } 2946 2947 static void skl_clear_pin_config(struct snd_soc_component *component, 2948 struct snd_soc_dapm_widget *w) 2949 { 2950 int i; 2951 struct skl_module_cfg *mconfig; 2952 struct skl_pipe *pipe; 2953 2954 if (!strncmp(w->dapm->component->name, component->name, 2955 strlen(component->name))) { 2956 mconfig = w->priv; 2957 pipe = mconfig->pipe; 2958 for (i = 0; i < mconfig->module->max_input_pins; i++) { 2959 mconfig->m_in_pin[i].in_use = false; 2960 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND; 2961 } 2962 for (i = 0; i < mconfig->module->max_output_pins; i++) { 2963 mconfig->m_out_pin[i].in_use = false; 2964 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND; 2965 } 2966 pipe->state = SKL_PIPE_INVALID; 2967 mconfig->m_state = SKL_MODULE_UNINIT; 2968 } 2969 } 2970 2971 void skl_cleanup_resources(struct skl_dev *skl) 2972 { 2973 struct snd_soc_component *soc_component = skl->component; 2974 struct snd_soc_dapm_widget *w; 2975 struct snd_soc_card *card; 2976 2977 if (soc_component == NULL) 2978 return; 2979 2980 card = soc_component->card; 2981 if (!card || !card->instantiated) 2982 return; 2983 2984 list_for_each_entry(w, &card->widgets, list) { 2985 if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL) 2986 skl_clear_pin_config(soc_component, w); 2987 } 2988 2989 skl_clear_module_cnt(skl->dsp); 2990 } 2991 2992 /* 2993 * Topology core widget load callback 2994 * 2995 * This is used to save the private data for each widget which gives 2996 * information to the driver about module and pipeline parameters which DSP 2997 * FW expects like ids, resource values, formats etc 2998 */ 2999 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index, 3000 struct snd_soc_dapm_widget *w, 3001 struct snd_soc_tplg_dapm_widget *tplg_w) 3002 { 3003 int ret; 3004 struct hdac_bus *bus = 
						snd_soc_component_get_drvdata(cmpnt);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_module_cfg *mconfig;

	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * The module binary can be loaded later, so set an invalid id now;
	 * it is queried when the module is loaded for a use case.
	 */
	mconfig->id.module_id = -1;

	/* To provide backward compatibility, set default as SKL_PARAM_INIT */
	mconfig->fmt_cfg_idx = SKL_PARAM_INIT;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);

	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
			__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}

static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	if (ac->max) {
		ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}

static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
				struct snd_soc_tplg_enum_control *ec)
{
	void *data;

	if (ec->priv.size) {
		/* allocate the full private blob, not just sizeof(size) */
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		memcpy(data, ec->priv.data, ec->priv.size);
		se->dobj.private = data;
	}

	return 0;
}

static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				int index,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access &
SNDRV_CTL_ELEM_ACCESS_READ) { 3134 se = (struct soc_enum *)kctl->private_value; 3135 if (tplg_ec->priv.size) 3136 skl_init_enum_data(bus->dev, se, tplg_ec); 3137 } 3138 3139 /* 3140 * now that the control initializations are done, remove 3141 * write permission for the DMIC configuration enums to 3142 * avoid conflicts between NHLT settings and user interaction 3143 */ 3144 3145 if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC) 3146 kctl->access = SNDRV_CTL_ELEM_ACCESS_READ; 3147 3148 break; 3149 3150 default: 3151 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n", 3152 hdr->ops.get, hdr->ops.put, hdr->ops.info); 3153 break; 3154 } 3155 3156 return 0; 3157 } 3158 3159 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 3160 struct snd_soc_tplg_vendor_string_elem *str_elem, 3161 struct skl_dev *skl) 3162 { 3163 int tkn_count = 0; 3164 static int ref_count; 3165 3166 switch (str_elem->token) { 3167 case SKL_TKN_STR_LIB_NAME: 3168 if (ref_count > skl->lib_count - 1) { 3169 ref_count = 0; 3170 return -EINVAL; 3171 } 3172 3173 strncpy(skl->lib_info[ref_count].name, 3174 str_elem->string, 3175 ARRAY_SIZE(skl->lib_info[ref_count].name)); 3176 ref_count++; 3177 break; 3178 3179 default: 3180 dev_err(dev, "Not a string token %d\n", str_elem->token); 3181 break; 3182 } 3183 tkn_count++; 3184 3185 return tkn_count; 3186 } 3187 3188 static int skl_tplg_get_str_tkn(struct device *dev, 3189 struct snd_soc_tplg_vendor_array *array, 3190 struct skl_dev *skl) 3191 { 3192 int tkn_count = 0, ret; 3193 struct snd_soc_tplg_vendor_string_elem *str_elem; 3194 3195 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value; 3196 while (tkn_count < array->num_elems) { 3197 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl); 3198 str_elem++; 3199 3200 if (ret < 0) 3201 return ret; 3202 3203 tkn_count = tkn_count + ret; 3204 } 3205 3206 return tkn_count; 3207 } 3208 3209 static int skl_tplg_manifest_fill_fmt(struct device *dev, 3210 struct skl_module_iface *fmt, 3211 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3212 u32 dir, int fmt_idx) 3213 { 3214 struct skl_module_pin_fmt *dst_fmt; 3215 struct skl_module_fmt *mod_fmt; 3216 int ret; 3217 3218 if (!fmt) 3219 return -EINVAL; 3220 3221 switch (dir) { 3222 case SKL_DIR_IN: 3223 dst_fmt = &fmt->inputs[fmt_idx]; 3224 break; 3225 3226 case SKL_DIR_OUT: 3227 dst_fmt = &fmt->outputs[fmt_idx]; 3228 break; 3229 3230 default: 3231 dev_err(dev, "Invalid direction: %d\n", dir); 3232 return -EINVAL; 3233 } 3234 3235 mod_fmt = &dst_fmt->fmt; 3236 3237 switch (tkn_elem->token) { 3238 case SKL_TKN_MM_U32_INTF_PIN_ID: 3239 dst_fmt->id = tkn_elem->value; 3240 break; 3241 3242 default: 3243 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token, 3244 tkn_elem->value); 3245 if (ret < 0) 3246 return ret; 3247 break; 3248 } 3249 3250 return 0; 3251 } 3252 3253 static int skl_tplg_fill_mod_info(struct device *dev, 3254 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3255 struct skl_module *mod) 3256 { 3257 3258 if (!mod) 3259 return -EINVAL; 3260 3261 switch (tkn_elem->token) { 3262 case SKL_TKN_U8_IN_PIN_TYPE: 3263 mod->input_pin_type = tkn_elem->value; 3264 break; 3265 3266 case SKL_TKN_U8_OUT_PIN_TYPE: 3267 mod->output_pin_type = tkn_elem->value; 3268 break; 3269 3270 case SKL_TKN_U8_IN_QUEUE_COUNT: 3271 mod->max_input_pins = tkn_elem->value; 3272 break; 3273 3274 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3275 mod->max_output_pins = tkn_elem->value; 3276 break; 3277 3278 case SKL_TKN_MM_U8_NUM_RES: 3279 mod->nr_resources = tkn_elem->value; 3280 break; 3281 
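
	/*
	 * nr_resources/nr_interfaces record how many entries of the
	 * per-module resource and format tables the following manifest
	 * tokens will fill in.
	 */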
3282 case SKL_TKN_MM_U8_NUM_INTF: 3283 mod->nr_interfaces = tkn_elem->value; 3284 break; 3285 3286 default: 3287 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3288 return -EINVAL; 3289 } 3290 3291 return 0; 3292 } 3293 3294 3295 static int skl_tplg_get_int_tkn(struct device *dev, 3296 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3297 struct skl_dev *skl) 3298 { 3299 int tkn_count = 0, ret; 3300 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3301 struct skl_module_res *res = NULL; 3302 struct skl_module_iface *fmt = NULL; 3303 struct skl_module *mod = NULL; 3304 static struct skl_astate_param *astate_table; 3305 static int astate_cfg_idx, count; 3306 int i; 3307 size_t size; 3308 3309 if (skl->modules) { 3310 mod = skl->modules[mod_idx]; 3311 res = &mod->resources[res_val_idx]; 3312 fmt = &mod->formats[intf_val_idx]; 3313 } 3314 3315 switch (tkn_elem->token) { 3316 case SKL_TKN_U32_LIB_COUNT: 3317 skl->lib_count = tkn_elem->value; 3318 break; 3319 3320 case SKL_TKN_U8_NUM_MOD: 3321 skl->nr_modules = tkn_elem->value; 3322 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3323 sizeof(*skl->modules), GFP_KERNEL); 3324 if (!skl->modules) 3325 return -ENOMEM; 3326 3327 for (i = 0; i < skl->nr_modules; i++) { 3328 skl->modules[i] = devm_kzalloc(dev, 3329 sizeof(struct skl_module), GFP_KERNEL); 3330 if (!skl->modules[i]) 3331 return -ENOMEM; 3332 } 3333 break; 3334 3335 case SKL_TKN_MM_U8_MOD_IDX: 3336 mod_idx = tkn_elem->value; 3337 break; 3338 3339 case SKL_TKN_U32_ASTATE_COUNT: 3340 if (astate_table != NULL) { 3341 dev_err(dev, "More than one entry for A-State count"); 3342 return -EINVAL; 3343 } 3344 3345 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3346 dev_err(dev, "Invalid A-State count %d\n", 3347 tkn_elem->value); 3348 return -EINVAL; 3349 } 3350 3351 size = struct_size(skl->cfg.astate_cfg, astate_table, 3352 tkn_elem->value); 3353 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3354 if (!skl->cfg.astate_cfg) 3355 return -ENOMEM; 3356 3357 astate_table = skl->cfg.astate_cfg->astate_table; 3358 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3359 break; 3360 3361 case SKL_TKN_U32_ASTATE_IDX: 3362 if (tkn_elem->value >= count) { 3363 dev_err(dev, "Invalid A-State index %d\n", 3364 tkn_elem->value); 3365 return -EINVAL; 3366 } 3367 3368 astate_cfg_idx = tkn_elem->value; 3369 break; 3370 3371 case SKL_TKN_U32_ASTATE_KCPS: 3372 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3373 break; 3374 3375 case SKL_TKN_U32_ASTATE_CLK_SRC: 3376 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3377 break; 3378 3379 case SKL_TKN_U8_IN_PIN_TYPE: 3380 case SKL_TKN_U8_OUT_PIN_TYPE: 3381 case SKL_TKN_U8_IN_QUEUE_COUNT: 3382 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3383 case SKL_TKN_MM_U8_NUM_RES: 3384 case SKL_TKN_MM_U8_NUM_INTF: 3385 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3386 if (ret < 0) 3387 return ret; 3388 break; 3389 3390 case SKL_TKN_U32_DIR_PIN_COUNT: 3391 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3392 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3393 break; 3394 3395 case SKL_TKN_MM_U32_RES_ID: 3396 if (!res) 3397 return -EINVAL; 3398 3399 res->id = tkn_elem->value; 3400 res_val_idx = tkn_elem->value; 3401 break; 3402 3403 case SKL_TKN_MM_U32_FMT_ID: 3404 if (!fmt) 3405 return -EINVAL; 3406 3407 fmt->fmt_idx = tkn_elem->value; 3408 intf_val_idx = tkn_elem->value; 3409 break; 3410 3411 case SKL_TKN_MM_U32_CPS: 3412 case SKL_TKN_MM_U32_DMA_SIZE: 3413 case SKL_TKN_MM_U32_CPC: 3414 case SKL_TKN_U32_MEM_PAGES: 3415 case 
SKL_TKN_U32_OBS: 3416 case SKL_TKN_U32_IBS: 3417 case SKL_TKN_MM_U32_RES_PIN_ID: 3418 case SKL_TKN_MM_U32_PIN_BUF: 3419 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3420 if (ret < 0) 3421 return ret; 3422 3423 break; 3424 3425 case SKL_TKN_MM_U32_NUM_IN_FMT: 3426 if (!fmt) 3427 return -EINVAL; 3428 3429 res->nr_input_pins = tkn_elem->value; 3430 break; 3431 3432 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3433 if (!fmt) 3434 return -EINVAL; 3435 3436 res->nr_output_pins = tkn_elem->value; 3437 break; 3438 3439 case SKL_TKN_U32_FMT_CH: 3440 case SKL_TKN_U32_FMT_FREQ: 3441 case SKL_TKN_U32_FMT_BIT_DEPTH: 3442 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3443 case SKL_TKN_U32_FMT_CH_CONFIG: 3444 case SKL_TKN_U32_FMT_INTERLEAVE: 3445 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3446 case SKL_TKN_U32_FMT_CH_MAP: 3447 case SKL_TKN_MM_U32_INTF_PIN_ID: 3448 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3449 dir, pin_idx); 3450 if (ret < 0) 3451 return ret; 3452 break; 3453 3454 default: 3455 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3456 return -EINVAL; 3457 } 3458 tkn_count++; 3459 3460 return tkn_count; 3461 } 3462 3463 /* 3464 * Fill the manifest structure by parsing the tokens based on the 3465 * type. 3466 */ 3467 static int skl_tplg_get_manifest_tkn(struct device *dev, 3468 char *pvt_data, struct skl_dev *skl, 3469 int block_size) 3470 { 3471 int tkn_count = 0, ret; 3472 int off = 0, tuple_size = 0; 3473 u8 uuid_index = 0; 3474 struct snd_soc_tplg_vendor_array *array; 3475 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 3476 3477 if (block_size <= 0) 3478 return -EINVAL; 3479 3480 while (tuple_size < block_size) { 3481 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 3482 off += array->size; 3483 switch (array->type) { 3484 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 3485 ret = skl_tplg_get_str_tkn(dev, array, skl); 3486 3487 if (ret < 0) 3488 return ret; 3489 tkn_count = ret; 3490 3491 tuple_size += tkn_count * 3492 sizeof(struct snd_soc_tplg_vendor_string_elem); 3493 continue; 3494 3495 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 3496 if (array->uuid->token != SKL_TKN_UUID) { 3497 dev_err(dev, "Not an UUID token: %d\n", 3498 array->uuid->token); 3499 return -EINVAL; 3500 } 3501 if (uuid_index >= skl->nr_modules) { 3502 dev_err(dev, "Too many UUID tokens\n"); 3503 return -EINVAL; 3504 } 3505 import_guid(&skl->modules[uuid_index++]->uuid, 3506 array->uuid->uuid); 3507 3508 tuple_size += sizeof(*array->uuid); 3509 continue; 3510 3511 default: 3512 tkn_elem = array->value; 3513 tkn_count = 0; 3514 break; 3515 } 3516 3517 while (tkn_count <= array->num_elems - 1) { 3518 ret = skl_tplg_get_int_tkn(dev, 3519 tkn_elem, skl); 3520 if (ret < 0) 3521 return ret; 3522 3523 tkn_count = tkn_count + ret; 3524 tkn_elem++; 3525 } 3526 tuple_size += (tkn_count * sizeof(*tkn_elem)); 3527 tkn_count = 0; 3528 } 3529 3530 return off; 3531 } 3532 3533 /* 3534 * Parse manifest private data for tokens. The private data block is 3535 * preceded by descriptors for type and size of data block. 
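 * Unlike widget private data, only tuple blocks are expected here; any
 * other block type is rejected as invalid.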
3536 */ 3537 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3538 struct device *dev, struct skl_dev *skl) 3539 { 3540 struct snd_soc_tplg_vendor_array *array; 3541 int num_blocks, block_size = 0, block_type, off = 0; 3542 char *data; 3543 int ret; 3544 3545 /* Read the NUM_DATA_BLOCKS descriptor */ 3546 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3547 ret = skl_tplg_get_desc_blocks(dev, array); 3548 if (ret < 0) 3549 return ret; 3550 num_blocks = ret; 3551 3552 off += array->size; 3553 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3554 while (num_blocks > 0) { 3555 array = (struct snd_soc_tplg_vendor_array *) 3556 (manifest->priv.data + off); 3557 ret = skl_tplg_get_desc_blocks(dev, array); 3558 3559 if (ret < 0) 3560 return ret; 3561 block_type = ret; 3562 off += array->size; 3563 3564 array = (struct snd_soc_tplg_vendor_array *) 3565 (manifest->priv.data + off); 3566 3567 ret = skl_tplg_get_desc_blocks(dev, array); 3568 3569 if (ret < 0) 3570 return ret; 3571 block_size = ret; 3572 off += array->size; 3573 3574 data = (manifest->priv.data + off); 3575 3576 if (block_type == SKL_TYPE_TUPLE) { 3577 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3578 block_size); 3579 3580 if (ret < 0) 3581 return ret; 3582 3583 --num_blocks; 3584 } else { 3585 return -EINVAL; 3586 } 3587 off += ret; 3588 } 3589 3590 return 0; 3591 } 3592 3593 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, 3594 struct snd_soc_tplg_manifest *manifest) 3595 { 3596 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3597 struct skl_dev *skl = bus_to_skl(bus); 3598 3599 /* proceed only if we have private data defined */ 3600 if (manifest->priv.size == 0) 3601 return 0; 3602 3603 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3604 3605 if (skl->lib_count > SKL_MAX_LIB) { 3606 dev_err(bus->dev, "Exceeding max Library count. 
Got:%d\n", 3607 skl->lib_count); 3608 return -EINVAL; 3609 } 3610 3611 return 0; 3612 } 3613 3614 static int skl_tplg_complete(struct snd_soc_component *component) 3615 { 3616 struct snd_soc_dobj *dobj; 3617 struct snd_soc_acpi_mach *mach; 3618 struct snd_ctl_elem_value *val; 3619 int i; 3620 3621 val = kmalloc(sizeof(*val), GFP_KERNEL); 3622 if (!val) 3623 return -ENOMEM; 3624 3625 mach = dev_get_platdata(component->card->dev); 3626 list_for_each_entry(dobj, &component->dobj_list, list) { 3627 struct snd_kcontrol *kcontrol = dobj->control.kcontrol; 3628 struct soc_enum *se; 3629 char **texts; 3630 char chan_text[4]; 3631 3632 if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol || 3633 kcontrol->put != skl_tplg_multi_config_set_dmic) 3634 continue; 3635 3636 se = (struct soc_enum *)kcontrol->private_value; 3637 texts = dobj->control.dtexts; 3638 sprintf(chan_text, "c%d", mach->mach_params.dmic_num); 3639 3640 for (i = 0; i < se->items; i++) { 3641 if (strstr(texts[i], chan_text)) { 3642 memset(val, 0, sizeof(*val)); 3643 val->value.enumerated.item[0] = i; 3644 kcontrol->put(kcontrol, val); 3645 } 3646 } 3647 } 3648 3649 kfree(val); 3650 return 0; 3651 } 3652 3653 static struct snd_soc_tplg_ops skl_tplg_ops = { 3654 .widget_load = skl_tplg_widget_load, 3655 .control_load = skl_tplg_control_load, 3656 .bytes_ext_ops = skl_tlv_ops, 3657 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3658 .io_ops = skl_tplg_kcontrol_ops, 3659 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3660 .manifest = skl_manifest_load, 3661 .dai_load = skl_dai_load, 3662 .complete = skl_tplg_complete, 3663 }; 3664 3665 /* 3666 * A pipe can have multiple modules, each of them will be a DAPM widget as 3667 * well. While managing a pipeline we need to get the list of all the 3668 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3669 * helps to get the SKL type widgets in that pipeline 3670 */ 3671 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3672 { 3673 struct snd_soc_dapm_widget *w; 3674 struct skl_module_cfg *mcfg = NULL; 3675 struct skl_pipe_module *p_module = NULL; 3676 struct skl_pipe *pipe; 3677 3678 list_for_each_entry(w, &component->card->widgets, list) { 3679 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3680 mcfg = w->priv; 3681 pipe = mcfg->pipe; 3682 3683 p_module = devm_kzalloc(component->dev, 3684 sizeof(*p_module), GFP_KERNEL); 3685 if (!p_module) 3686 return -ENOMEM; 3687 3688 p_module->w = w; 3689 list_add_tail(&p_module->node, &pipe->w_list); 3690 } 3691 } 3692 3693 return 0; 3694 } 3695 3696 static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe) 3697 { 3698 struct skl_pipe_module *w_module; 3699 struct snd_soc_dapm_widget *w; 3700 struct skl_module_cfg *mconfig; 3701 bool host_found = false, link_found = false; 3702 3703 list_for_each_entry(w_module, &pipe->w_list, node) { 3704 w = w_module->w; 3705 mconfig = w->priv; 3706 3707 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3708 host_found = true; 3709 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3710 link_found = true; 3711 } 3712 3713 if (host_found && link_found) 3714 pipe->passthru = true; 3715 else 3716 pipe->passthru = false; 3717 } 3718 3719 /* 3720 * SKL topology init routine 3721 */ 3722 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) 3723 { 3724 int ret; 3725 const struct firmware *fw; 3726 struct skl_dev *skl = bus_to_skl(bus); 3727 struct skl_pipeline *ppl; 3728 3729 ret = request_firmware(&fw, skl->tplg_name, 
				bus->dev);
	if (ret < 0) {
		char alt_tplg_name[64];

		snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
				skl->mach->drv_name);
		dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s\n",
				skl->tplg_name, ret, alt_tplg_name);

		ret = request_firmware(&fw, alt_tplg_name, bus->dev);
		if (!ret)
			goto component_load;

		dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin\n",
				alt_tplg_name, ret);

		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
					"dfw_sst.bin", ret);
			return ret;
		}
	}

component_load:
	ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
		goto err;
	}

	ret = skl_tplg_create_pipe_widget_list(component);
	if (ret < 0) {
		dev_err(bus->dev, "tplg create pipe widget list failed %d\n",
				ret);
		goto err;
	}

	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

err:
	release_firmware(fw);
	return ret;
}

void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
{
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl, *tmp;

	list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
		list_del(&ppl->node);

	/* clean up topology */
	snd_soc_tplg_component_remove(component);
}