/*
 * skl-message.c - HDA DSP interface for FW registration, Pipe and Module
 * configurations
 *
 * Copyright (C) 2015 Intel Corp
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "skl-sst-dsp.h"
#include "cnl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"
#include "skl-tplg-interface.h"

static int skl_alloc_dma_buf(struct device *dev,
		struct snd_dma_buffer *dmab, size_t size)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}

static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	bus->io_ops->dma_free_pages(bus, dmab);

	return 0;
}

#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf

/* enable/disable notification for underruns/overruns from firmware module */
void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
{
	struct notification_mask mask;
	struct skl_ipc_large_config_msg msg = {0};

	mask.notify = NOTIFICATION_MASK;
	mask.enable = enable;

	msg.large_param_id = NOTIFICATION_PARAM_ID;
	msg.param_data_size = sizeof(mask);

	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
}

static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
				int stream_tag, int enable)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_stream *stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	struct hdac_ext_stream *estream;

	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	/* enable/disable SPIB for this hdac stream */
	snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);

	/* set the spib value */
	snd_hdac_ext_stream_set_spib(ebus, estream, size);

	return 0;
}
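/*
 * Prepare a host DMA stream for code loading: assign a playback host
 * stream, set up the DMA buffer for the given format/size and program
 * SPIB with the transfer size. Returns the stream tag used by the loader.
 */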
static int skl_dsp_prepare(struct device *dev, unsigned int format,
			unsigned int size, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_ext_stream *estream;
	struct hdac_stream *stream;
	struct snd_pcm_substream substream;
	int ret;

	if (!bus)
		return -ENODEV;

	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;

	estream = snd_hdac_ext_stream_assign(ebus, &substream,
					HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;

	stream = hdac_stream(estream);

	/* assign decoupled host dma channel */
	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
	if (ret < 0)
		return ret;

	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);

	return stream->stream_tag;
}

static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	snd_hdac_dsp_trigger(stream, start);

	return 0;
}

static int skl_dsp_cleanup(struct device *dev,
		struct snd_dma_buffer *dmab, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_ext_stream *estream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	skl_dsp_setup_spib(dev, 0, stream_tag, false);
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	snd_hdac_dsp_cleanup(stream, dmab);

	return 0;
}

static struct skl_dsp_loader_ops skl_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;

	return loader_ops;
}

static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;
	loader_ops.prepare = skl_dsp_prepare;
	loader_ops.trigger = skl_dsp_trigger;
	loader_ops.cleanup = skl_dsp_cleanup;

	return loader_ops;
}

static const struct skl_dsp_ops dsp_ops[] = {
	{
		.id = 0x9d70,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x9d71,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = kbl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x5a98,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x3198,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x9dc8,
		.num_cores = 4,
		.loader_ops = bxt_get_loader_ops,
		.init = cnl_sst_dsp_init,
		.init_fw = cnl_sst_init_fw,
		.cleanup = cnl_sst_dsp_cleanup
	},
};

const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
		if (dsp_ops[i].id == pci_id)
			return &dsp_ops[i];
	}

	return NULL;
}
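/*
 * Initialize the DSP: enable the processing pipe capability and its
 * interrupt, map the ADSP MMIO BAR, look up the platform dsp_ops by PCI
 * device ID, register the DSP and allocate per-core state tracking.
 */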
int skl_init_dsp(struct skl *skl)
{
	void __iomem *mmio_base;
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_dsp_loader_ops loader_ops;
	int irq = bus->irq;
	const struct skl_dsp_ops *ops;
	struct skl_dsp_cores *cores;
	int ret;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* read the BAR of the ADSP MMIO */
	mmio_base = pci_ioremap_bar(skl->pci, 4);
	if (mmio_base == NULL) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	ops = skl_get_dsp_ops(skl->pci->device);
	if (!ops) {
		ret = -EIO;
		goto unmap_mmio;
	}

	loader_ops = ops->loader_ops();
	ret = ops->init(bus->dev, mmio_base, irq,
				skl->fw_name, loader_ops,
				&skl->skl_sst);
	if (ret < 0)
		goto unmap_mmio;

	skl->skl_sst->dsp_ops = ops;
	cores = &skl->skl_sst->cores;
	cores->count = ops->num_cores;

	cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);
	if (!cores->state) {
		ret = -ENOMEM;
		goto unmap_mmio;
	}

	cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),
				     GFP_KERNEL);
	if (!cores->usage_count) {
		ret = -ENOMEM;
		goto free_core_state;
	}

	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);

	return 0;

free_core_state:
	kfree(cores->state);

unmap_mmio:
	iounmap(mmio_base);

	return ret;
}

int skl_free_dsp(struct skl *skl)
{
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_sst *ctx = skl->skl_sst;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);

	ctx->dsp_ops->cleanup(bus->dev, ctx);

	kfree(ctx->cores.state);
	kfree(ctx->cores.usage_count);

	if (ctx->dsp->addr.lpe)
		iounmap(ctx->dsp->addr.lpe);

	return 0;
}

/*
 * In the case of "suspend_active", i.e. the audio IP being active during
 * system suspend, immediately execute any pending D0i3 work before
 * suspending. This is needed for the IP to work in low power mode during
 * system suspend. In the case of a normal suspend, cancel any pending
 * D0i3 work.
 */
int skl_suspend_late_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct delayed_work *dwork;

	if (!ctx)
		return 0;

	dwork = &ctx->d0i3.work;

	if (dwork->work.func) {
		if (skl->supend_active)
			flush_delayed_work(dwork);
		else
			cancel_delayed_work_sync(dwork);
	}

	return 0;
}
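/*
 * Put the DSP cores to sleep and disable the processing pipe capability
 * and its interrupt on suspend; this is a no-op when ppcap is not
 * supported.
 */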
int skl_suspend_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	ret = skl_dsp_sleep(ctx->dsp);
	if (ret < 0)
		return ret;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);

	return 0;
}

int skl_resume_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* check if DSP 1st boot is done */
	if (skl->skl_sst->is_first_boot == true)
		return 0;

	ret = skl_dsp_wake(ctx->dsp);
	if (ret < 0)
		return ret;

	skl_dsp_enable_notification(skl->skl_sst, false);
	return ret;
}
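/*
 * Map a PCM sample width in bits to the firmware bit-depth encoding;
 * unsupported widths return SKL_DEPTH_INVALID.
 */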
enum skl_bitdepth skl_get_bit_depth(int params)
{
	switch (params) {
	case 8:
		return SKL_DEPTH_8BIT;

	case 16:
		return SKL_DEPTH_16BIT;

	case 24:
		return SKL_DEPTH_24BIT;

	case 32:
		return SKL_DEPTH_32BIT;

	default:
		return SKL_DEPTH_INVALID;
	}
}

/*
 * Each module in the DSP expects a base module configuration, which
 * consists of PCM format information, which we calculate in the driver,
 * and resource values, which are read from the widget information passed
 * through the topology binary.
 * This is sent when we create a module with the INIT_INSTANCE IPC msg.
 */
static void skl_set_base_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_cfg *base_cfg)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_res *res = &module->resources[mconfig->res_idx];
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->inputs[0].fmt;

	base_cfg->audio_fmt.number_of_channels = format->channels;

	base_cfg->audio_fmt.s_freq = format->s_freq;
	base_cfg->audio_fmt.bit_depth = format->bit_depth;
	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;

	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
			format->bit_depth, format->valid_bit_depth,
			format->ch_cfg);

	base_cfg->audio_fmt.channel_map = format->ch_map;

	base_cfg->audio_fmt.interleaving = format->interleaving_style;

	base_cfg->cps = res->cps;
	base_cfg->ibs = res->ibs;
	base_cfg->obs = res->obs;
	base_cfg->is_pages = res->is_pages;
}

/*
 * Copies the copier capabilities into the copier module and updates the
 * copier module config size.
 */
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
				struct skl_cpr_cfg *cpr_mconfig)
{
	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(cpr_mconfig->gtw_cfg.config_data,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

	cpr_mconfig->gtw_cfg.config_length =
			(mconfig->formats_config.caps_size) / 4;
}

#define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
/*
 * Calculate the gateway settings required for the copier module: the type
 * of gateway and the index of the gateway to use.
 */
static u32 skl_get_node_id(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	union skl_connector_node_id node_id = {0};
	union skl_ssp_dma_node ssp_node = {0};
	struct skl_pipe_params *params = mconfig->pipe->p_params;

	switch (mconfig->dev_type) {
	case SKL_DEVICE_BT:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id +
					(mconfig->vbus_id << 3);
		break;

	case SKL_DEVICE_I2S:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		ssp_node.dma_node.time_slot_index = mconfig->time_slot;
		ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
		node_id.node.vindex = ssp_node.val;
		break;

	case SKL_DEVICE_DMIC:
		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
		node_id.node.vindex = mconfig->vbus_id +
					(mconfig->time_slot);
		break;

	case SKL_DEVICE_HDALINK:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
			SKL_DMA_HDA_LINK_INPUT_CLASS;
		node_id.node.vindex = params->link_dma_id;
		break;

	case SKL_DEVICE_HDAHOST:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
			SKL_DMA_HDA_HOST_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id;
		break;

	default:
		node_id.val = 0xFFFFFFFF;
		break;
	}

	return node_id.val;
}
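/*
 * Fill the copier gateway configuration: the connector node id, the DMA
 * buffer size (taken from the topology resources when available, otherwise
 * derived from ibs/obs with a 2ms fallback) and an optional gateway config
 * blob.
 */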
static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	u32 dma_io_buf;
	struct skl_module_res *res;
	int res_idx = mconfig->res_idx;
	struct skl *skl = get_skl_ctx(ctx->dev);

	cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);

	if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
		cpr_mconfig->cpr_feature_mask = 0;
		return;
	}

	if (skl->nr_modules) {
		res = &mconfig->module->resources[mconfig->res_idx];
		cpr_mconfig->gtw_cfg.dma_buffer_size = res->dma_buffer_size;
		goto skip_buf_size_calc;
	} else {
		res = &mconfig->module->resources[res_idx];
	}

	switch (mconfig->hw_conn_type) {
	case SKL_CONN_SOURCE:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->ibs;
		else
			dma_io_buf = res->obs;
		break;

	case SKL_CONN_SINK:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->obs;
		else
			dma_io_buf = res->ibs;
		break;

	default:
		dev_warn(ctx->dev, "wrong connection type: %d\n",
				mconfig->hw_conn_type);
		return;
	}

	cpr_mconfig->gtw_cfg.dma_buffer_size =
				mconfig->dma_buffer_size * dma_io_buf;

	/* fallback to 2ms default value */
	if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
		if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->obs;
		else
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->ibs;
	}

skip_buf_size_calc:
	cpr_mconfig->cpr_feature_mask = 0;
	cpr_mconfig->gtw_cfg.config_length = 0;

	skl_copy_copier_caps(mconfig, cpr_mconfig);
}

#define DMA_CONTROL_ID 5
#define DMA_I2S_BLOB_SIZE 21
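/*
 * Send a DMA control blob (e.g. the NHLT i2s config) to the firmware for
 * the given gateway node using a large-config-set IPC.
 */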
int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
				u32 caps_size, u32 node_id)
{
	struct skl_dma_control *dma_ctrl;
	struct skl_ipc_large_config_msg msg = {0};
	int err = 0;

	/* if the blob size is zero, then return */
	if (caps_size == 0)
		return 0;

	msg.large_param_id = DMA_CONTROL_ID;
	msg.param_data_size = sizeof(struct skl_dma_control) + caps_size;

	dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
	if (dma_ctrl == NULL)
		return -ENOMEM;

	dma_ctrl->node_id = node_id;

	/*
	 * The NHLT blob may contain additional configs along with the i2s
	 * blob. The firmware expects only the i2s blob size as the
	 * config_length, so fix it to the i2s blob size.
	 * The size is in dwords.
	 */
	dma_ctrl->config_length = DMA_I2S_BLOB_SIZE;

	memcpy(dma_ctrl->config_data, caps, caps_size);

	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);

	kfree(dma_ctrl);
	return err;
}

static void skl_setup_out_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_audio_data_format *out_fmt)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->outputs[0].fmt;

	out_fmt->number_of_channels = (u8)format->channels;
	out_fmt->s_freq = format->s_freq;
	out_fmt->bit_depth = format->bit_depth;
	out_fmt->valid_bit_depth = format->valid_bit_depth;
	out_fmt->ch_cfg = format->ch_cfg;

	out_fmt->channel_map = format->ch_map;
	out_fmt->interleaving = format->interleaving_style;
	out_fmt->sample_type = format->sample_type;

	dev_dbg(ctx->dev, "copier out format chan=%d freq=%d bitdepth=%d\n",
		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}

/*
 * The DSP needs an SRC module for frequency conversion. SRC takes the base
 * module configuration plus the target frequency, passed as an extra
 * parameter in the src config.
 */
static void skl_set_src_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_src_module_cfg *src_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)src_mconfig);

	src_mconfig->src_cfg = fmt->s_freq;
}

/*
 * The DSP needs an updown module to do channel conversion. The updown
 * module takes the base module configuration and the channel
 * configuration. It also takes coefficients; defaults are applied here.
 */
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_up_down_mixer_cfg *mixer_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)mixer_mconfig);
	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
	mixer_mconfig->ch_map = fmt->ch_map;
}

/*
 * 'copier' is a DSP internal module which copies data from a host DMA (HDA
 * host dma) or a link (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, like PCM format, output
 * format and gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC msg.
 */
static void skl_set_copier_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	skl_setup_out_format(ctx, mconfig, out_fmt);
	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}

/*
 * Algo modules are DSP pre-processing modules. An algo module takes the
 * base module configuration and params.
 */
static void skl_set_algo_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_algo_cfg *algo_mcfg)
{
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(algo_mcfg->params,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);
}

/*
 * The mic select module allows selecting one or many input channels, thus
 * acting as a demux.
 *
 * The mic select module takes the base module configuration and the
 * out-format configuration.
 */
static void skl_set_base_outfmt_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_outfmt_cfg *base_outfmt_mcfg)
{
	struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
	struct skl_base_cfg *base_cfg =
				(struct skl_base_cfg *)base_outfmt_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);
	skl_setup_out_format(ctx, mconfig, out_fmt);
}
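/*
 * Return the size of the init-instance payload for the given module type,
 * including any type-specific capability blob appended to the base config.
 */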
static u16 skl_get_module_param_size(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 param_size;

	switch (mconfig->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		param_size = sizeof(struct skl_cpr_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_SRCINT:
		return sizeof(struct skl_src_module_cfg);

	case SKL_MODULE_TYPE_UPDWMIX:
		return sizeof(struct skl_up_down_mixer_cfg);

	case SKL_MODULE_TYPE_ALGO:
		param_size = sizeof(struct skl_base_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		return sizeof(struct skl_base_outfmt_cfg);

	default:
		/*
		 * return only the base cfg when no specific module type is
		 * specified
		 */
		return sizeof(struct skl_base_cfg);
	}
}

/*
 * The DSP firmware supports various modules like copier, SRC, updown etc.
 * These modules require various parameters to be calculated and sent for
 * module initialization to the DSP. By default a generic module needs only
 * the base module format configuration.
 */
static int skl_set_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *module_config,
			u16 *module_config_size,
			void **param_data)
{
	u16 param_size;

	param_size = skl_get_module_param_size(ctx, module_config);

	*param_data = kzalloc(param_size, GFP_KERNEL);
	if (*param_data == NULL)
		return -ENOMEM;

	*module_config_size = param_size;

	switch (module_config->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		skl_set_copier_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_SRCINT:
		skl_set_src_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_UPDWMIX:
		skl_set_updown_mixer_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_ALGO:
		skl_set_algo_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		skl_set_base_outfmt_format(ctx, module_config, *param_data);
		break;

	default:
		skl_set_base_module_format(ctx, module_config, *param_data);
		break;
	}

	dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
			module_config->id.module_id, param_size);
	print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
			*param_data, param_size, false);
	return 0;
}
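/*
 * Find the pin index already associated with the module instance 'id';
 * returns -EINVAL if no pin matches.
 */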
static int skl_get_queue_index(struct skl_module_pin *mpin,
			struct skl_module_inst_id id, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		if (mpin[i].id.module_id == id.module_id &&
			mpin[i].id.instance_id == id.instance_id)
			return i;
	}

	return -EINVAL;
}

/*
 * Allocates a queue for each module.
 * If dynamic, the pin_index is allocated from 0 to max_pin.
 * If static, the pin_index is fixed based on the module_id and instance id.
 */
static int skl_alloc_queue(struct skl_module_pin *mpin,
			struct skl_module_cfg *tgt_cfg, int max)
{
	int i;
	struct skl_module_inst_id id = tgt_cfg->id;

	/*
	 * If the pin is dynamic, find the first free pin; otherwise find
	 * the pin matching the module and instance id, as the topology
	 * ensures a unique pin is assigned to it, so there is no need to
	 * allocate/free.
	 */
	for (i = 0; i < max; i++) {
		if (mpin[i].is_dynamic) {
			if (!mpin[i].in_use &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].in_use = true;
				mpin[i].id.module_id = id.module_id;
				mpin[i].id.instance_id = id.instance_id;
				mpin[i].id.pvt_id = id.pvt_id;
				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		} else {
			if (mpin[i].id.module_id == id.module_id &&
				mpin[i].id.instance_id == id.instance_id &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		}
	}

	return -EINVAL;
}
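/*
 * Release a pin/queue: a dynamic pin is returned to the free pool; in all
 * cases the pin state goes back to UNBIND and the target config is cleared.
 */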
static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
	if (mpin[q_index].is_dynamic) {
		mpin[q_index].in_use = false;
		mpin[q_index].id.module_id = 0;
		mpin[q_index].id.instance_id = 0;
		mpin[q_index].id.pvt_id = 0;
	}
	mpin[q_index].pin_state = SKL_PIN_UNBIND;
	mpin[q_index].tgt_mcfg = NULL;
}

/*
 * The module state will be set back to uninit (bind state cleared) if all
 * the out pin states are UNBIND.
 */
static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
						struct skl_module_cfg *mcfg)
{
	int i;
	bool found = false;

	for (i = 0; i < max; i++) {
		if (mpin[i].pin_state == SKL_PIN_UNBIND)
			continue;
		found = true;
		break;
	}

	if (!found)
		mcfg->m_state = SKL_MODULE_INIT_DONE;
}

/*
 * A module needs to be instantiated in the DSP. A module is present in a
 * collection of modules referred to as a PIPE.
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending IPC INIT_INSTANCE using the ipc helper.
 */
int skl_init_module(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 module_config_size = 0;
	void *param_data = NULL;
	int ret;
	struct skl_ipc_init_instance_msg msg;

	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
		 mconfig->id.module_id, mconfig->id.pvt_id);

	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
				 mconfig->pipe->state, mconfig->pipe->ppl_id);
		return -EIO;
	}

	ret = skl_set_module_format(ctx, mconfig,
			&module_config_size, &param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
		return ret;
	}

	msg.module_id = mconfig->id.module_id;
	msg.instance_id = mconfig->id.pvt_id;
	msg.ppl_instance_id = mconfig->pipe->ppl_id;
	msg.param_data_size = module_config_size;
	msg.core_id = mconfig->core_id;
	msg.domain = mconfig->domain;

	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
		kfree(param_data);
		return ret;
	}
	mconfig->m_state = SKL_MODULE_INIT_DONE;
	kfree(param_data);
	return ret;
}
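/*
 * Log the ids, instances and current states of a source/destination module
 * pair before a bind or unbind operation.
 */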
static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
	*src_module, struct skl_module_cfg *dst_module)
{
	dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
		__func__, src_module->id.module_id, src_module->id.pvt_id);
	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
		 dst_module->id.module_id, dst_module->id.pvt_id);

	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
		src_module->m_state, dst_module->m_state);
}

/*
 * On module freeup, we need to unbind the module from the modules it is
 * already bound to.
 * Find the pins allocated and unbind them using the bind_unbind IPC.
 */
int skl_unbind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret;
	struct skl_ipc_bind_unbind_msg msg;
	struct skl_module_inst_id src_id = src_mcfg->id;
	struct skl_module_inst_id dst_id = dst_mcfg->id;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index, src_pin_state, dst_pin_state;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	/* get src queue index */
	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
	if (src_index < 0)
		return 0;

	msg.src_queue = src_index;

	/* get dst queue index */
	dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
	if (dst_index < 0)
		return 0;

	msg.dst_queue = dst_index;

	src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
	dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;

	if (src_pin_state != SKL_PIN_BIND_DONE ||
		dst_pin_state != SKL_PIN_BIND_DONE)
		return 0;

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = false;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
	if (!ret) {
		/* free the queue only if unbind is successful */
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		skl_free_queue(dst_mcfg->m_in_pin, dst_index);

		/*
		 * check only the src module bind state, as bind is
		 * always from src -> sink
		 */
		skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
	}

	return ret;
}
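/*
 * Copy a topology pin format into the IPC audio data format used in the
 * copier sink/source format parameter.
 */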
static void fill_pin_params(struct skl_audio_data_format *pin_fmt,
				struct skl_module_fmt *format)
{
	pin_fmt->number_of_channels = format->channels;
	pin_fmt->s_freq = format->s_freq;
	pin_fmt->bit_depth = format->bit_depth;
	pin_fmt->valid_bit_depth = format->valid_bit_depth;
	pin_fmt->ch_cfg = format->ch_cfg;
	pin_fmt->sample_type = format->sample_type;
	pin_fmt->channel_map = format->ch_map;
	pin_fmt->interleaving = format->interleaving_style;
}

#define CPR_SINK_FMT_PARAM_ID 2

/*
 * Once a module is instantiated it needs to be 'bound' to other modules in
 * the pipeline. For binding we need to find the module pins that are bound
 * together.
 * This function finds the pins and then sends a bind_unbind IPC message to
 * the DSP using the IPC helper.
 */
int skl_bind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret = 0;
	struct skl_ipc_bind_unbind_msg msg;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index;
	struct skl_module_fmt *format;
	struct skl_cpr_pin_fmt pin_fmt;
	struct skl_module *module;
	struct skl_module_iface *fmt;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
		return 0;

	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
	if (src_index < 0)
		return -EINVAL;

	msg.src_queue = src_index;
	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
	if (dst_index < 0) {
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		return -EINVAL;
	}

	/*
	 * The copier module requires a separate large_config_set IPC to
	 * configure the pins other than 0.
	 */
	if (src_mcfg->m_type == SKL_MODULE_TYPE_COPIER && src_index > 0) {
		pin_fmt.sink_id = src_index;
		module = src_mcfg->module;
		fmt = &module->formats[src_mcfg->fmt_idx];

		/* the input fmt is the same as the src module input cfg */
		format = &fmt->inputs[0].fmt;
		fill_pin_params(&(pin_fmt.src_fmt), format);

		format = &fmt->outputs[src_index].fmt;
		fill_pin_params(&(pin_fmt.dst_fmt), format);
		ret = skl_set_module_params(ctx, (void *)&pin_fmt,
					sizeof(struct skl_cpr_pin_fmt),
					CPR_SINK_FMT_PARAM_ID, src_mcfg);
		if (ret < 0)
			goto out;
	}

	msg.dst_queue = dst_index;

	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
			 msg.src_queue, msg.dst_queue);

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = true;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
	if (!ret) {
		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
		src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
		dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
		return ret;
	}
out:
	/* error case: if the IPC fails, clear the queue index */
	skl_free_queue(src_mcfg->m_out_pin, src_index);
	skl_free_queue(dst_mcfg->m_in_pin, dst_index);

	return ret;
}
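/* Send the set-pipeline-state IPC for the given pipe. */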
static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
	enum skl_ipc_pipeline_state state)
{
	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);

	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
}

/*
 * A pipeline is a collection of modules. Before a module is instantiated,
 * a pipeline needs to be created for it.
 * This function creates a pipeline by sending the create pipeline IPC
 * message to the FW.
 */
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);

	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
				pipe->pipe_priority, pipe->ppl_id,
				pipe->lp_mode);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to create pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_CREATED;

	return 0;
}
/*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
 * pause it first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
 * stop the DMA engines and release the resources.
 */
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If the pipe is started, stop the pipe in FW first */
	if (pipe->state >= SKL_PIPE_STARTED) {
		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to stop pipeline\n");
			return ret;
		}

		pipe->state = SKL_PIPE_PAUSED;
	}

	/* If the pipe was not created in FW, do not try to delete it */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to delete pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_INVALID;

	return ret;
}

/*
 * A pipeline is also a scheduling entity in the DSP which can be run or
 * stopped.
 * For processing data the pipe needs to be run by sending the set pipe
 * state IPC to the DSP.
 */
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If the pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	/* The pipe has to be paused before it is started */
	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to pause pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to start pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_STARTED;

	return 0;
}

/*
 * Stop the pipeline by sending the set pipe state IPC.
 * The DSP doesn't implement stop, so we always send the pause message.
 */
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);

	/* If the pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to stop pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	return 0;
}

/*
 * Reset the pipeline by sending the set pipe state IPC; this will reset
 * the DMA from the DSP side.
 */
int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	/* If the pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
		return ret;
	}

	pipe->state = SKL_PIPE_RESET;

	return 0;
}

/* Algo parameter set helper function */
int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
				u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
}
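/* Algo parameter get helper function */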
int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
			  u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
}