/*
 * skl-message.c - HDA DSP interface for FW registration, Pipe and Module
 * configurations
 *
 * Copyright (C) 2015 Intel Corp
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "cnl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"

static int skl_alloc_dma_buf(struct device *dev,
		struct snd_dma_buffer *dmab, size_t size)
{
	struct hdac_bus *bus = dev_get_drvdata(dev);

	if (!bus)
		return -ENODEV;

	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}

static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
	struct hdac_bus *bus = dev_get_drvdata(dev);

	if (!bus)
		return -ENODEV;

	bus->io_ops->dma_free_pages(bus, dmab);

	return 0;
}

#define SKL_ASTATE_PARAM_ID	4

void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
{
	struct skl_ipc_large_config_msg msg = {0};

	msg.large_param_id = SKL_ASTATE_PARAM_ID;
	msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
				sizeof(cnt));

	skl_ipc_set_large_config(&ctx->ipc, &msg, data);
}

#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf

/* enable/disable notification for underruns/overruns from firmware module */
void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
{
	struct notification_mask mask;
	struct skl_ipc_large_config_msg msg = {0};

	mask.notify = NOTIFICATION_MASK;
	mask.enable = enable;

	msg.large_param_id = NOTIFICATION_PARAM_ID;
	msg.param_data_size = sizeof(mask);

	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
}

static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
				int stream_tag, int enable)
{
	struct hdac_bus *bus = dev_get_drvdata(dev);
	struct hdac_stream *stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	struct hdac_ext_stream *estream;

	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);

	/* enable/disable SPIB for this hdac stream */
	snd_hdac_ext_stream_spbcap_enable(bus, enable, stream->index);

	/* set the spib value */
	snd_hdac_ext_stream_set_spib(bus, estream, size);

	return 0;
}

static int skl_dsp_prepare(struct device *dev, unsigned int format,
			unsigned int size, struct snd_dma_buffer *dmab)
{
	struct hdac_bus *bus = dev_get_drvdata(dev);
	struct hdac_ext_stream *estream;
	struct hdac_stream *stream;
	struct snd_pcm_substream substream;
	int ret;

	if (!bus)
		return -ENODEV;

	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
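	/*
	 * The dummy playback substream above is only used to borrow a host
	 * (decoupled) DMA stream from the bus; this loader path is presumably
	 * exercised for firmware/library download rather than for audio data.
	 */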

	estream = snd_hdac_ext_stream_assign(bus, &substream,
					HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;

	stream = hdac_stream(estream);

	/* assign decouple host dma channel */
	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
	if (ret < 0)
		return ret;

	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);

	return stream->stream_tag;
}

static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
	struct hdac_bus *bus = dev_get_drvdata(dev);
	struct hdac_stream *stream;

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	snd_hdac_dsp_trigger(stream, start);

	return 0;
}

static int skl_dsp_cleanup(struct device *dev,
		struct snd_dma_buffer *dmab, int stream_tag)
{
	struct hdac_bus *bus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_ext_stream *estream;

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	skl_dsp_setup_spib(dev, 0, stream_tag, false);
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	snd_hdac_dsp_cleanup(stream, dmab);

	return 0;
}

static struct skl_dsp_loader_ops skl_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;

	return loader_ops;
}

static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;
	loader_ops.prepare = skl_dsp_prepare;
	loader_ops.trigger = skl_dsp_trigger;
	loader_ops.cleanup = skl_dsp_cleanup;

	return loader_ops;
}

static const struct skl_dsp_ops dsp_ops[] = {
	{
		.id = 0x9d70,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x9d71,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x5a98,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x3198,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x9dc8,
		.num_cores = 4,
		.loader_ops = bxt_get_loader_ops,
		.init = cnl_sst_dsp_init,
		.init_fw = cnl_sst_init_fw,
		.cleanup = cnl_sst_dsp_cleanup
	},
	{
		.id = 0xa348,
		.num_cores = 4,
		.loader_ops = bxt_get_loader_ops,
		.init = cnl_sst_dsp_init,
		.init_fw = cnl_sst_init_fw,
		.cleanup = cnl_sst_dsp_cleanup
	},
};
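/*
 * Look up the platform-specific DSP ops for the audio controller's PCI
 * device ID; an unknown ID returns NULL and makes skl_init_dsp() below
 * fail with -EIO.
 */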
const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
		if (dsp_ops[i].id == pci_id)
			return &dsp_ops[i];
	}

	return NULL;
}

int skl_init_dsp(struct skl *skl)
{
	void __iomem *mmio_base;
	struct hdac_bus *bus = skl_to_bus(skl);
	struct skl_dsp_loader_ops loader_ops;
	int irq = bus->irq;
	const struct skl_dsp_ops *ops;
	struct skl_dsp_cores *cores;
	int ret;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	/* read the BAR of the ADSP MMIO */
	mmio_base = pci_ioremap_bar(skl->pci, 4);
	if (mmio_base == NULL) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	ops = skl_get_dsp_ops(skl->pci->device);
	if (!ops) {
		ret = -EIO;
		goto unmap_mmio;
	}

	loader_ops = ops->loader_ops();
	ret = ops->init(bus->dev, mmio_base, irq,
				skl->fw_name, loader_ops,
				&skl->skl_sst);
	if (ret < 0)
		goto unmap_mmio;

	skl->skl_sst->dsp_ops = ops;
	cores = &skl->skl_sst->cores;
	cores->count = ops->num_cores;

	cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);
	if (!cores->state) {
		ret = -ENOMEM;
		goto unmap_mmio;
	}

	cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),
				     GFP_KERNEL);
	if (!cores->usage_count) {
		ret = -ENOMEM;
		goto free_core_state;
	}

	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);

	return 0;

free_core_state:
	kfree(cores->state);

unmap_mmio:
	iounmap(mmio_base);

	return ret;
}

int skl_free_dsp(struct skl *skl)
{
	struct hdac_bus *bus = skl_to_bus(skl);
	struct skl_sst *ctx = skl->skl_sst;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ctx->dsp_ops->cleanup(bus->dev, ctx);

	kfree(ctx->cores.state);
	kfree(ctx->cores.usage_count);

	if (ctx->dsp->addr.lpe)
		iounmap(ctx->dsp->addr.lpe);

	return 0;
}

/*
 * In the case of "suspend_active", i.e. the audio IP being active during
 * system suspend, immediately execute any pending D0i3 work before
 * suspending. This is needed for the IP to work in low power mode during
 * system suspend. In the case of a normal suspend, cancel any pending
 * D0i3 work instead.
 */
int skl_suspend_late_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct delayed_work *dwork;

	if (!ctx)
		return 0;

	dwork = &ctx->d0i3.work;

	if (dwork->work.func) {
		if (skl->supend_active)
			flush_delayed_work(dwork);
		else
			cancel_delayed_work_sync(dwork);
	}

	return 0;
}

int skl_suspend_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct hdac_bus *bus = skl_to_bus(skl);
	int ret;

	/* if ppcap is not supported return 0 */
	if (!bus->ppcap)
		return 0;

	ret = skl_dsp_sleep(ctx->dsp);
	if (ret < 0)
		return ret;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	return 0;
}

int skl_resume_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct hdac_bus *bus = skl_to_bus(skl);
	int ret;

	/* if ppcap is not supported return 0 */
	if (!bus->ppcap)
		return 0;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	/* check if DSP 1st boot is done */
	if (skl->skl_sst->is_first_boot)
		return 0;

	/*
	 * Disable dynamic clock and power gating during firmware
	 * and library download
	 */
	ctx->enable_miscbdcge(ctx->dev, false);
	ctx->clock_power_gating(ctx->dev, false);

	ret = skl_dsp_wake(ctx->dsp);
	ctx->enable_miscbdcge(ctx->dev, true);
	ctx->clock_power_gating(ctx->dev, true);
	if (ret < 0)
		return ret;

	skl_dsp_enable_notification(skl->skl_sst, false);

	if (skl->cfg.astate_cfg != NULL) {
		skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
					skl->cfg.astate_cfg);
	}
	return ret;
}

enum skl_bitdepth skl_get_bit_depth(int params)
{
	switch (params) {
	case 8:
		return SKL_DEPTH_8BIT;

	case 16:
		return SKL_DEPTH_16BIT;

	case 24:
		return SKL_DEPTH_24BIT;

	case 32:
		return SKL_DEPTH_32BIT;

	default:
		return SKL_DEPTH_INVALID;
	}
}

/*
 * Each module in the DSP expects a base module configuration, which consists
 * of the PCM format information, which we calculate in the driver, and
 * resource values, which are read from the widget information passed through
 * the topology binary.
 * This is sent when we create a module with the INIT_INSTANCE IPC msg.
 */
static void skl_set_base_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_cfg *base_cfg)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_res *res = &module->resources[mconfig->res_idx];
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->inputs[0].fmt;

	base_cfg->audio_fmt.number_of_channels = format->channels;

	base_cfg->audio_fmt.s_freq = format->s_freq;
	base_cfg->audio_fmt.bit_depth = format->bit_depth;
	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;

	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
			format->bit_depth, format->valid_bit_depth,
			format->ch_cfg);

	base_cfg->audio_fmt.channel_map = format->ch_map;

	base_cfg->audio_fmt.interleaving = format->interleaving_style;

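	/*
	 * Resource values below are read from the topology binary: cps is
	 * the module's processing-cycles budget and ibs/obs are its input/
	 * output buffer sizes (the exact units are defined by the topology
	 * interface, not by this file).
	 */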
	base_cfg->cps = res->cps;
	base_cfg->ibs = res->ibs;
	base_cfg->obs = res->obs;
	base_cfg->is_pages = res->is_pages;
}

/*
 * Copies the copier capabilities into the copier module and updates the
 * copier module config size.
 */
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
				struct skl_cpr_cfg *cpr_mconfig)
{
	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(cpr_mconfig->gtw_cfg.config_data,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

	cpr_mconfig->gtw_cfg.config_length =
			(mconfig->formats_config.caps_size) / 4;
}

#define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
/*
 * Calculate the gateway settings required for the copier module: the type
 * of gateway and the index of the gateway to use.
 */
static u32 skl_get_node_id(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	union skl_connector_node_id node_id = {0};
	union skl_ssp_dma_node ssp_node = {0};
	struct skl_pipe_params *params = mconfig->pipe->p_params;

	switch (mconfig->dev_type) {
	case SKL_DEVICE_BT:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id +
					(mconfig->vbus_id << 3);
		break;

	case SKL_DEVICE_I2S:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		ssp_node.dma_node.time_slot_index = mconfig->time_slot;
		ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
		node_id.node.vindex = ssp_node.val;
		break;

	case SKL_DEVICE_DMIC:
		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
		node_id.node.vindex = mconfig->vbus_id +
					(mconfig->time_slot);
		break;

	case SKL_DEVICE_HDALINK:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
			SKL_DMA_HDA_LINK_INPUT_CLASS;
		node_id.node.vindex = params->link_dma_id;
		break;

	case SKL_DEVICE_HDAHOST:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
			SKL_DMA_HDA_HOST_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id;
		break;

	default:
		node_id.val = 0xFFFFFFFF;
		break;
	}

	return node_id.val;
}

static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	u32 dma_io_buf;
	struct skl_module_res *res;
	int res_idx = mconfig->res_idx;
	struct skl *skl = get_skl_ctx(ctx->dev);

	cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);

	if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
		cpr_mconfig->cpr_feature_mask = 0;
		return;
	}

	if (skl->nr_modules) {
		res = &mconfig->module->resources[mconfig->res_idx];
		cpr_mconfig->gtw_cfg.dma_buffer_size = res->dma_buffer_size;
		goto skip_buf_size_calc;
	} else {
		res = &mconfig->module->resources[res_idx];
	}

	switch (mconfig->hw_conn_type) {
	case SKL_CONN_SOURCE:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->ibs;
		else
			dma_io_buf = res->obs;
		break;

	case SKL_CONN_SINK:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->obs;
		else
			dma_io_buf = res->ibs;
		break;

	default:
		dev_warn(ctx->dev, "wrong connection type: %d\n",
				mconfig->hw_conn_type);
		return;
	}

	cpr_mconfig->gtw_cfg.dma_buffer_size =
				mconfig->dma_buffer_size * dma_io_buf;

	/* fallback to 2ms default value */
	if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
		if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->obs;
		else
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->ibs;
	}

skip_buf_size_calc:
	cpr_mconfig->cpr_feature_mask = 0;
	cpr_mconfig->gtw_cfg.config_length = 0;

	skl_copy_copier_caps(mconfig, cpr_mconfig);
}

#define DMA_CONTROL_ID 5
#define DMA_I2S_BLOB_SIZE 21

int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
				u32 caps_size, u32 node_id)
{
	struct skl_dma_control *dma_ctrl;
	struct skl_ipc_large_config_msg msg = {0};
	int err = 0;

	/* if the blob size is zero, then return */
	if (caps_size == 0)
		return 0;

	msg.large_param_id = DMA_CONTROL_ID;
	msg.param_data_size = sizeof(struct skl_dma_control) + caps_size;

	dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
	if (dma_ctrl == NULL)
		return -ENOMEM;

	dma_ctrl->node_id = node_id;

	/*
	 * The NHLT blob may contain additional configs along with the i2s
	 * blob. The firmware expects only the i2s blob size as the
	 * config_length, so fix it to the i2s blob size.
	 * Size is in dwords.
	 */
	dma_ctrl->config_length = DMA_I2S_BLOB_SIZE;

	memcpy(dma_ctrl->config_data, caps, caps_size);

	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);

	kfree(dma_ctrl);
	return err;
}
EXPORT_SYMBOL_GPL(skl_dsp_set_dma_control);

static void skl_setup_out_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_audio_data_format *out_fmt)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->outputs[0].fmt;

	out_fmt->number_of_channels = (u8)format->channels;
	out_fmt->s_freq = format->s_freq;
	out_fmt->bit_depth = format->bit_depth;
	out_fmt->valid_bit_depth = format->valid_bit_depth;
	out_fmt->ch_cfg = format->ch_cfg;

	out_fmt->channel_map = format->ch_map;
	out_fmt->interleaving = format->interleaving_style;
	out_fmt->sample_type = format->sample_type;

	dev_dbg(ctx->dev, "copier out format chan=%d freq=%d bitdepth=%d\n",
		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}

/*
 * The DSP needs an SRC module for frequency conversion. SRC takes the base
 * module configuration plus the target frequency as an extra parameter,
 * passed as the src config.
 */
static void skl_set_src_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_src_module_cfg *src_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)src_mconfig);

	src_mconfig->src_cfg = fmt->s_freq;
}

/*
 * The DSP needs an updown module to do channel conversion. The updown module
 * takes the base module configuration and the channel configuration.
 * It also takes coefficients; for now the defaults are applied here.
 */
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_up_down_mixer_cfg *mixer_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)mixer_mconfig);
	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
	mixer_mconfig->ch_map = fmt->ch_map;
}

/*
 * 'copier' is a DSP-internal module which copies data from host DMA (HDA
 * host DMA) or link (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, like the PCM format,
 * output format and gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC msg.
 */
static void skl_set_copier_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	skl_setup_out_format(ctx, mconfig, out_fmt);
	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}

/*
 * Algo modules are DSP pre-processing modules. An algo module takes the base
 * module configuration and params.
 */
static void skl_set_algo_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_algo_cfg *algo_mcfg)
{
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(algo_mcfg->params,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);
}

/*
 * The mic select module allows selecting one or many input channels, thus
 * acting as a demux.
 *
 * The mic select module takes the base module configuration and the
 * out-format configuration.
 */
static void skl_set_base_outfmt_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_outfmt_cfg *base_outfmt_mcfg)
{
	struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
	struct skl_base_cfg *base_cfg =
				(struct skl_base_cfg *)base_outfmt_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);
	skl_setup_out_format(ctx, mconfig, out_fmt);
}

static u16 skl_get_module_param_size(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 param_size;

	switch (mconfig->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		param_size = sizeof(struct skl_cpr_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_SRCINT:
		return sizeof(struct skl_src_module_cfg);

	case SKL_MODULE_TYPE_UPDWMIX:
		return sizeof(struct skl_up_down_mixer_cfg);

	case SKL_MODULE_TYPE_ALGO:
		param_size = sizeof(struct skl_base_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		return sizeof(struct skl_base_outfmt_cfg);

	default:
		/*
		 * return only the base cfg when no specific module type is
		 * specified
		 */
		return sizeof(struct skl_base_cfg);
	}

	return 0;
}

/*
 * The DSP firmware supports various modules like copier, SRC, updown etc.
 * These modules require various parameters to be calculated and sent for
 * the module initialization to the DSP.
 * By default a generic module needs only the base module format
 * configuration.
 */
static int skl_set_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *module_config,
			u16 *module_config_size,
			void **param_data)
{
	u16 param_size;

	param_size = skl_get_module_param_size(ctx, module_config);

	*param_data = kzalloc(param_size, GFP_KERNEL);
	if (!*param_data)
		return -ENOMEM;

	*module_config_size = param_size;

	switch (module_config->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		skl_set_copier_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_SRCINT:
		skl_set_src_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_UPDWMIX:
		skl_set_updown_mixer_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_ALGO:
		skl_set_algo_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		skl_set_base_outfmt_format(ctx, module_config, *param_data);
		break;

	default:
		skl_set_base_module_format(ctx, module_config, *param_data);
		break;
	}

	dev_dbg(ctx->dev, "Module id=%d config size: %d bytes\n",
			module_config->id.module_id, param_size);
	print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
			*param_data, param_size, false);
	return 0;
}

static int skl_get_queue_index(struct skl_module_pin *mpin,
			struct skl_module_inst_id id, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		if (mpin[i].id.module_id == id.module_id &&
			mpin[i].id.instance_id == id.instance_id)
			return i;
	}

	return -EINVAL;
}

/*
 * Allocates a queue for each module.
 * If dynamic, the pin_index is allocated from 0 to max_pin.
 * If static, the pin_index is fixed based on the module_id and instance id.
 */
static int skl_alloc_queue(struct skl_module_pin *mpin,
			struct skl_module_cfg *tgt_cfg, int max)
{
	int i;
	struct skl_module_inst_id id = tgt_cfg->id;

	/*
	 * If the pin is dynamic, find the first free pin; otherwise find the
	 * pin matching the module and instance id. The topology will ensure
	 * a unique pin is assigned to it, so there is no need to
	 * allocate/free.
	 */
	for (i = 0; i < max; i++) {
		if (mpin[i].is_dynamic) {
			if (!mpin[i].in_use &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].in_use = true;
				mpin[i].id.module_id = id.module_id;
				mpin[i].id.instance_id = id.instance_id;
				mpin[i].id.pvt_id = id.pvt_id;
				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		} else {
			if (mpin[i].id.module_id == id.module_id &&
				mpin[i].id.instance_id == id.instance_id &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		}
	}

	return -EINVAL;
}

static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
	if (mpin[q_index].is_dynamic) {
		mpin[q_index].in_use = false;
		mpin[q_index].id.module_id = 0;
		mpin[q_index].id.instance_id = 0;
		mpin[q_index].id.pvt_id = 0;
	}
	mpin[q_index].pin_state = SKL_PIN_UNBIND;
	mpin[q_index].tgt_mcfg = NULL;
}

/* Reset the module state to INIT_DONE when all of its out pins are UNBIND */
static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
						struct skl_module_cfg *mcfg)
{
	int i;
	bool found = false;

	for (i = 0; i < max; i++) {
		if (mpin[i].pin_state == SKL_PIN_UNBIND)
			continue;
		found = true;
		break;
	}

	if (!found)
		mcfg->m_state = SKL_MODULE_INIT_DONE;
	return;
}

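/*
 * Module state bookkeeping used by the helpers below: a module moves to
 * SKL_MODULE_INIT_DONE once the INIT_INSTANCE IPC succeeds and to
 * SKL_MODULE_BIND_DONE after a successful bind; unbinding the last bound
 * output pin drops it back to INIT_DONE (see skl_clear_module_state()
 * above).
 */
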
/*
 * A module needs to be instantiated in the DSP. A module is present in a
 * collection of modules referred to as a PIPE.
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending the IPC INIT_INSTANCE using the IPC helper.
 */
int skl_init_module(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 module_config_size = 0;
	void *param_data = NULL;
	int ret;
	struct skl_ipc_init_instance_msg msg;

	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
		 mconfig->id.module_id, mconfig->id.pvt_id);

	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
				 mconfig->pipe->state, mconfig->pipe->ppl_id);
		return -EIO;
	}

	ret = skl_set_module_format(ctx, mconfig,
			&module_config_size, &param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
		return ret;
	}

	msg.module_id = mconfig->id.module_id;
	msg.instance_id = mconfig->id.pvt_id;
	msg.ppl_instance_id = mconfig->pipe->ppl_id;
	msg.param_data_size = module_config_size;
	msg.core_id = mconfig->core_id;
	msg.domain = mconfig->domain;

	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
		kfree(param_data);
		return ret;
	}
	mconfig->m_state = SKL_MODULE_INIT_DONE;
	kfree(param_data);
	return ret;
}

static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
	*src_module, struct skl_module_cfg *dst_module)
{
	dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
		__func__, src_module->id.module_id, src_module->id.pvt_id);
	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
		 dst_module->id.module_id, dst_module->id.pvt_id);

	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
		src_module->m_state, dst_module->m_state);
}

/*
 * On module free-up, we need to unbind the module from the modules it is
 * already bound to.
 * Find the allocated pins and unbind them using the bind_unbind IPC.
 */
int skl_unbind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret;
	struct skl_ipc_bind_unbind_msg msg;
	struct skl_module_inst_id src_id = src_mcfg->id;
	struct skl_module_inst_id dst_id = dst_mcfg->id;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index, src_pin_state, dst_pin_state;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	/* get src queue index */
	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
	if (src_index < 0)
		return 0;

	msg.src_queue = src_index;

	/* get dst queue index */
	dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
	if (dst_index < 0)
		return 0;

	msg.dst_queue = dst_index;

	src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
	dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;

	if (src_pin_state != SKL_PIN_BIND_DONE ||
		dst_pin_state != SKL_PIN_BIND_DONE)
		return 0;

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = false;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
	if (!ret) {
		/* free queue only if unbind is successful */
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		skl_free_queue(dst_mcfg->m_in_pin, dst_index);

		/*
		 * check the bind state only on the src module, as a bind is
		 * always from src -> sink
		 */
		skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
	}

	return ret;
}

static void fill_pin_params(struct skl_audio_data_format *pin_fmt,
				struct skl_module_fmt *format)
{
	pin_fmt->number_of_channels = format->channels;
	pin_fmt->s_freq = format->s_freq;
	pin_fmt->bit_depth = format->bit_depth;
	pin_fmt->valid_bit_depth = format->valid_bit_depth;
	pin_fmt->ch_cfg = format->ch_cfg;
	pin_fmt->sample_type = format->sample_type;
	pin_fmt->channel_map = format->ch_map;
	pin_fmt->interleaving = format->interleaving_style;
}

#define CPR_SINK_FMT_PARAM_ID 2

/*
 * Once a module is instantiated it needs to be 'bound' with the other
 * modules in the pipeline.
 * For binding we need to find the module pins that are bound together.
 * This function finds the pins and then sends a bind_unbind IPC message to
 * the DSP using the IPC helper.
 */
int skl_bind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret = 0;
	struct skl_ipc_bind_unbind_msg msg;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index;
	struct skl_module_fmt *format;
	struct skl_cpr_pin_fmt pin_fmt;
	struct skl_module *module;
	struct skl_module_iface *fmt;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
		return 0;

	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
	if (src_index < 0)
		return -EINVAL;

	msg.src_queue = src_index;
	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
	if (dst_index < 0) {
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		return -EINVAL;
	}

	/*
	 * The copier module requires a separate large_config_set IPC to
	 * configure the pins other than 0.
	 */
	if (src_mcfg->m_type == SKL_MODULE_TYPE_COPIER && src_index > 0) {
		pin_fmt.sink_id = src_index;
		module = src_mcfg->module;
		fmt = &module->formats[src_mcfg->fmt_idx];

		/* input fmt is same as that of the src module input cfg */
		format = &fmt->inputs[0].fmt;
		fill_pin_params(&(pin_fmt.src_fmt), format);

		format = &fmt->outputs[src_index].fmt;
		fill_pin_params(&(pin_fmt.dst_fmt), format);
		ret = skl_set_module_params(ctx, (void *)&pin_fmt,
					sizeof(struct skl_cpr_pin_fmt),
					CPR_SINK_FMT_PARAM_ID, src_mcfg);

		if (ret < 0)
			goto out;
	}

	msg.dst_queue = dst_index;

	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
			 msg.src_queue, msg.dst_queue);

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = true;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);

	if (!ret) {
		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
		src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
		dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
		return ret;
	}
out:
	/* error case: if the IPC fails, clear the queue index */
	skl_free_queue(src_mcfg->m_out_pin, src_index);
	skl_free_queue(dst_mcfg->m_in_pin, dst_index);

	return ret;
}

static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
	enum skl_ipc_pipeline_state state)
{
	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);

	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
}

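/*
 * Pipeline state bookkeeping used by the helpers below: a pipe moves from
 * SKL_PIPE_INVALID to SKL_PIPE_CREATED on create, is paused (PPL_PAUSED)
 * before being run (PPL_RUNNING) or deleted, and returns to
 * SKL_PIPE_INVALID once deleted; skl_reset_pipe() moves a paused pipe to
 * SKL_PIPE_RESET.
 */
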
/*
 * A pipeline is a collection of modules. Before a module is instantiated, a
 * pipeline needs to be created for it.
 * This function creates a pipeline by sending the create pipeline IPC
 * message to the FW.
 */
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);

	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
				pipe->pipe_priority, pipe->ppl_id,
				pipe->lp_mode);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to create pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_CREATED;

	return 0;
}

/*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
 * pause it first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
 * stop the DMA engines and release the resources.
 */
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe is started, do stop the pipe in FW. */
	if (pipe->state >= SKL_PIPE_STARTED) {
		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to stop pipeline\n");
			return ret;
		}

		pipe->state = SKL_PIPE_PAUSED;
	}

	/* If the pipe was not created in FW, do not try to delete it */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to delete pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_INVALID;

	return ret;
}

/*
 * A pipeline is also a scheduling entity in the DSP, which can be run or
 * stopped.
 * To process data, the pipe needs to be run by sending the set pipe state
 * IPC to the DSP.
 */
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	/* Pipe has to be paused before it is started */
	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to pause pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to start pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_STARTED;

	return 0;
}

/*
 * Stop the pipeline by sending the set pipe state IPC.
 * The DSP doesn't implement stop, so we always send the pause message.
 */
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to stop pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	return 0;
}

/*
 * Reset the pipeline by sending the set pipe state IPC; this will reset the
 * DMA from the DSP side.
 */
int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
		return ret;
	}

	pipe->state = SKL_PIPE_RESET;

	return 0;
}

/* Algo parameter set helper function */
int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
				u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
}

int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
			  u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
}