/*
 *  skl-message.c - HDA DSP interface for FW registration, Pipe and Module
 *  configurations
 *
 *  Copyright (C) 2015 Intel Corp
 *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	    Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "skl-sst-dsp.h"
#include "cnl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"
#include "skl-tplg-interface.h"

static int skl_alloc_dma_buf(struct device *dev,
		struct snd_dma_buffer *dmab, size_t size)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}

static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	bus->io_ops->dma_free_pages(bus, dmab);

	return 0;
}

#define SKL_ASTATE_PARAM_ID	4

void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
{
	struct skl_ipc_large_config_msg msg = {0};

	msg.large_param_id = SKL_ASTATE_PARAM_ID;
	msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
				sizeof(cnt));

	skl_ipc_set_large_config(&ctx->ipc, &msg, data);
}

#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf

/* disable notification for underruns/overruns from firmware module */
void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
{
	struct notification_mask mask;
	struct skl_ipc_large_config_msg msg = {0};

	mask.notify = NOTIFICATION_MASK;
	mask.enable = enable;

	msg.large_param_id = NOTIFICATION_PARAM_ID;
	msg.param_data_size = sizeof(mask);

	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
}

static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
				int stream_tag, int enable)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_stream *stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	struct hdac_ext_stream *estream;

	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	/* enable/disable SPIB for this hdac stream */
	snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);

	/* set the spib value */
	snd_hdac_ext_stream_set_spib(ebus, estream, size);

	return 0;
}

static int skl_dsp_prepare(struct device *dev, unsigned int format,
			unsigned int size, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_ext_stream *estream;
	struct hdac_stream *stream;
	struct snd_pcm_substream substream;
	int ret;

	if (!bus)
		return -ENODEV;

	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;

	estream = snd_hdac_ext_stream_assign(ebus, &substream,
					HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;

	stream = hdac_stream(estream);

	/* assign decouple host dma channel */
	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
	if (ret < 0)
		return ret;

	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);

	return stream->stream_tag;
}

static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	snd_hdac_dsp_trigger(stream, start);

	return 0;
}

static int skl_dsp_cleanup(struct device *dev,
		struct snd_dma_buffer *dmab, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_ext_stream *estream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	skl_dsp_setup_spib(dev, 0, stream_tag, false);
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	snd_hdac_dsp_cleanup(stream, dmab);

	return 0;
}

static struct skl_dsp_loader_ops skl_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;

	return loader_ops;
};

static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;
	loader_ops.prepare = skl_dsp_prepare;
	loader_ops.trigger = skl_dsp_trigger;
	loader_ops.cleanup = skl_dsp_cleanup;

	return loader_ops;
};

static const struct skl_dsp_ops dsp_ops[] = {
	{
		.id = 0x9d70,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x9d71,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = kbl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x5a98,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x3198,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x9dc8,
		.num_cores = 4,
		.loader_ops = bxt_get_loader_ops,
		.init = cnl_sst_dsp_init,
		.init_fw = cnl_sst_init_fw,
		.cleanup = cnl_sst_dsp_cleanup
	},
};

const struct skl_dsp_ops
*skl_get_dsp_ops(int pci_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
		if (dsp_ops[i].id == pci_id)
			return &dsp_ops[i];
	}

	return NULL;
}

int skl_init_dsp(struct skl *skl)
{
	void __iomem *mmio_base;
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_dsp_loader_ops loader_ops;
	int irq = bus->irq;
	const struct skl_dsp_ops *ops;
	struct skl_dsp_cores *cores;
	int ret;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* read the BAR of the ADSP MMIO */
	mmio_base = pci_ioremap_bar(skl->pci, 4);
	if (mmio_base == NULL) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	ops = skl_get_dsp_ops(skl->pci->device);
	if (!ops) {
		ret = -EIO;
		goto unmap_mmio;
	}

	loader_ops = ops->loader_ops();
	ret = ops->init(bus->dev, mmio_base, irq,
				skl->fw_name, loader_ops,
				&skl->skl_sst);

	if (ret < 0)
		goto unmap_mmio;

	skl->skl_sst->dsp_ops = ops;
	cores = &skl->skl_sst->cores;
	cores->count = ops->num_cores;

	cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);
	if (!cores->state) {
		ret = -ENOMEM;
		goto unmap_mmio;
	}

	cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),
				     GFP_KERNEL);
	if (!cores->usage_count) {
		ret = -ENOMEM;
		goto free_core_state;
	}

	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);

	return 0;

free_core_state:
	kfree(cores->state);

unmap_mmio:
	iounmap(mmio_base);

	return ret;
}

int skl_free_dsp(struct skl *skl)
{
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_sst *ctx = skl->skl_sst;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);

	ctx->dsp_ops->cleanup(bus->dev, ctx);

	kfree(ctx->cores.state);
	kfree(ctx->cores.usage_count);

	if (ctx->dsp->addr.lpe)
		iounmap(ctx->dsp->addr.lpe);

	return 0;
}

/*
 * In the case of "suspend_active", i.e. the Audio IP being active
 * during system suspend, immediately execute any pending D0i3 work
 * before suspending. This is needed for the IP to work in low power
 * mode during system suspend. In the case of normal suspend, cancel
 * any pending D0i3 work.
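 * (flush_delayed_work() below runs any queued D0i3 transition synchronously
 * before we continue suspending, while cancel_delayed_work_sync() simply
 * drops it)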
 */
int skl_suspend_late_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct delayed_work *dwork;

	if (!ctx)
		return 0;

	dwork = &ctx->d0i3.work;

	if (dwork->work.func) {
		if (skl->supend_active)
			flush_delayed_work(dwork);
		else
			cancel_delayed_work_sync(dwork);
	}

	return 0;
}

int skl_suspend_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	ret = skl_dsp_sleep(ctx->dsp);
	if (ret < 0)
		return ret;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);

	return 0;
}

int skl_resume_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* check if DSP 1st boot is done */
	if (skl->skl_sst->is_first_boot == true)
		return 0;

	/* disable dynamic clock gating during fw and lib download */
	ctx->enable_miscbdcge(ctx->dev, false);

	ret = skl_dsp_wake(ctx->dsp);
	ctx->enable_miscbdcge(ctx->dev, true);
	if (ret < 0)
		return ret;

	skl_dsp_enable_notification(skl->skl_sst, false);

	if (skl->cfg.astate_cfg != NULL) {
		skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
					skl->cfg.astate_cfg);
	}
	return ret;
}

enum skl_bitdepth skl_get_bit_depth(int params)
{
	switch (params) {
	case 8:
		return SKL_DEPTH_8BIT;

	case 16:
		return SKL_DEPTH_16BIT;

	case 24:
		return SKL_DEPTH_24BIT;

	case 32:
		return SKL_DEPTH_32BIT;

	default:
		return SKL_DEPTH_INVALID;

	}
}

/*
 * Each module in the DSP expects a base module configuration, which consists
 * of PCM format information (calculated in the driver) and resource values
 * read from the widget information passed through the topology binary.
 * This is sent when we create a module with the INIT_INSTANCE IPC msg.
 */
static void skl_set_base_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_cfg *base_cfg)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_res *res = &module->resources[mconfig->res_idx];
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->inputs[0].fmt;

	base_cfg->audio_fmt.number_of_channels = format->channels;

	base_cfg->audio_fmt.s_freq = format->s_freq;
	base_cfg->audio_fmt.bit_depth = format->bit_depth;
	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;

	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
			format->bit_depth, format->valid_bit_depth,
			format->ch_cfg);

	base_cfg->audio_fmt.channel_map = format->ch_map;

	base_cfg->audio_fmt.interleaving = format->interleaving_style;

	base_cfg->cps = res->cps;
	base_cfg->ibs = res->ibs;
	base_cfg->obs = res->obs;
	base_cfg->is_pages = res->is_pages;
}

/*
 * Copies copier capabilities into copier module and updates copier module
 * config size.
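 * Note that the firmware expects gtw_cfg.config_length in dwords, hence the
 * division of caps_size by 4 below.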
 */
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
				struct skl_cpr_cfg *cpr_mconfig)
{
	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(cpr_mconfig->gtw_cfg.config_data,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

	cpr_mconfig->gtw_cfg.config_length =
			(mconfig->formats_config.caps_size) / 4;
}

#define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
/*
 * Calculate the gateway settings required for the copier module: the type of
 * gateway and the index of the gateway to use
 */
static u32 skl_get_node_id(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	union skl_connector_node_id node_id = {0};
	union skl_ssp_dma_node ssp_node = {0};
	struct skl_pipe_params *params = mconfig->pipe->p_params;

	switch (mconfig->dev_type) {
	case SKL_DEVICE_BT:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id +
					(mconfig->vbus_id << 3);
		break;

	case SKL_DEVICE_I2S:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		ssp_node.dma_node.time_slot_index = mconfig->time_slot;
		ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
		node_id.node.vindex = ssp_node.val;
		break;

	case SKL_DEVICE_DMIC:
		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
		node_id.node.vindex = mconfig->vbus_id +
					(mconfig->time_slot);
		break;

	case SKL_DEVICE_HDALINK:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
			SKL_DMA_HDA_LINK_INPUT_CLASS;
		node_id.node.vindex = params->link_dma_id;
		break;

	case SKL_DEVICE_HDAHOST:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
			SKL_DMA_HDA_HOST_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id;
		break;

	default:
		node_id.val = 0xFFFFFFFF;
		break;
	}

	return node_id.val;
}

static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	u32 dma_io_buf;
	struct skl_module_res *res;
	int res_idx = mconfig->res_idx;
	struct skl *skl = get_skl_ctx(ctx->dev);

	cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);

	if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
		cpr_mconfig->cpr_feature_mask = 0;
		return;
	}

	if (skl->nr_modules) {
		res = &mconfig->module->resources[mconfig->res_idx];
		cpr_mconfig->gtw_cfg.dma_buffer_size = res->dma_buffer_size;
		goto skip_buf_size_calc;
	} else {
		res = &mconfig->module->resources[res_idx];
	}

	switch (mconfig->hw_conn_type) {
	case SKL_CONN_SOURCE:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->ibs;
		else
			dma_io_buf = res->obs;
		break;

	case SKL_CONN_SINK:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->obs;
		else
			dma_io_buf = res->ibs;
		break;

	default:
		dev_warn(ctx->dev, "wrong connection type: %d\n",
				mconfig->hw_conn_type);
		return;
	}

	cpr_mconfig->gtw_cfg.dma_buffer_size =
				mconfig->dma_buffer_size * dma_io_buf;

	/* fallback to 2ms default value */
	if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
		if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->obs;
		else
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->ibs;
	}

skip_buf_size_calc:
	cpr_mconfig->cpr_feature_mask = 0;
	cpr_mconfig->gtw_cfg.config_length = 0;

	skl_copy_copier_caps(mconfig, cpr_mconfig);
}

#define DMA_CONTROL_ID 5
#define DMA_I2S_BLOB_SIZE 21

int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
				u32 caps_size, u32 node_id)
{
	struct skl_dma_control *dma_ctrl;
	struct skl_ipc_large_config_msg msg = {0};
	int err = 0;


	/*
	 * if the blob size is zero, then return
	 */
	if (caps_size == 0)
		return 0;

	msg.large_param_id = DMA_CONTROL_ID;
	msg.param_data_size = sizeof(struct skl_dma_control) + caps_size;

	dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
	if (dma_ctrl == NULL)
		return -ENOMEM;

	dma_ctrl->node_id = node_id;

	/*
	 * The NHLT blob may contain additional configs along with the i2s
	 * blob. The firmware expects only the i2s blob size as the
	 * config_length, so fix it to the i2s blob size.
	 * The size is in dwords.
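	 * (DMA_I2S_BLOB_SIZE is 21 dwords, i.e. 84 bytes, and is used
	 * regardless of the caps_size passed in)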
	 */
	dma_ctrl->config_length = DMA_I2S_BLOB_SIZE;

	memcpy(dma_ctrl->config_data, caps, caps_size);

	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);

	kfree(dma_ctrl);
	return err;
}
EXPORT_SYMBOL_GPL(skl_dsp_set_dma_control);

static void skl_setup_out_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_audio_data_format *out_fmt)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->outputs[0].fmt;

	out_fmt->number_of_channels = (u8)format->channels;
	out_fmt->s_freq = format->s_freq;
	out_fmt->bit_depth = format->bit_depth;
	out_fmt->valid_bit_depth = format->valid_bit_depth;
	out_fmt->ch_cfg = format->ch_cfg;

	out_fmt->channel_map = format->ch_map;
	out_fmt->interleaving = format->interleaving_style;
	out_fmt->sample_type = format->sample_type;

	dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}

/*
 * The DSP needs the SRC module for frequency conversion. SRC takes the base
 * module configuration plus the target frequency as an extra parameter,
 * passed as the src config
 */
static void skl_set_src_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_src_module_cfg *src_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)src_mconfig);

	src_mconfig->src_cfg = fmt->s_freq;
}

/*
 * The DSP needs the updown module to do channel conversion. The updown
 * module takes the base module configuration and a channel configuration.
 * It also takes coefficients; for now defaults are applied here
 */
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_up_down_mixer_cfg *mixer_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)mixer_mconfig);
	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
	mixer_mconfig->ch_map = fmt->ch_map;
}

/*
 * 'copier' is a DSP internal module which copies data from Host DMA (HDA
 * host DMA) or a link (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, like PCM format, output
 * format and gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC msg
 */
static void skl_set_copier_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	skl_setup_out_format(ctx, mconfig, out_fmt);
	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}

/*
 * Algo modules are DSP pre-processing modules.
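 * (typically an effect block such as an EQ or filter, for example, whose
 * parameters are supplied by the topology caps)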
 * They take the base module configuration and params
 */

static void skl_set_algo_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_algo_cfg *algo_mcfg)
{
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(algo_mcfg->params,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

}

/*
 * The mic select module allows selecting one or many input channels, thus
 * acting as a demux.
 *
 * The mic select module takes the base module configuration and out-format
 * configuration
 */
static void skl_set_base_outfmt_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_outfmt_cfg *base_outfmt_mcfg)
{
	struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
	struct skl_base_cfg *base_cfg =
				(struct skl_base_cfg *)base_outfmt_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);
	skl_setup_out_format(ctx, mconfig, out_fmt);
}

static u16 skl_get_module_param_size(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 param_size;

	switch (mconfig->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		param_size = sizeof(struct skl_cpr_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_SRCINT:
		return sizeof(struct skl_src_module_cfg);

	case SKL_MODULE_TYPE_UPDWMIX:
		return sizeof(struct skl_up_down_mixer_cfg);

	case SKL_MODULE_TYPE_ALGO:
		param_size = sizeof(struct skl_base_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		return sizeof(struct skl_base_outfmt_cfg);

	default:
		/*
		 * return only base cfg when no specific module type is
		 * specified
		 */
		return sizeof(struct skl_base_cfg);
	}

	return 0;
}

/*
 * The DSP firmware supports various modules like copier, SRC, updown etc.
 * These modules require various parameters to be calculated and sent for
 * module initialization to the DSP. By default a generic module needs only
By default a generic module needs only 843 * base module format configuration 844 */ 845 846 static int skl_set_module_format(struct skl_sst *ctx, 847 struct skl_module_cfg *module_config, 848 u16 *module_config_size, 849 void **param_data) 850 { 851 u16 param_size; 852 853 param_size = skl_get_module_param_size(ctx, module_config); 854 855 *param_data = kzalloc(param_size, GFP_KERNEL); 856 if (NULL == *param_data) 857 return -ENOMEM; 858 859 *module_config_size = param_size; 860 861 switch (module_config->m_type) { 862 case SKL_MODULE_TYPE_COPIER: 863 skl_set_copier_format(ctx, module_config, *param_data); 864 break; 865 866 case SKL_MODULE_TYPE_SRCINT: 867 skl_set_src_format(ctx, module_config, *param_data); 868 break; 869 870 case SKL_MODULE_TYPE_UPDWMIX: 871 skl_set_updown_mixer_format(ctx, module_config, *param_data); 872 break; 873 874 case SKL_MODULE_TYPE_ALGO: 875 skl_set_algo_format(ctx, module_config, *param_data); 876 break; 877 878 case SKL_MODULE_TYPE_BASE_OUTFMT: 879 case SKL_MODULE_TYPE_MIC_SELECT: 880 case SKL_MODULE_TYPE_KPB: 881 skl_set_base_outfmt_format(ctx, module_config, *param_data); 882 break; 883 884 default: 885 skl_set_base_module_format(ctx, module_config, *param_data); 886 break; 887 888 } 889 890 dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n", 891 module_config->id.module_id, param_size); 892 print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4, 893 *param_data, param_size, false); 894 return 0; 895 } 896 897 static int skl_get_queue_index(struct skl_module_pin *mpin, 898 struct skl_module_inst_id id, int max) 899 { 900 int i; 901 902 for (i = 0; i < max; i++) { 903 if (mpin[i].id.module_id == id.module_id && 904 mpin[i].id.instance_id == id.instance_id) 905 return i; 906 } 907 908 return -EINVAL; 909 } 910 911 /* 912 * Allocates queue for each module. 913 * if dynamic, the pin_index is allocated 0 to max_pin. 
 * In static, the pin_index is fixed based on module_id and instance id
 */
static int skl_alloc_queue(struct skl_module_pin *mpin,
			struct skl_module_cfg *tgt_cfg, int max)
{
	int i;
	struct skl_module_inst_id id = tgt_cfg->id;
	/*
	 * if the pin is dynamic, find the first free pin;
	 * otherwise find the pin matching the module and instance id, as
	 * topology will ensure a unique pin is assigned to it, so there is
	 * no need to allocate/free
	 */
	for (i = 0; i < max; i++) {
		if (mpin[i].is_dynamic) {
			if (!mpin[i].in_use &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].in_use = true;
				mpin[i].id.module_id = id.module_id;
				mpin[i].id.instance_id = id.instance_id;
				mpin[i].id.pvt_id = id.pvt_id;
				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		} else {
			if (mpin[i].id.module_id == id.module_id &&
				mpin[i].id.instance_id == id.instance_id &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		}
	}

	return -EINVAL;
}

static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
	if (mpin[q_index].is_dynamic) {
		mpin[q_index].in_use = false;
		mpin[q_index].id.module_id = 0;
		mpin[q_index].id.instance_id = 0;
		mpin[q_index].id.pvt_id = 0;
	}
	mpin[q_index].pin_state = SKL_PIN_UNBIND;
	mpin[q_index].tgt_mcfg = NULL;
}

/* Module state will be set to uninit, if all the out pin states are UNBIND */

static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
						struct skl_module_cfg *mcfg)
{
	int i;
	bool found = false;

	for (i = 0; i < max; i++) {
		if (mpin[i].pin_state == SKL_PIN_UNBIND)
			continue;
		found = true;
		break;
	}

	if (!found)
		mcfg->m_state = SKL_MODULE_INIT_DONE;
	return;
}

/*
 * A module needs to be instantiated in the DSP. A module is present in a
 * collection of modules referred to as a PIPE.
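 * (a playback path, for instance, might group a host copier, a mixer and a
 * link copier inside a single pipe)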
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending the IPC INIT_INSTANCE using the ipc helper
 */
int skl_init_module(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 module_config_size = 0;
	void *param_data = NULL;
	int ret;
	struct skl_ipc_init_instance_msg msg;

	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
		 mconfig->id.module_id, mconfig->id.pvt_id);

	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
				 mconfig->pipe->state, mconfig->pipe->ppl_id);
		return -EIO;
	}

	ret = skl_set_module_format(ctx, mconfig,
			&module_config_size, &param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
		return ret;
	}

	msg.module_id = mconfig->id.module_id;
	msg.instance_id = mconfig->id.pvt_id;
	msg.ppl_instance_id = mconfig->pipe->ppl_id;
	msg.param_data_size = module_config_size;
	msg.core_id = mconfig->core_id;
	msg.domain = mconfig->domain;

	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
		kfree(param_data);
		return ret;
	}
	mconfig->m_state = SKL_MODULE_INIT_DONE;
	kfree(param_data);
	return ret;
}

static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
	*src_module, struct skl_module_cfg *dst_module)
{
	dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
		__func__, src_module->id.module_id, src_module->id.pvt_id);
	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
		 dst_module->id.module_id, dst_module->id.pvt_id);

	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
		src_module->m_state, dst_module->m_state);
}

/*
 * On module freeup, we need to unbind the module from the modules
 * it is already bound to.
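 * (if no allocated queue is found for a src/dst pair, the modules were never
 * bound and the unbind is simply skipped)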
 * Find the pins allocated and unbind them using the bind_unbind IPC
 */
int skl_unbind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret;
	struct skl_ipc_bind_unbind_msg msg;
	struct skl_module_inst_id src_id = src_mcfg->id;
	struct skl_module_inst_id dst_id = dst_mcfg->id;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index, src_pin_state, dst_pin_state;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	/* get src queue index */
	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
	if (src_index < 0)
		return 0;

	msg.src_queue = src_index;

	/* get dst queue index */
	dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
	if (dst_index < 0)
		return 0;

	msg.dst_queue = dst_index;

	src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
	dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;

	if (src_pin_state != SKL_PIN_BIND_DONE ||
		dst_pin_state != SKL_PIN_BIND_DONE)
		return 0;

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = false;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
	if (!ret) {
		/* free queue only if unbind is successful */
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		skl_free_queue(dst_mcfg->m_in_pin, dst_index);

		/*
		 * check only the src module bind state; bind is
		 * always from src -> sink
		 */
		skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
	}

	return ret;
}

static void fill_pin_params(struct skl_audio_data_format *pin_fmt,
				struct skl_module_fmt *format)
{
	pin_fmt->number_of_channels = format->channels;
	pin_fmt->s_freq = format->s_freq;
	pin_fmt->bit_depth = format->bit_depth;
	pin_fmt->valid_bit_depth = format->valid_bit_depth;
	pin_fmt->ch_cfg = format->ch_cfg;
	pin_fmt->sample_type = format->sample_type;
	pin_fmt->channel_map = format->ch_map;
	pin_fmt->interleaving = format->interleaving_style;
}

#define CPR_SINK_FMT_PARAM_ID 2

/*
 * Once a module is instantiated, it needs to be 'bound' with other modules
 * in the pipeline. For binding we need to find the module pins which are bound
For binding we need to find the module pins which are bind 1125 * together 1126 * This function finds the pins and then sends bund_unbind IPC message to 1127 * DSP using IPC helper 1128 */ 1129 int skl_bind_modules(struct skl_sst *ctx, 1130 struct skl_module_cfg *src_mcfg, 1131 struct skl_module_cfg *dst_mcfg) 1132 { 1133 int ret = 0; 1134 struct skl_ipc_bind_unbind_msg msg; 1135 int in_max = dst_mcfg->module->max_input_pins; 1136 int out_max = src_mcfg->module->max_output_pins; 1137 int src_index, dst_index; 1138 struct skl_module_fmt *format; 1139 struct skl_cpr_pin_fmt pin_fmt; 1140 struct skl_module *module; 1141 struct skl_module_iface *fmt; 1142 1143 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg); 1144 1145 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE || 1146 dst_mcfg->m_state < SKL_MODULE_INIT_DONE) 1147 return 0; 1148 1149 src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max); 1150 if (src_index < 0) 1151 return -EINVAL; 1152 1153 msg.src_queue = src_index; 1154 dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max); 1155 if (dst_index < 0) { 1156 skl_free_queue(src_mcfg->m_out_pin, src_index); 1157 return -EINVAL; 1158 } 1159 1160 /* 1161 * Copier module requires the separate large_config_set_ipc to 1162 * configure the pins other than 0 1163 */ 1164 if (src_mcfg->m_type == SKL_MODULE_TYPE_COPIER && src_index > 0) { 1165 pin_fmt.sink_id = src_index; 1166 module = src_mcfg->module; 1167 fmt = &module->formats[src_mcfg->fmt_idx]; 1168 1169 /* Input fmt is same as that of src module input cfg */ 1170 format = &fmt->inputs[0].fmt; 1171 fill_pin_params(&(pin_fmt.src_fmt), format); 1172 1173 format = &fmt->outputs[src_index].fmt; 1174 fill_pin_params(&(pin_fmt.dst_fmt), format); 1175 ret = skl_set_module_params(ctx, (void *)&pin_fmt, 1176 sizeof(struct skl_cpr_pin_fmt), 1177 CPR_SINK_FMT_PARAM_ID, src_mcfg); 1178 1179 if (ret < 0) 1180 goto out; 1181 } 1182 1183 msg.dst_queue = dst_index; 1184 1185 dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n", 1186 msg.src_queue, msg.dst_queue); 1187 1188 msg.module_id = src_mcfg->id.module_id; 1189 msg.instance_id = src_mcfg->id.pvt_id; 1190 msg.dst_module_id = dst_mcfg->id.module_id; 1191 msg.dst_instance_id = dst_mcfg->id.pvt_id; 1192 msg.bind = true; 1193 1194 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg); 1195 1196 if (!ret) { 1197 src_mcfg->m_state = SKL_MODULE_BIND_DONE; 1198 src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE; 1199 dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE; 1200 return ret; 1201 } 1202 out: 1203 /* error case , if IPC fails, clear the queue index */ 1204 skl_free_queue(src_mcfg->m_out_pin, src_index); 1205 skl_free_queue(dst_mcfg->m_in_pin, dst_index); 1206 1207 return ret; 1208 } 1209 1210 static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe, 1211 enum skl_ipc_pipeline_state state) 1212 { 1213 dev_dbg(ctx->dev, "%s: pipe_satate = %d\n", __func__, state); 1214 1215 return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state); 1216 } 1217 1218 /* 1219 * A pipeline is a collection of modules. Before a module in instantiated a 1220 * pipeline needs to be created for it. 
 * This function creates the pipeline by sending the create-pipeline IPC
 * message to the FW
 */
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);

	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
				pipe->pipe_priority, pipe->ppl_id,
				pipe->lp_mode);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to create pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_CREATED;

	return 0;
}

/*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
 * pause the pipeline first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
 * stop the DMA engines and release resources
 */
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe is started, do stop the pipe in FW. */
	if (pipe->state >= SKL_PIPE_STARTED) {
		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to stop pipeline\n");
			return ret;
		}

		pipe->state = SKL_PIPE_PAUSED;
	}

	/* If pipe was not created in FW, do not try to delete it */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to delete pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_INVALID;

	return ret;
}

/*
 * A pipeline is also a scheduling entity in the DSP which can be run or
 * stopped.
 * For processing data, the pipe needs to be run by sending the IPC set pipe
 * state to the DSP
 */
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	/* Pipe has to be paused before it is started */
	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to pause pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to start pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_STARTED;

	return 0;
}

/*
 * Stop the pipeline by sending the set pipe state IPC.
 * The DSP doesn't implement stop, so we always send the pause message
 */
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to stop pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	return 0;
}

/*
 * Reset the pipeline by sending the set pipe state IPC; this will reset the
 * DMA from the DSP side
 */
int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
		return ret;
	}

	pipe->state = SKL_PIPE_RESET;

	return 0;
}

/* Algo parameter set helper function */
int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
				u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
}

int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
			  u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
}