/*
 *  sst.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14	Intel Corp
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/async.h>
#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
#include "../../common/sst-dsp.h"

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
MODULE_LICENSE("GPL v2");

static inline bool sst_is_process_reply(u32 msg_id)
{
	return ((msg_id & PROCESS_MSG) ? true : false);
}

static inline bool sst_validate_mailbox_size(unsigned int size)
{
	return ((size <= SST_MAILBOX_SIZE) ? true : false);
}

static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
{
	union interrupt_reg_mrfld isr;
	union ipc_header_mrfld header;
	union sst_imr_reg_mrfld imr;
	struct ipc_post *msg = NULL;
	unsigned int size = 0;
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	irqreturn_t retval = IRQ_HANDLED;

	/* Interrupt arrived, check src */
	isr.full = sst_shim_read64(drv->shim, SST_ISRX);

	if (isr.part.done_interrupt) {
		/* Clear done bit */
		spin_lock(&drv->ipc_spin_lock);
		header.full = sst_shim_read64(drv->shim,
					drv->ipc_reg.ipcx);
		header.p.header_high.part.done = 0;
		sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);

		/* write 1 to clear status register */
		isr.part.done_interrupt = 1;
		sst_shim_write64(drv->shim, SST_ISRX, isr.full);
		spin_unlock(&drv->ipc_spin_lock);

		/* we can send more messages to DSP so trigger work */
		queue_work(drv->post_msg_wq, &drv->ipc_post_msg_wq);
		retval = IRQ_HANDLED;
	}

	if (isr.part.busy_interrupt) {
		/* message from dsp so copy that */
		spin_lock(&drv->ipc_spin_lock);
		imr.full = sst_shim_read64(drv->shim, SST_IMRX);
		imr.part.busy_interrupt = 1;
		sst_shim_write64(drv->shim, SST_IMRX, imr.full);
		spin_unlock(&drv->ipc_spin_lock);
		header.full = sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);

		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
			drv->ops->clear_interrupt(drv);
			return IRQ_HANDLED;
		}

		if (header.p.header_high.part.large) {
			size = header.p.header_low_payload;
			if (sst_validate_mailbox_size(size)) {
				memcpy_fromio(msg->mailbox_data,
					drv->mailbox + drv->mailbox_recv_offset, size);
			} else {
				dev_err(drv->dev,
					"Mailbox not copied, payload size is: %u\n", size);
				header.p.header_low_payload = 0;
			}
		}

		msg->mrfld_header = header;
		msg->is_process_reply =
			sst_is_process_reply(header.p.header_high.part.msg_id);
		spin_lock(&drv->rx_msg_lock);
		list_add_tail(&msg->node, &drv->rx_list);
		spin_unlock(&drv->rx_msg_lock);
		drv->ops->clear_interrupt(drv);
		retval = IRQ_WAKE_THREAD;
	}
	return retval;
}

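/*
 * Bottom half of the MRFLD IPC interrupt: intel_sst_interrupt_mrfld() above
 * runs in hard-IRQ context and only acks the DONE bit and queues incoming
 * BUSY messages on rx_list; this threaded handler drains that list and
 * dispatches each message to the process_message/process_reply ops outside
 * of atomic context.
 */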
static irqreturn_t intel_sst_irq_thread_mrfld(int irq, void *context)
{
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	struct ipc_post *__msg, *msg = NULL;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	if (list_empty(&drv->rx_list)) {
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		return IRQ_HANDLED;
	}

	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
		list_del(&msg->node);
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		if (msg->is_process_reply)
			drv->ops->process_message(msg);
		else
			drv->ops->process_reply(drv, msg);

		if (msg->is_large)
			kfree(msg->mailbox_data);
		kfree(msg);
		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	}
	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
	return IRQ_HANDLED;
}

static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
{
	int ret = 0;

	ret = sst_prepare_and_post_msg(sst, SST_TASK_ID_MEDIA, IPC_CMD,
			IPC_PREP_D3, PIPE_RSVD, 0, NULL, NULL,
			true, true, false, true);

	if (ret < 0) {
		dev_err(sst->dev, "not suspending FW!!, Err: %d\n", ret);
		return -EIO;
	}

	return 0;
}

static struct intel_sst_ops mrfld_ops = {
	.interrupt = intel_sst_interrupt_mrfld,
	.irq_thread = intel_sst_irq_thread_mrfld,
	.clear_interrupt = intel_sst_clear_intr_mrfld,
	.start = sst_start_mrfld,
	.reset = intel_sst_reset_dsp_mrfld,
	.post_message = sst_post_message_mrfld,
	.process_reply = sst_process_reply_mrfld,
	.save_dsp_context = sst_save_dsp_context_v2,
	.alloc_stream = sst_alloc_stream_mrfld,
	.post_download = sst_post_download_mrfld,
};

int sst_driver_ops(struct intel_sst_drv *sst)
{
	switch (sst->dev_id) {
	case SST_MRFLD_PCI_ID:
	case SST_BYT_ACPI_ID:
	case SST_CHV_ACPI_ID:
		sst->tstamp = SST_TIME_STAMP_MRFLD;
		sst->ops = &mrfld_ops;
		return 0;

	default:
		dev_err(sst->dev,
			"SST Driver capabilities missing for dev_id: %x\n",
			sst->dev_id);
		return -EINVAL;
	}
}

void sst_process_pending_msg(struct work_struct *work)
{
	struct intel_sst_drv *ctx = container_of(work,
			struct intel_sst_drv, ipc_post_msg_wq);

	ctx->ops->post_message(ctx, NULL, false);
}

static int sst_workqueue_init(struct intel_sst_drv *ctx)
{
	INIT_LIST_HEAD(&ctx->memcpy_list);
	INIT_LIST_HEAD(&ctx->rx_list);
	INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
	INIT_LIST_HEAD(&ctx->block_list);
	INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg);
	init_waitqueue_head(&ctx->wait_queue);

	ctx->post_msg_wq =
		create_singlethread_workqueue("sst_post_msg_wq");
	if (!ctx->post_msg_wq)
		return -EBUSY;
	return 0;
}

static void sst_init_locks(struct intel_sst_drv *ctx)
{
	mutex_init(&ctx->sst_lock);
	spin_lock_init(&ctx->rx_msg_lock);
	spin_lock_init(&ctx->ipc_spin_lock);
	spin_lock_init(&ctx->block_lock);
}

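/*
 * sst_alloc_drv_context() and sst_context_init() below are the entry points
 * used by the platform glue code (PCI/ACPI probe paths). A minimal bring-up
 * sketch, illustrative only; field names are those of struct intel_sst_drv
 * in sst.h and must be filled in before sst_context_init():
 *
 *	struct intel_sst_drv *ctx;
 *
 *	ret = sst_alloc_drv_context(&ctx, dev, dev_id);
 *	if (ret < 0)
 *		return ret;
 *	ctx->pdata = ...;	// platform data carrying probe_data/ipc_info
 *	ctx->irq_num = ...;	// IPC interrupt line
 *	ctx->shim = ...;	// plus mailbox, firmware_name, ...
 *	ret = sst_context_init(ctx);
 */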
int sst_alloc_drv_context(struct intel_sst_drv **ctx,
		struct device *dev, unsigned int dev_id)
{
	*ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL);
	if (!(*ctx))
		return -ENOMEM;

	(*ctx)->dev = dev;
	(*ctx)->dev_id = dev_id;

	return 0;
}
EXPORT_SYMBOL_GPL(sst_alloc_drv_context);

int sst_context_init(struct intel_sst_drv *ctx)
{
	int ret = 0, i;

	if (!ctx->pdata)
		return -EINVAL;

	if (!ctx->pdata->probe_data)
		return -EINVAL;

	memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));

	ret = sst_driver_ops(ctx);
	if (ret != 0)
		return -EINVAL;

	sst_init_locks(ctx);
	sst_set_fw_state_locked(ctx, SST_RESET);

	/* pvt_id 0 reserved for async messages */
	ctx->pvt_id = 1;
	ctx->stream_cnt = 0;
	ctx->fw_in_mem = NULL;
	/* we use memcpy, so set to 0 */
	ctx->use_dma = 0;
	ctx->use_lli = 0;

	if (sst_workqueue_init(ctx))
		return -EINVAL;

	ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
	ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
	ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;

	dev_info(ctx->dev, "Got drv data max stream %d\n",
				ctx->info.max_streams);

	for (i = 1; i <= ctx->info.max_streams; i++) {
		struct stream_info *stream = &ctx->streams[i];

		memset(stream, 0, sizeof(*stream));
		stream->pipe_id = PIPE_RSVD;
		mutex_init(&stream->lock);
	}

	/* Register the ISR */
	ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
					ctx->ops->irq_thread, 0, SST_DRV_NAME,
					ctx);
	if (ret)
		goto do_free_mem;

	dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num);

	/* interrupts are unmasked by default, so mask them here */
	sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);

	ctx->qos = devm_kzalloc(ctx->dev,
		sizeof(struct pm_qos_request), GFP_KERNEL);
	if (!ctx->qos) {
		ret = -ENOMEM;
		goto do_free_mem;
	}
	pm_qos_add_request(ctx->qos, PM_QOS_CPU_DMA_LATENCY,
				PM_QOS_DEFAULT_VALUE);

	dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name);
	ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name,
				      ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
	if (ret) {
		dev_err(ctx->dev, "Firmware request failed: %d\n", ret);
		goto do_free_mem;
	}
	sst_register(ctx->dev);
	return 0;

do_free_mem:
	destroy_workqueue(ctx->post_msg_wq);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_context_init);

void sst_context_cleanup(struct intel_sst_drv *ctx)
{
	pm_runtime_get_noresume(ctx->dev);
	pm_runtime_disable(ctx->dev);
	sst_unregister(ctx->dev);
	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
	flush_scheduled_work();
	destroy_workqueue(ctx->post_msg_wq);
	pm_qos_remove_request(ctx->qos);
	kfree(ctx->fw_sg_list.src);
	kfree(ctx->fw_sg_list.dst);
	ctx->fw_sg_list.list_len = 0;
	kfree(ctx->fw_in_mem);
	ctx->fw_in_mem = NULL;
	sst_memcpy_free_resources(ctx);
	ctx = NULL;
}
EXPORT_SYMBOL_GPL(sst_context_cleanup);

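/*
 * The PMC does not preserve the 64-bit shim registers across low-power
 * transitions, so IMRX and CSR are snapshotted here before the LPE goes
 * down and written back by sst_restore_shim64() on the way back up.
 */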
static inline void sst_save_shim64(struct intel_sst_drv *ctx,
				   void __iomem *shim,
				   struct sst_shim_regs64 *shim_regs)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);

	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
	shim_regs->csr = sst_shim_read64(shim, SST_CSR);

	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
}

static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
				      void __iomem *shim,
				      struct sst_shim_regs64 *shim_regs)
{
	unsigned long irq_flags;

	/*
	 * we only need to restore IMRX for this case, the rest will be
	 * initialized by FW or driver when firmware is loaded
	 */
	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
	sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
	sst_shim_write64(shim, SST_CSR, shim_regs->csr);
	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
}

void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
{
	pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(ctx->dev);
	/*
	 * For acpi devices, the actual physical device state is
	 * initially active. So change the state to active before
	 * enabling the pm
	 */
	if (!acpi_disabled)
		pm_runtime_set_active(ctx->dev);

	pm_runtime_enable(ctx->dev);

	if (acpi_disabled)
		pm_runtime_set_active(ctx->dev);
	else
		pm_runtime_put_noidle(ctx->dev);

	sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
}
EXPORT_SYMBOL_GPL(sst_configure_runtime_pm);

static int intel_sst_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);

	if (ctx->sst_state == SST_RESET) {
		dev_dbg(dev, "LPE is already in RESET state, No action\n");
		return 0;
	}
	/* save fw context */
	if (ctx->ops->save_dsp_context(ctx))
		return -EBUSY;

	/* Move the SST state to Reset */
	sst_set_fw_state_locked(ctx, SST_RESET);

	synchronize_irq(ctx->irq_num);
	flush_workqueue(ctx->post_msg_wq);

	ctx->ops->reset(ctx);
	/* save the shim registers because PMC doesn't save state */
	sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);

	return ret;
}

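/*
 * Full system suspend, unlike runtime suspend above, also saves the complete
 * IRAM, DRAM, mailbox SRAM and DDR contents, so that intel_sst_resume() can
 * restore the firmware image and its state by copying the buffers back and
 * restarting the DSP, without re-requesting the firmware.
 */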
static int intel_sst_suspend(struct device *dev)
{
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
	struct sst_fw_save *fw_save;
	int i, ret = 0;

	/* check first if we are already in SW reset */
	if (ctx->sst_state == SST_RESET)
		return 0;

	/*
	 * check if any stream is active and running
	 * they should already be suspended by soc_suspend
	 */
	for (i = 1; i <= ctx->info.max_streams; i++) {
		struct stream_info *stream = &ctx->streams[i];

		if (stream->status == STREAM_RUNNING) {
			dev_err(dev, "stream %d is running, can't suspend, abort\n", i);
			return -EBUSY;
		}
	}
	synchronize_irq(ctx->irq_num);
	flush_workqueue(ctx->post_msg_wq);

	/* Move the SST state to Reset */
	sst_set_fw_state_locked(ctx, SST_RESET);

	/* tell DSP we are suspending */
	if (ctx->ops->save_dsp_context(ctx))
		return -EBUSY;

	/* save the memories */
	fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
	if (!fw_save)
		return -ENOMEM;
	fw_save->iram = kzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
	if (!fw_save->iram) {
		ret = -ENOMEM;
		goto iram;
	}
	fw_save->dram = kzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
	if (!fw_save->dram) {
		ret = -ENOMEM;
		goto dram;
	}
	fw_save->sram = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
	if (!fw_save->sram) {
		ret = -ENOMEM;
		goto sram;
	}

	fw_save->ddr = kzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
	if (!fw_save->ddr) {
		ret = -ENOMEM;
		goto ddr;
	}

	memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base);
	memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base);
	memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE);
	memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base);

	ctx->fw_save = fw_save;
	ctx->ops->reset(ctx);
	return 0;
ddr:
	kfree(fw_save->sram);
sram:
	kfree(fw_save->dram);
dram:
	kfree(fw_save->iram);
iram:
	kfree(fw_save);
	return ret;
}

static int intel_sst_resume(struct device *dev)
{
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
	struct sst_fw_save *fw_save = ctx->fw_save;
	int ret = 0;
	struct sst_block *block;

	if (!fw_save)
		return 0;

	sst_set_fw_state_locked(ctx, SST_FW_LOADING);

	/* we have to restore the saved memory */
	ctx->ops->reset(ctx);

	ctx->fw_save = NULL;

	memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base);
	memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base);
	memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
	memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);

	kfree(fw_save->sram);
	kfree(fw_save->dram);
	kfree(fw_save->iram);
	kfree(fw_save->ddr);
	kfree(fw_save);

	block = sst_create_block(ctx, 0, FW_DWNL_ID);
	if (block == NULL)
		return -ENOMEM;

	/* start the DSP and wait for the firmware download ack */
	ctx->ops->start(ctx);
	ret = sst_wait_timeout(ctx, block);
	if (ret) {
		dev_err(ctx->dev, "fw download failed %d\n", ret);
		/* FW download failed due to timeout */
		ret = -EBUSY;
	} else {
		sst_set_fw_state_locked(ctx, SST_FW_RUNNING);
	}

	sst_free_block(ctx, block);
	return ret;
}

const struct dev_pm_ops intel_sst_pm = {
	.suspend = intel_sst_suspend,
	.resume = intel_sst_resume,
	.runtime_suspend = intel_sst_runtime_suspend,
};
EXPORT_SYMBOL_GPL(intel_sst_pm);