xref: /openbmc/linux/sound/soc/intel/atom/sst/sst.c (revision f3a8b664)
1 /*
2  *  sst.c - Intel SST Driver for audio engine
3  *
4  *  Copyright (C) 2008-14	Intel Corp
5  *  Authors:	Vinod Koul <vinod.koul@intel.com>
6  *		Harsha Priya <priya.harsha@intel.com>
7  *		Dharageswari R <dharageswari.r@intel.com>
8  *		KP Jeeja <jeeja.kp@intel.com>
9  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; version 2 of the License.
14  *
15  *  This program is distributed in the hope that it will be useful, but
16  *  WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  *  General Public License for more details.
19  *
20  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21  */
22 #include <linux/module.h>
23 #include <linux/fs.h>
24 #include <linux/interrupt.h>
25 #include <linux/firmware.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_qos.h>
28 #include <linux/async.h>
29 #include <linux/acpi.h>
30 #include <sound/core.h>
31 #include <sound/soc.h>
32 #include <asm/platform_sst_audio.h>
33 #include "../sst-mfld-platform.h"
34 #include "sst.h"
35 #include "../../common/sst-dsp.h"
36 
37 MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
38 MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
39 MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
40 MODULE_LICENSE("GPL v2");
41 
42 static inline bool sst_is_process_reply(u32 msg_id)
43 {
44 	return ((msg_id & PROCESS_MSG) ? true : false);
45 }
46 
47 static inline bool sst_validate_mailbox_size(unsigned int size)
48 {
49 	return ((size <= SST_MAILBOX_SIZE) ? true : false);
50 }
51 
/*
 * intel_sst_interrupt_mrfld - hard IRQ (top half) handler for the SST shim
 * @irq: interrupt number
 * @context: struct intel_sst_drv registered at request_irq() time
 *
 * Two interrupt sources share this line:
 *  - "done": the DSP consumed the command we posted in IPCX; clear the
 *    done bit and kick the post-message work so queued commands go out.
 *  - "busy": the DSP posted a message in IPCD; copy header (and, for
 *    large messages, the mailbox payload) into a freshly allocated
 *    ipc_post, queue it on rx_list, and return IRQ_WAKE_THREAD so
 *    intel_sst_irq_thread_mrfld() processes it.
 */
static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
{
	union interrupt_reg_mrfld isr;
	union ipc_header_mrfld header;
	union sst_imr_reg_mrfld imr;
	struct ipc_post *msg = NULL;
	unsigned int size = 0;
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	irqreturn_t retval = IRQ_HANDLED;

	/* Interrupt arrived, check src */
	isr.full = sst_shim_read64(drv->shim, SST_ISRX);

	if (isr.part.done_interrupt) {
		/* Clear done bit */
		spin_lock(&drv->ipc_spin_lock);
		header.full = sst_shim_read64(drv->shim,
					drv->ipc_reg.ipcx);
		header.p.header_high.part.done = 0;
		sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);

		/* write 1 to clear status register */;
		isr.part.done_interrupt = 1;
		sst_shim_write64(drv->shim, SST_ISRX, isr.full);
		spin_unlock(&drv->ipc_spin_lock);

		/* we can send more messages to DSP so trigger work */
		queue_work(drv->post_msg_wq, &drv->ipc_post_msg_wq);
		retval = IRQ_HANDLED;
	}

	if (isr.part.busy_interrupt) {
		/* message from dsp so copy that */
		spin_lock(&drv->ipc_spin_lock);
		/* mask further busy interrupts until this one is serviced */
		imr.full = sst_shim_read64(drv->shim, SST_IMRX);
		imr.part.busy_interrupt = 1;
		sst_shim_write64(drv->shim, SST_IMRX, imr.full);
		spin_unlock(&drv->ipc_spin_lock);
		header.full =  sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);

		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
			/* allocation failed: ack the interrupt, message is lost */
			drv->ops->clear_interrupt(drv);
			return IRQ_HANDLED;
		}

		if (header.p.header_high.part.large) {
			size = header.p.header_low_payload;
			if (sst_validate_mailbox_size(size)) {
				memcpy_fromio(msg->mailbox_data,
					drv->mailbox + drv->mailbox_recv_offset, size);
			} else {
				/* oversized payload: keep the header, drop the body */
				dev_err(drv->dev,
					"Mailbox not copied, payload size is: %u\n", size);
				header.p.header_low_payload = 0;
			}
		}

		msg->mrfld_header = header;
		msg->is_process_reply =
			sst_is_process_reply(header.p.header_high.part.msg_id);
		spin_lock(&drv->rx_msg_lock);
		list_add_tail(&msg->node, &drv->rx_list);
		spin_unlock(&drv->rx_msg_lock);
		drv->ops->clear_interrupt(drv);
		retval = IRQ_WAKE_THREAD;
	}
	return retval;
}
120 
/*
 * intel_sst_irq_thread_mrfld - threaded IRQ handler: drain rx_list
 * @irq: interrupt number
 * @context: struct intel_sst_drv registered at request_irq() time
 *
 * Dispatches each message queued by the top half, releasing rx_msg_lock
 * around the callout so the hard IRQ handler can keep queueing while we
 * work.  Process-class replies and regular replies take different ops
 * callbacks; large-message payload buffers are freed along with the msg.
 */
static irqreturn_t intel_sst_irq_thread_mrfld(int irq, void *context)
{
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	struct ipc_post *__msg, *msg = NULL;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	if (list_empty(&drv->rx_list)) {
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		return IRQ_HANDLED;
	}

	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
		list_del(&msg->node);
		/* drop the lock while calling out to the message handlers */
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		if (msg->is_process_reply)
			drv->ops->process_message(msg);
		else
			drv->ops->process_reply(drv, msg);

		if (msg->is_large)
			kfree(msg->mailbox_data);
		kfree(msg);
		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	}
	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
	return IRQ_HANDLED;
}
149 
150 static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
151 {
152 	int ret = 0;
153 
154 	ret = sst_prepare_and_post_msg(sst, SST_TASK_ID_MEDIA, IPC_CMD,
155 			IPC_PREP_D3, PIPE_RSVD, 0, NULL, NULL,
156 			true, true, false, true);
157 
158 	if (ret < 0) {
159 		dev_err(sst->dev, "not suspending FW!!, Err: %d\n", ret);
160 		return -EIO;
161 	}
162 
163 	return 0;
164 }
165 
166 
/* Operations table used for Merrifield, Baytrail and Cherrytrail parts
 * (selected by sst_driver_ops() based on the device id). */
static struct intel_sst_ops mrfld_ops = {
	.interrupt = intel_sst_interrupt_mrfld,
	.irq_thread = intel_sst_irq_thread_mrfld,
	.clear_interrupt = intel_sst_clear_intr_mrfld,
	.start = sst_start_mrfld,
	.reset = intel_sst_reset_dsp_mrfld,
	.post_message = sst_post_message_mrfld,
	.process_reply = sst_process_reply_mrfld,
	.save_dsp_context =  sst_save_dsp_context_v2,
	.alloc_stream = sst_alloc_stream_mrfld,
	.post_download = sst_post_download_mrfld,
};
179 
180 int sst_driver_ops(struct intel_sst_drv *sst)
181 {
182 
183 	switch (sst->dev_id) {
184 	case SST_MRFLD_PCI_ID:
185 	case SST_BYT_ACPI_ID:
186 	case SST_CHV_ACPI_ID:
187 		sst->tstamp = SST_TIME_STAMP_MRFLD;
188 		sst->ops = &mrfld_ops;
189 		return 0;
190 
191 	default:
192 		dev_err(sst->dev,
193 			"SST Driver capabilities missing for dev_id: %x",
194 			sst->dev_id);
195 		return -EINVAL;
196 	};
197 }
198 
199 void sst_process_pending_msg(struct work_struct *work)
200 {
201 	struct intel_sst_drv *ctx = container_of(work,
202 			struct intel_sst_drv, ipc_post_msg_wq);
203 
204 	ctx->ops->post_message(ctx, NULL, false);
205 }
206 
207 static int sst_workqueue_init(struct intel_sst_drv *ctx)
208 {
209 	INIT_LIST_HEAD(&ctx->memcpy_list);
210 	INIT_LIST_HEAD(&ctx->rx_list);
211 	INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
212 	INIT_LIST_HEAD(&ctx->block_list);
213 	INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg);
214 	init_waitqueue_head(&ctx->wait_queue);
215 
216 	ctx->post_msg_wq =
217 		create_singlethread_workqueue("sst_post_msg_wq");
218 	if (!ctx->post_msg_wq)
219 		return -EBUSY;
220 	return 0;
221 }
222 
223 static void sst_init_locks(struct intel_sst_drv *ctx)
224 {
225 	mutex_init(&ctx->sst_lock);
226 	spin_lock_init(&ctx->rx_msg_lock);
227 	spin_lock_init(&ctx->ipc_spin_lock);
228 	spin_lock_init(&ctx->block_lock);
229 }
230 
231 int sst_alloc_drv_context(struct intel_sst_drv **ctx,
232 		struct device *dev, unsigned int dev_id)
233 {
234 	*ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL);
235 	if (!(*ctx))
236 		return -ENOMEM;
237 
238 	(*ctx)->dev = dev;
239 	(*ctx)->dev_id = dev_id;
240 
241 	return 0;
242 }
243 EXPORT_SYMBOL_GPL(sst_alloc_drv_context);
244 
/*
 * sst_context_init - one-time initialization of the SST driver context
 * @ctx: context allocated by sst_alloc_drv_context(); pdata must be set
 *
 * Copies probe data, selects the ops table, sets up locks/workqueues,
 * computes IPC register offsets, initializes the stream table, registers
 * the threaded ISR, adds a PM QoS request and kicks off the asynchronous
 * firmware load.  Returns 0 on success or a negative errno; on failure
 * after the workqueue was created, the workqueue is destroyed again.
 */
int sst_context_init(struct intel_sst_drv *ctx)
{
	int ret = 0, i;

	if (!ctx->pdata)
		return -EINVAL;

	if (!ctx->pdata->probe_data)
		return -EINVAL;

	memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));

	ret = sst_driver_ops(ctx);
	if (ret != 0)
		return -EINVAL;

	sst_init_locks(ctx);
	sst_set_fw_state_locked(ctx, SST_RESET);

	/* pvt_id 0 reserved for async messages */
	ctx->pvt_id = 1;
	ctx->stream_cnt = 0;
	ctx->fw_in_mem = NULL;
	/* we use memcpy, so set to 0 */
	ctx->use_dma = 0;
	ctx->use_lli = 0;

	if (sst_workqueue_init(ctx))
		return -EINVAL;

	/* IPC register locations are platform-specific offsets */
	ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
	ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
	ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;

	dev_info(ctx->dev, "Got drv data max stream %d\n",
				ctx->info.max_streams);

	/* stream 0 is unused; streams are indexed 1..max_streams */
	for (i = 1; i <= ctx->info.max_streams; i++) {
		struct stream_info *stream = &ctx->streams[i];

		memset(stream, 0, sizeof(*stream));
		stream->pipe_id = PIPE_RSVD;
		mutex_init(&stream->lock);
	}

	/* Register the ISR */
	ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
					ctx->ops->irq_thread, 0, SST_DRV_NAME,
					ctx);
	if (ret)
		goto do_free_mem;

	dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num);

	/* default intr are unmasked so set this as masked */
	sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);

	ctx->qos = devm_kzalloc(ctx->dev,
		sizeof(struct pm_qos_request), GFP_KERNEL);
	if (!ctx->qos) {
		ret = -ENOMEM;
		goto do_free_mem;
	}
	pm_qos_add_request(ctx->qos, PM_QOS_CPU_DMA_LATENCY,
				PM_QOS_DEFAULT_VALUE);

	/* firmware is loaded asynchronously; sst_firmware_load_cb finishes */
	dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name);
	ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name,
				      ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
	if (ret) {
		dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
		goto do_free_mem;
	}
	sst_register(ctx->dev);
	return 0;

do_free_mem:
	destroy_workqueue(ctx->post_msg_wq);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_context_init);
326 
327 void sst_context_cleanup(struct intel_sst_drv *ctx)
328 {
329 	pm_runtime_get_noresume(ctx->dev);
330 	pm_runtime_disable(ctx->dev);
331 	sst_unregister(ctx->dev);
332 	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
333 	flush_scheduled_work();
334 	destroy_workqueue(ctx->post_msg_wq);
335 	pm_qos_remove_request(ctx->qos);
336 	kfree(ctx->fw_sg_list.src);
337 	kfree(ctx->fw_sg_list.dst);
338 	ctx->fw_sg_list.list_len = 0;
339 	kfree(ctx->fw_in_mem);
340 	ctx->fw_in_mem = NULL;
341 	sst_memcpy_free_resources(ctx);
342 	ctx = NULL;
343 }
344 EXPORT_SYMBOL_GPL(sst_context_cleanup);
345 
346 static inline void sst_save_shim64(struct intel_sst_drv *ctx,
347 			    void __iomem *shim,
348 			    struct sst_shim_regs64 *shim_regs)
349 {
350 	unsigned long irq_flags;
351 
352 	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
353 
354 	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
355 	shim_regs->csr = sst_shim_read64(shim, SST_CSR);
356 
357 
358 	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
359 }
360 
361 static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
362 				      void __iomem *shim,
363 				      struct sst_shim_regs64 *shim_regs)
364 {
365 	unsigned long irq_flags;
366 
367 	/*
368 	 * we only need to restore IMRX for this case, rest will be
369 	 * initialize by FW or driver when firmware is loaded
370 	 */
371 	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
372 	sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
373 	sst_shim_write64(shim, SST_CSR, shim_regs->csr);
374 	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
375 }
376 
/*
 * sst_configure_runtime_pm - enable autosuspend-based runtime PM
 * @ctx: driver context
 *
 * Sets up autosuspend with SST_SUSPEND_DELAY, marks the device active
 * before enabling PM for ACPI platforms (where the hardware starts out
 * powered), and finally snapshots the shim registers so they can be
 * restored after a power loss.
 * NOTE(review): the acpi_disabled branches differ only in ordering of
 * set_active vs enable and the put_noidle — presumably to balance the
 * usage count differently for PCI vs ACPI enumeration; confirm against
 * the probe paths before changing.
 */
void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
{
	pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(ctx->dev);
	/*
	 * For acpi devices, the actual physical device state is
	 * initially active. So change the state to active before
	 * enabling the pm
	 */

	if (!acpi_disabled)
		pm_runtime_set_active(ctx->dev);

	pm_runtime_enable(ctx->dev);

	if (acpi_disabled)
		pm_runtime_set_active(ctx->dev);
	else
		pm_runtime_put_noidle(ctx->dev);

	/* keep a copy of the shim registers for restore on resume */
	sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
}
EXPORT_SYMBOL_GPL(sst_configure_runtime_pm);
400 
/*
 * intel_sst_runtime_suspend - runtime PM suspend hook
 * @dev: the SST device
 *
 * Asks the firmware to save its context, moves the driver state to
 * SST_RESET, quiesces the IRQ and work queue, resets the DSP and saves
 * the shim registers (the PMC does not preserve them).  Returns 0, or
 * -EBUSY if the firmware context could not be saved.
 */
static int intel_sst_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);

	if (ctx->sst_state == SST_RESET) {
		dev_dbg(dev, "LPE is already in RESET state, No action\n");
		return 0;
	}
	/* save fw context */
	if (ctx->ops->save_dsp_context(ctx))
		return -EBUSY;

	/* Move the SST state to Reset */
	sst_set_fw_state_locked(ctx, SST_RESET);

	/* make sure no handler or queued message is still in flight */
	synchronize_irq(ctx->irq_num);
	flush_workqueue(ctx->post_msg_wq);

	ctx->ops->reset(ctx);
	/* save the shim registers because PMC doesn't save state */
	sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);

	return ret;
}
426 
/*
 * intel_sst_suspend - system suspend hook
 * @dev: the SST device
 *
 * Refuses to suspend while any stream is still running, quiesces IRQ and
 * work queue, tells the firmware to prepare for D3, then saves IRAM,
 * DRAM, the mailbox SRAM and DDR contents into a freshly allocated
 * sst_fw_save so intel_sst_resume() can restore them without a full
 * firmware download.  Returns 0, -EBUSY, or -ENOMEM.
 */
static int intel_sst_suspend(struct device *dev)
{
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
	struct sst_fw_save *fw_save;
	int i, ret = 0;

	/* check first if we are already in SW reset */
	if (ctx->sst_state == SST_RESET)
		return 0;

	/*
	 * check if any stream is active and running
	 * they should already by suspend by soc_suspend
	 */
	for (i = 1; i <= ctx->info.max_streams; i++) {
		struct stream_info *stream = &ctx->streams[i];

		if (stream->status == STREAM_RUNNING) {
			dev_err(dev, "stream %d is running, can't suspend, abort\n", i);
			return -EBUSY;
		}
	}
	synchronize_irq(ctx->irq_num);
	flush_workqueue(ctx->post_msg_wq);

	/* Move the SST state to Reset */
	sst_set_fw_state_locked(ctx, SST_RESET);

	/* tell DSP we are suspending */
	if (ctx->ops->save_dsp_context(ctx))
		return -EBUSY;

	/* save the memories */
	fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
	if (!fw_save)
		return -ENOMEM;
	fw_save->iram = kzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
	if (!fw_save->iram) {
		ret = -ENOMEM;
		goto iram;
	}
	fw_save->dram = kzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
	if (!fw_save->dram) {
		ret = -ENOMEM;
		goto dram;
	}
	fw_save->sram = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
	if (!fw_save->sram) {
		ret = -ENOMEM;
		goto sram;
	}

	fw_save->ddr = kzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
	if (!fw_save->ddr) {
		ret = -ENOMEM;
		goto ddr;
	}

	/* copy the DSP memories out through the iomem mappings */
	memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base);
	memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base);
	memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE);
	memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base);

	ctx->fw_save = fw_save;
	ctx->ops->reset(ctx);
	return 0;
	/* unwind: each label is named after the allocation that failed */
ddr:
	kfree(fw_save->sram);
sram:
	kfree(fw_save->dram);
dram:
	kfree(fw_save->iram);
iram:
	kfree(fw_save);
	return ret;
}
503 
/*
 * intel_sst_resume - system resume hook
 * @dev: the SST device
 *
 * Restores the memories saved by intel_sst_suspend() (a no-op when no
 * save exists), restarts the DSP and waits for the firmware-download
 * acknowledgement block.  On success the driver state becomes
 * SST_FW_RUNNING; on timeout -EBUSY is returned.
 */
static int intel_sst_resume(struct device *dev)
{
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
	struct sst_fw_save *fw_save = ctx->fw_save;
	int ret = 0;
	struct sst_block *block;

	if (!fw_save)
		return 0;

	sst_set_fw_state_locked(ctx, SST_FW_LOADING);

	/* we have to restore the memory saved */
	ctx->ops->reset(ctx);

	ctx->fw_save = NULL;

	memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base);
	memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base);
	memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
	memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);

	/* saved images are no longer needed once written back */
	kfree(fw_save->sram);
	kfree(fw_save->dram);
	kfree(fw_save->iram);
	kfree(fw_save->ddr);
	kfree(fw_save);

	/* block to wait on the firmware-download completion IPC */
	block = sst_create_block(ctx, 0, FW_DWNL_ID);
	if (block == NULL)
		return -ENOMEM;


	/* start and wait for ack */
	ctx->ops->start(ctx);
	ret = sst_wait_timeout(ctx, block);
	if (ret) {
		dev_err(ctx->dev, "fw download failed %d\n", ret);
		/* FW download failed due to timeout */
		ret = -EBUSY;

	} else {
		sst_set_fw_state_locked(ctx, SST_FW_RUNNING);
	}

	sst_free_block(ctx, block);
	return ret;
}
552 
/* System and runtime PM callbacks exported to the PCI/ACPI glue drivers.
 * NOTE(review): no .runtime_resume is set here — presumably resume goes
 * through the full firmware reload path elsewhere; confirm in the bus
 * driver before relying on it. */
const struct dev_pm_ops intel_sst_pm = {
	.suspend = intel_sst_suspend,
	.resume = intel_sst_resume,
	.runtime_suspend = intel_sst_runtime_suspend,
};
EXPORT_SYMBOL_GPL(intel_sst_pm);
559