xref: /openbmc/linux/sound/soc/intel/atom/sst/sst.c (revision bf642bf5)
1 /*
2  *  sst.c - Intel SST Driver for audio engine
3  *
4  *  Copyright (C) 2008-14	Intel Corp
5  *  Authors:	Vinod Koul <vinod.koul@intel.com>
6  *		Harsha Priya <priya.harsha@intel.com>
7  *		Dharageswari R <dharageswari.r@intel.com>
8  *		KP Jeeja <jeeja.kp@intel.com>
9  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; version 2 of the License.
14  *
15  *  This program is distributed in the hope that it will be useful, but
16  *  WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  *  General Public License for more details.
19  *
20  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21  */
22 #include <linux/module.h>
23 #include <linux/fs.h>
24 #include <linux/interrupt.h>
25 #include <linux/firmware.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_qos.h>
28 #include <linux/async.h>
29 #include <linux/acpi.h>
30 #include <linux/sysfs.h>
31 #include <sound/core.h>
32 #include <sound/soc.h>
33 #include <asm/platform_sst_audio.h>
34 #include "../sst-mfld-platform.h"
35 #include "sst.h"
36 #include "../../common/sst-dsp.h"
37 
38 MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
39 MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
40 MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
41 MODULE_LICENSE("GPL v2");
42 
43 static inline bool sst_is_process_reply(u32 msg_id)
44 {
45 	return ((msg_id & PROCESS_MSG) ? true : false);
46 }
47 
48 static inline bool sst_validate_mailbox_size(unsigned int size)
49 {
50 	return ((size <= SST_MAILBOX_SIZE) ? true : false);
51 }
52 
/*
 * intel_sst_interrupt_mrfld - top-half (hard) IRQ handler for the
 * Merrifield-family SST DSP.
 *
 * Two interrupt sources are handled from the ISRX shim register:
 *  - "done": the DSP acknowledged a message we sent via IPCX; clear the
 *    done bit and kick the post-message workqueue so queued outbound
 *    messages can be sent.
 *  - "busy": the DSP has posted a message in IPCD; copy header (and, for
 *    large messages, the mailbox payload) into a freshly allocated
 *    ipc_post and hand it to the threaded handler via rx_list.
 *
 * Returns IRQ_HANDLED, or IRQ_WAKE_THREAD when a message was queued for
 * intel_sst_irq_thread_mrfld().
 */
static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
{
	union interrupt_reg_mrfld isr;
	union ipc_header_mrfld header;
	union sst_imr_reg_mrfld imr;
	struct ipc_post *msg = NULL;
	unsigned int size = 0;
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	irqreturn_t retval = IRQ_HANDLED;

	/* Interrupt arrived, check src */
	isr.full = sst_shim_read64(drv->shim, SST_ISRX);

	if (isr.part.done_interrupt) {
		/* Clear done bit */
		spin_lock(&drv->ipc_spin_lock);
		header.full = sst_shim_read64(drv->shim,
					drv->ipc_reg.ipcx);
		header.p.header_high.part.done = 0;
		sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);

		/* write 1 to clear status register */;
		isr.part.done_interrupt = 1;
		sst_shim_write64(drv->shim, SST_ISRX, isr.full);
		spin_unlock(&drv->ipc_spin_lock);

		/* we can send more messages to DSP so trigger work */
		queue_work(drv->post_msg_wq, &drv->ipc_post_msg_wq);
		retval = IRQ_HANDLED;
	}

	if (isr.part.busy_interrupt) {
		/* message from dsp so copy that */
		spin_lock(&drv->ipc_spin_lock);
		/* mask further busy interrupts until clear_interrupt() */
		imr.full = sst_shim_read64(drv->shim, SST_IMRX);
		imr.part.busy_interrupt = 1;
		sst_shim_write64(drv->shim, SST_IMRX, imr.full);
		spin_unlock(&drv->ipc_spin_lock);
		header.full =  sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);

		/* allocation failed: drop the message, just re-enable irq */
		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
			drv->ops->clear_interrupt(drv);
			return IRQ_HANDLED;
		}

		if (header.p.header_high.part.large) {
			size = header.p.header_low_payload;
			/* size is firmware-supplied; validate before copying */
			if (sst_validate_mailbox_size(size)) {
				memcpy_fromio(msg->mailbox_data,
					drv->mailbox + drv->mailbox_recv_offset, size);
			} else {
				dev_err(drv->dev,
					"Mailbox not copied, payload size is: %u\n", size);
				header.p.header_low_payload = 0;
			}
		}

		msg->mrfld_header = header;
		msg->is_process_reply =
			sst_is_process_reply(header.p.header_high.part.msg_id);
		/* queue for the threaded handler to process outside hard-IRQ */
		spin_lock(&drv->rx_msg_lock);
		list_add_tail(&msg->node, &drv->rx_list);
		spin_unlock(&drv->rx_msg_lock);
		drv->ops->clear_interrupt(drv);
		retval = IRQ_WAKE_THREAD;
	}
	return retval;
}
121 
/*
 * intel_sst_irq_thread_mrfld - threaded IRQ handler; drains rx_list.
 *
 * Each message queued by the top half is removed from the list and
 * dispatched to process_message() (async notifications) or
 * process_reply() (replies to our own commands), then freed.
 *
 * NOTE(review): rx_msg_lock is deliberately dropped around the dispatch
 * callbacks (they may sleep/do long work) and re-taken before the next
 * iteration. list_for_each_entry_safe() only protects against deletion
 * of the *current* node, so this relies on the top half only ever
 * adding at the tail — confirm before restructuring this loop.
 */
static irqreturn_t intel_sst_irq_thread_mrfld(int irq, void *context)
{
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	struct ipc_post *__msg, *msg = NULL;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	if (list_empty(&drv->rx_list)) {
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		return IRQ_HANDLED;
	}

	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
		list_del(&msg->node);
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		if (msg->is_process_reply)
			drv->ops->process_message(msg);
		else
			drv->ops->process_reply(drv, msg);

		/* large messages carry a separately allocated mailbox copy */
		if (msg->is_large)
			kfree(msg->mailbox_data);
		kfree(msg);
		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	}
	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
	return IRQ_HANDLED;
}
150 
151 static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
152 {
153 	int ret = 0;
154 
155 	ret = sst_prepare_and_post_msg(sst, SST_TASK_ID_MEDIA, IPC_CMD,
156 			IPC_PREP_D3, PIPE_RSVD, 0, NULL, NULL,
157 			true, true, false, true);
158 
159 	if (ret < 0) {
160 		dev_err(sst->dev, "not suspending FW!!, Err: %d\n", ret);
161 		return -EIO;
162 	}
163 
164 	return 0;
165 }
166 
167 
/*
 * Ops table for the Merrifield-generation DSPs; sst_driver_ops() installs
 * it for MRFLD/BYT/CHV device ids. All IPC and lifecycle entry points of
 * this driver dispatch through these callbacks.
 */
static struct intel_sst_ops mrfld_ops = {
	.interrupt = intel_sst_interrupt_mrfld,
	.irq_thread = intel_sst_irq_thread_mrfld,
	.clear_interrupt = intel_sst_clear_intr_mrfld,
	.start = sst_start_mrfld,
	.reset = intel_sst_reset_dsp_mrfld,
	.post_message = sst_post_message_mrfld,
	.process_reply = sst_process_reply_mrfld,
	.save_dsp_context =  sst_save_dsp_context_v2,
	.alloc_stream = sst_alloc_stream_mrfld,
	.post_download = sst_post_download_mrfld,
};
180 
181 int sst_driver_ops(struct intel_sst_drv *sst)
182 {
183 
184 	switch (sst->dev_id) {
185 	case SST_MRFLD_PCI_ID:
186 	case SST_BYT_ACPI_ID:
187 	case SST_CHV_ACPI_ID:
188 		sst->tstamp = SST_TIME_STAMP_MRFLD;
189 		sst->ops = &mrfld_ops;
190 		return 0;
191 
192 	default:
193 		dev_err(sst->dev,
194 			"SST Driver capabilities missing for dev_id: %x",
195 			sst->dev_id);
196 		return -EINVAL;
197 	};
198 }
199 
200 void sst_process_pending_msg(struct work_struct *work)
201 {
202 	struct intel_sst_drv *ctx = container_of(work,
203 			struct intel_sst_drv, ipc_post_msg_wq);
204 
205 	ctx->ops->post_message(ctx, NULL, false);
206 }
207 
208 static int sst_workqueue_init(struct intel_sst_drv *ctx)
209 {
210 	INIT_LIST_HEAD(&ctx->memcpy_list);
211 	INIT_LIST_HEAD(&ctx->rx_list);
212 	INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
213 	INIT_LIST_HEAD(&ctx->block_list);
214 	INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg);
215 	init_waitqueue_head(&ctx->wait_queue);
216 
217 	ctx->post_msg_wq =
218 		create_singlethread_workqueue("sst_post_msg_wq");
219 	if (!ctx->post_msg_wq)
220 		return -EBUSY;
221 	return 0;
222 }
223 
224 static void sst_init_locks(struct intel_sst_drv *ctx)
225 {
226 	mutex_init(&ctx->sst_lock);
227 	spin_lock_init(&ctx->rx_msg_lock);
228 	spin_lock_init(&ctx->ipc_spin_lock);
229 	spin_lock_init(&ctx->block_lock);
230 }
231 
232 int sst_alloc_drv_context(struct intel_sst_drv **ctx,
233 		struct device *dev, unsigned int dev_id)
234 {
235 	*ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL);
236 	if (!(*ctx))
237 		return -ENOMEM;
238 
239 	(*ctx)->dev = dev;
240 	(*ctx)->dev_id = dev_id;
241 
242 	return 0;
243 }
244 EXPORT_SYMBOL_GPL(sst_alloc_drv_context);
245 
246 static ssize_t firmware_version_show(struct device *dev,
247 			    struct device_attribute *attr, char *buf)
248 {
249 	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
250 
251 	if (ctx->fw_version.type == 0 && ctx->fw_version.major == 0 &&
252 	    ctx->fw_version.minor == 0 && ctx->fw_version.build == 0)
253 		return sprintf(buf, "FW not yet loaded\n");
254 	else
255 		return sprintf(buf, "v%02x.%02x.%02x.%02x\n",
256 			       ctx->fw_version.type, ctx->fw_version.major,
257 			       ctx->fw_version.minor, ctx->fw_version.build);
258 
259 }
260 
261 static DEVICE_ATTR_RO(firmware_version);
262 
263 static const struct attribute *sst_fw_version_attrs[] = {
264 	&dev_attr_firmware_version.attr,
265 	NULL,
266 };
267 
268 static const struct attribute_group sst_fw_version_attr_group = {
269 	.attrs = (struct attribute **)sst_fw_version_attrs,
270 };
271 
272 int sst_context_init(struct intel_sst_drv *ctx)
273 {
274 	int ret = 0, i;
275 
276 	if (!ctx->pdata)
277 		return -EINVAL;
278 
279 	if (!ctx->pdata->probe_data)
280 		return -EINVAL;
281 
282 	memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));
283 
284 	ret = sst_driver_ops(ctx);
285 	if (ret != 0)
286 		return -EINVAL;
287 
288 	sst_init_locks(ctx);
289 	sst_set_fw_state_locked(ctx, SST_RESET);
290 
291 	/* pvt_id 0 reserved for async messages */
292 	ctx->pvt_id = 1;
293 	ctx->stream_cnt = 0;
294 	ctx->fw_in_mem = NULL;
295 	/* we use memcpy, so set to 0 */
296 	ctx->use_dma = 0;
297 	ctx->use_lli = 0;
298 
299 	if (sst_workqueue_init(ctx))
300 		return -EINVAL;
301 
302 	ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
303 	ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
304 	ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;
305 
306 	dev_info(ctx->dev, "Got drv data max stream %d\n",
307 				ctx->info.max_streams);
308 
309 	for (i = 1; i <= ctx->info.max_streams; i++) {
310 		struct stream_info *stream = &ctx->streams[i];
311 
312 		memset(stream, 0, sizeof(*stream));
313 		stream->pipe_id = PIPE_RSVD;
314 		mutex_init(&stream->lock);
315 	}
316 
317 	/* Register the ISR */
318 	ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
319 					ctx->ops->irq_thread, 0, SST_DRV_NAME,
320 					ctx);
321 	if (ret)
322 		goto do_free_mem;
323 
324 	dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num);
325 
326 	/* default intr are unmasked so set this as masked */
327 	sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);
328 
329 	ctx->qos = devm_kzalloc(ctx->dev,
330 		sizeof(struct pm_qos_request), GFP_KERNEL);
331 	if (!ctx->qos) {
332 		ret = -ENOMEM;
333 		goto do_free_mem;
334 	}
335 	pm_qos_add_request(ctx->qos, PM_QOS_CPU_DMA_LATENCY,
336 				PM_QOS_DEFAULT_VALUE);
337 
338 	dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name);
339 	ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name,
340 				      ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
341 	if (ret) {
342 		dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
343 		goto do_free_mem;
344 	}
345 
346 	ret = sysfs_create_group(&ctx->dev->kobj,
347 				 &sst_fw_version_attr_group);
348 	if (ret) {
349 		dev_err(ctx->dev,
350 			"Unable to create sysfs\n");
351 		goto err_sysfs;
352 	}
353 
354 	sst_register(ctx->dev);
355 	return 0;
356 err_sysfs:
357 	sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
358 
359 do_free_mem:
360 	destroy_workqueue(ctx->post_msg_wq);
361 	return ret;
362 }
363 EXPORT_SYMBOL_GPL(sst_context_init);
364 
365 void sst_context_cleanup(struct intel_sst_drv *ctx)
366 {
367 	pm_runtime_get_noresume(ctx->dev);
368 	pm_runtime_disable(ctx->dev);
369 	sst_unregister(ctx->dev);
370 	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
371 	sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
372 	flush_scheduled_work();
373 	destroy_workqueue(ctx->post_msg_wq);
374 	pm_qos_remove_request(ctx->qos);
375 	kfree(ctx->fw_sg_list.src);
376 	kfree(ctx->fw_sg_list.dst);
377 	ctx->fw_sg_list.list_len = 0;
378 	kfree(ctx->fw_in_mem);
379 	ctx->fw_in_mem = NULL;
380 	sst_memcpy_free_resources(ctx);
381 	ctx = NULL;
382 }
383 EXPORT_SYMBOL_GPL(sst_context_cleanup);
384 
/*
 * sst_configure_runtime_pm - enable runtime PM with autosuspend for the
 * SST device.
 *
 * The set_active/put_noidle ordering differs between the ACPI and the
 * non-ACPI (PCI) probe paths on purpose — see the comment below; do not
 * reorder these calls without testing both paths.
 */
void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
{
	pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(ctx->dev);
	/*
	 * For acpi devices, the actual physical device state is
	 * initially active. So change the state to active before
	 * enabling the pm
	 */

	if (!acpi_disabled)
		pm_runtime_set_active(ctx->dev);

	pm_runtime_enable(ctx->dev);

	/*
	 * ACPI: drop the usage count taken by the ACPI core (without
	 * idling); otherwise just mark the device active now.
	 */
	if (acpi_disabled)
		pm_runtime_set_active(ctx->dev);
	else
		pm_runtime_put_noidle(ctx->dev);
}
EXPORT_SYMBOL_GPL(sst_configure_runtime_pm);
406 
407 static int intel_sst_runtime_suspend(struct device *dev)
408 {
409 	int ret = 0;
410 	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
411 
412 	if (ctx->sst_state == SST_RESET) {
413 		dev_dbg(dev, "LPE is already in RESET state, No action\n");
414 		return 0;
415 	}
416 	/* save fw context */
417 	if (ctx->ops->save_dsp_context(ctx))
418 		return -EBUSY;
419 
420 	/* Move the SST state to Reset */
421 	sst_set_fw_state_locked(ctx, SST_RESET);
422 
423 	synchronize_irq(ctx->irq_num);
424 	flush_workqueue(ctx->post_msg_wq);
425 
426 	ctx->ops->reset(ctx);
427 
428 	return ret;
429 }
430 
/*
 * intel_sst_suspend - system (S3) suspend hook.
 *
 * Refuses to suspend while any stream is RUNNING (soc_suspend should
 * have stopped them first). On platforms that lose stream state across
 * suspend, remembers each stream's status and frees it for re-creation
 * on resume. Then quiesces IRQ/workqueue, asks the firmware to save its
 * context, snapshots IRAM/DRAM/mailbox(SRAM)/DDR into a kvzalloc'd
 * sst_fw_save, and resets the DSP.
 *
 * Returns 0 on success, -EBUSY if a stream is running or the firmware
 * refuses, -ENOMEM if any snapshot buffer cannot be allocated (the goto
 * chain below unwinds exactly the buffers allocated so far).
 */
static int intel_sst_suspend(struct device *dev)
{
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
	struct sst_fw_save *fw_save;
	int i, ret = 0;

	/* check first if we are already in SW reset */
	if (ctx->sst_state == SST_RESET)
		return 0;

	/*
	 * check if any stream is active and running
	 * they should already by suspend by soc_suspend
	 */
	for (i = 1; i <= ctx->info.max_streams; i++) {
		struct stream_info *stream = &ctx->streams[i];

		if (stream->status == STREAM_RUNNING) {
			dev_err(dev, "stream %d is running, can't suspend, abort\n", i);
			return -EBUSY;
		}

		if (ctx->pdata->streams_lost_on_suspend) {
			/* remember state so intel_sst_resume() can re-alloc */
			stream->resume_status = stream->status;
			stream->resume_prev = stream->prev;
			if (stream->status != STREAM_UN_INIT)
				sst_free_stream(ctx, i);
		}
	}
	synchronize_irq(ctx->irq_num);
	flush_workqueue(ctx->post_msg_wq);

	/* Move the SST state to Reset */
	sst_set_fw_state_locked(ctx, SST_RESET);

	/* tell DSP we are suspending */
	if (ctx->ops->save_dsp_context(ctx))
		return -EBUSY;

	/* save the memories */
	fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
	if (!fw_save)
		return -ENOMEM;
	fw_save->iram = kvzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
	if (!fw_save->iram) {
		ret = -ENOMEM;
		goto iram;
	}
	fw_save->dram = kvzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
	if (!fw_save->dram) {
		ret = -ENOMEM;
		goto dram;
	}
	fw_save->sram = kvzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
	if (!fw_save->sram) {
		ret = -ENOMEM;
		goto sram;
	}

	fw_save->ddr = kvzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
	if (!fw_save->ddr) {
		ret = -ENOMEM;
		goto ddr;
	}

	/* copy the DSP memories out of MMIO space */
	memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base);
	memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base);
	memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE);
	memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base);

	ctx->fw_save = fw_save;
	ctx->ops->reset(ctx);
	return 0;
	/* unwind: each label frees what was allocated before its goto */
ddr:
	kvfree(fw_save->sram);
sram:
	kvfree(fw_save->dram);
dram:
	kvfree(fw_save->iram);
iram:
	kfree(fw_save);
	return ret;
}
514 
/*
 * intel_sst_resume - system (S3) resume hook.
 *
 * Counterpart to intel_sst_suspend(): restores the saved
 * IRAM/DRAM/mailbox/DDR images into the DSP, restarts the DSP and waits
 * for the firmware-download ack, then re-allocates any streams that
 * were torn down on suspend (streams_lost_on_suspend platforms).
 *
 * A NULL ctx->fw_save means suspend never saved anything (e.g. the DSP
 * was already in reset), so there is nothing to restore. Returns 0 on
 * success, -ENOMEM if the ack block cannot be created, -EBUSY if the
 * firmware does not come back within the timeout.
 */
static int intel_sst_resume(struct device *dev)
{
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
	struct sst_fw_save *fw_save = ctx->fw_save;
	struct sst_block *block;
	int i, ret = 0;

	if (!fw_save)
		return 0;

	sst_set_fw_state_locked(ctx, SST_FW_LOADING);

	/* we have to restore the memory saved */
	ctx->ops->reset(ctx);

	ctx->fw_save = NULL;

	memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base);
	memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base);
	memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
	memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);

	/* snapshot is consumed; free all of it */
	kvfree(fw_save->sram);
	kvfree(fw_save->dram);
	kvfree(fw_save->iram);
	kvfree(fw_save->ddr);
	kfree(fw_save);

	/* block to wait on the firmware-download acknowledgement */
	block = sst_create_block(ctx, 0, FW_DWNL_ID);
	if (block == NULL)
		return -ENOMEM;


	/* start and wait for ack */
	ctx->ops->start(ctx);
	ret = sst_wait_timeout(ctx, block);
	if (ret) {
		dev_err(ctx->dev, "fw download failed %d\n", ret);
		/* FW download failed due to timeout */
		ret = -EBUSY;

	} else {
		sst_set_fw_state_locked(ctx, SST_FW_RUNNING);
	}

	if (ctx->pdata->streams_lost_on_suspend) {
		for (i = 1; i <= ctx->info.max_streams; i++) {
			struct stream_info *stream = &ctx->streams[i];

			if (stream->resume_status != STREAM_UN_INIT) {
				dev_dbg(ctx->dev, "Re-allocing stream %d status %d prev %d\n",
					i, stream->resume_status,
					stream->resume_prev);
				sst_realloc_stream(ctx, i);
				stream->status = stream->resume_status;
				stream->prev = stream->resume_prev;
			}
		}
	}

	sst_free_block(ctx, block);
	return ret;
}
578 
/*
 * PM ops exported to the platform drivers; system suspend/resume save
 * and restore the whole DSP memory image, runtime suspend only parks
 * the firmware (no runtime_resume — the firmware reload path handles
 * wake-up).
 */
const struct dev_pm_ops intel_sst_pm = {
	.suspend = intel_sst_suspend,
	.resume = intel_sst_resume,
	.runtime_suspend = intel_sst_runtime_suspend,
};
EXPORT_SYMBOL_GPL(intel_sst_pm);
585