/*
 * skl-sst.c - HDA DSP library functions for SKL platform
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl-sst-ipc.h"

#define SKL_BASEFW_TIMEOUT	300
#define SKL_INIT_TIMEOUT	1000

/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE	0x8000

/* Firmware status window */
#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)

#define SKL_NUM_MODULES		1

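/*
 * skl_check_fw_status - read the FW status register and compare the masked
 * value against an expected ROM/FW state (e.g. SKL_FW_INIT).
 */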
static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
	u32 cur_sts;

	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;

	return (cur_sts == status);
}

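/*
 * skl_transfer_firmware - copy the base firmware image into the code-loader
 * DMA buffer, poll the FW status register for the SKL_FW_RFW_START state and
 * then stop the code-loader DMA.
 */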
static int skl_transfer_firmware(struct sst_dsp *ctx,
		const void *basefw, u32 base_fw_size)
{
	int ret = 0;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
								true);
	if (ret < 0)
		return ret;

	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_FW_STATUS,
			SKL_FW_STS_MASK,
			SKL_FW_RFW_START,
			SKL_BASEFW_TIMEOUT,
			"Firmware boot");

	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284

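/*
 * skl_load_base_firmware - request and boot the base firmware: parse module
 * UUIDs on first boot, strip an extended manifest if present, power up DSP
 * core 0, prepare the code-loader DMA, wait for the ROM to report init done,
 * transfer the image and finally wait for the FW Ready notification.
 */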
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
	int ret = 0, i;
	struct skl_sst *skl = ctx->thread_context;
	struct firmware stripped_fw;
	u32 reg;

	skl->boot_complete = false;
	init_waitqueue_head(&skl->boot_wait);

	if (ctx->fw == NULL) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
			return -EIO;
		}
	}

	/* parse uuids on first boot */
	if (skl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
		if (ret < 0) {
			dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
			release_firmware(ctx->fw);
			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
			return ret;
		}
	}

	/* check for extended manifest */
	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;

	skl_dsp_strip_extended_manifest(&stripped_fw);

	ret = skl_dsp_boot(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	ret = skl_cldma_prepare(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "CL dma prepare failed: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	/* enable interrupts */
	skl_ipc_int_enable(ctx);
	skl_ipc_op_int_enable(ctx);

	/* check ROM Status */
	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
			dev_dbg(ctx->dev,
				"ROM loaded, we can continue with FW loading\n");
			break;
		}
		mdelay(1);
	}
	if (!i) {
		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
		dev_err(ctx->dev,
			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
		ret = -EIO;
		goto transfer_firmware_failed;
	}

	ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
	if (ret < 0) {
		dev_err(ctx->dev, "Transfer firmware failed: %d\n", ret);
		goto transfer_firmware_failed;
	} else {
		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
			ret = -EIO;
			goto transfer_firmware_failed;
		}

		dev_dbg(ctx->dev, "Download firmware successful %d\n", ret);
		skl->fw_loaded = true;
	}
	return 0;
transfer_firmware_failed:
	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	release_firmware(ctx->fw);
	ctx->fw = NULL;
	return ret;
}

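/*
 * skl_set_dsp_D0 - move a DSP core to D0. For core 0 this reloads the base
 * firmware and any firmware libraries (both are lost in D3); for all other
 * cores it enables the core and sends the set-Dx IPC.
 */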
static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	/* If core0 is being turned on, we need to load the FW */
	if (core_id == SKL_DSP_CORE0_ID) {
		ret = skl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to load firmware\n");
			return ret;
		}

		/* load libs as they are also lost on D3 */
		if (skl->lib_count > 1) {
			ret = ctx->fw_ops.load_library(ctx, skl->lib_info,
							skl->lib_count);
			if (ret < 0) {
				dev_err(ctx->dev, "reload libs failed: %d\n",
						ret);
				return ret;
			}
		}
	}

	/*
	 * If any core other than core 0 is being moved to D0, enable the
	 * core and send the set dx IPC for the core.
	 */
	if (core_id != SKL_DSP_CORE0_ID) {
		ret = skl_dsp_enable_core(ctx, core_mask);
		if (ret < 0)
			return ret;

		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
					SKL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to set dsp to D0: core id = %d\n",
					core_id);
			skl_dsp_disable_core(ctx, core_mask);
		}
	}

	skl->cores.state[core_id] = SKL_DSP_RUNNING;

	return 0;
}

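/*
 * skl_set_dsp_D3 - move a DSP core to D3: send the set-Dx (D3) IPC and, for
 * core 0, tear down the code-loader controller and disable the IPC
 * interrupts before the core is disabled and marked as in reset.
 */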
static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0)
		dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);

	if (core_id == SKL_DSP_CORE0_ID) {
		/* disable interrupts */
		ctx->cl_dev.ops.cl_cleanup_controller(ctx);
		skl_cldma_int_disable(ctx);
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0)
		return ret;

	skl->cores.state[core_id] = SKL_DSP_RESET;
	return ret;
}

static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}

/*
 * Since get_module/put_module are called from DAPM context,
 * we don't need a lock for the usage count.
 */
static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return ++module->usage_cnt;
	}

	return -EINVAL;
}

static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return --module->usage_cnt;
	}

	return -EINVAL;
}

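/*
 * skl_fill_module_table - request a module binary from userspace and add a
 * new entry (module id + firmware handle) to the context's module list.
 * Returns the new entry, or NULL on failure.
 */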
static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
						char *mod_name, int mod_id)
{
	const struct firmware *fw;
	struct skl_module_table *skl_module;
	unsigned int size;
	int ret;

	ret = request_firmware(&fw, mod_name, ctx->dev);
	if (ret < 0) {
		dev_err(ctx->dev, "Request Module %s failed: %d\n",
							mod_name, ret);
		return NULL;
	}

	skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
	if (skl_module == NULL) {
		release_firmware(fw);
		return NULL;
	}

	size = sizeof(*skl_module->mod_info);
	skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
	if (skl_module->mod_info == NULL) {
		release_firmware(fw);
		return NULL;
	}

	skl_module->mod_info->mod_id = mod_id;
	skl_module->mod_info->fw = fw;
	list_add(&skl_module->list, &ctx->module_list);

	return skl_module;
}

/* get a module from its unique ID */
static struct skl_module_table *skl_module_get_from_id(
			struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list)) {
		dev_err(ctx->dev, "Module list is empty\n");
		return NULL;
	}

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return module;
	}

	return NULL;
}

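/*
 * skl_transfer_module - stream a module or library image to the DSP in
 * DMA-buffer sized chunks: copy the first chunk, send the load-module (or
 * load-library) IPC, feed the remaining chunks on BDL-complete wakeups and
 * then wait for the load-complete notification from the firmware.
 */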
static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
			u32 size, u16 mod_id, u8 table_id, bool is_module)
{
	int ret, bytes_left, curr_pos;
	struct skl_sst *skl = ctx->thread_context;

	skl->mod_load_complete = false;

	bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
	if (bytes_left < 0)
		return bytes_left;

	/* check is_module flag to load module or library */
	if (is_module)
		ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
	else
		ret = skl_sst_ipc_load_library(&skl->ipc, 0, table_id, false);

	if (ret < 0) {
		dev_err(ctx->dev, "Failed to Load %s with err %d\n",
				is_module ? "module" : "lib", ret);
		goto out;
	}

	/*
	 * If bytes_left > 0, wait for the BDL complete interrupt and
	 * copy the next chunk till bytes_left is 0. If bytes_left is
	 * zero, wait for the load module IPC reply.
	 */
	while (bytes_left > 0) {
		curr_pos = size - bytes_left;

		ret = skl_cldma_wait_interruptible(ctx);
		if (ret < 0)
			goto out;

		bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
							data + curr_pos,
							bytes_left, false);
	}

	ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
				msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
	if (ret == 0 || !skl->mod_load_status) {
		dev_err(ctx->dev, "Module Load failed\n");
		ret = -EIO;
	}

out:
	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

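/*
 * kbl_load_library - load the downloadable firmware libraries (indices 1 to
 * lib_count - 1; index 0 is the base firmware) over the code-loader DMA.
 * Wired up only for KBL via kbl_fw_ops.
 */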
static int
kbl_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
{
	struct skl_sst *skl = ctx->thread_context;
	struct firmware stripped_fw;
	int ret, i;

	/* library indices start from 1 to N. 0 represents base FW */
	for (i = 1; i < lib_count; i++) {
		ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
					SKL_ADSP_FW_BIN_HDR_OFFSET, i);
		if (ret < 0)
			goto load_library_failed;
		ret = skl_transfer_module(ctx, stripped_fw.data,
				stripped_fw.size, 0, i, false);
		if (ret < 0)
			goto load_library_failed;
	}

	return 0;

load_library_failed:
	skl_release_library(linfo, lib_count);
	return ret;
}

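/*
 * skl_load_module - look up a module by id, request its binary
 * ("intel/dsp_fw_<uuid>.bin") if it is not cached yet, transfer it to the
 * DSP on first use and increment its usage count.
 */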
static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
{
	struct skl_module_table *module_entry = NULL;
	int ret = 0;
	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */
	uuid_le *uuid_mod;

	uuid_mod = (uuid_le *)guid;
	snprintf(mod_name, sizeof(mod_name), "%s%pUL%s",
				"intel/dsp_fw_", uuid_mod, ".bin");

	module_entry = skl_module_get_from_id(ctx, mod_id);
	if (module_entry == NULL) {
		module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
		if (module_entry == NULL) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return -EINVAL;
		}
	}

	if (!module_entry->usage_cnt) {
		ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
				module_entry->mod_info->fw->size,
				mod_id, 0, true);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return ret;
		}
	}

	ret = skl_get_module(ctx, mod_id);

	return ret;
}

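/*
 * skl_unload_module - drop a reference on a module and send the unload IPC
 * once the last user is gone; the reference is re-taken if the unload fails.
 */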
static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
{
	int usage_cnt;
	struct skl_sst *skl = ctx->thread_context;
	int ret = 0;

	usage_cnt = skl_put_module(ctx, mod_id);
	if (usage_cnt < 0) {
		dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
		return -EIO;
	}

	/* if the module is still used by others, no need to unload */
	if (usage_cnt > 0)
		return 0;

	ret = skl_ipc_unload_modules(&skl->ipc,
			SKL_NUM_MODULES, &mod_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to unload module\n");
		skl_get_module(ctx, mod_id);
		return ret;
	}

	return ret;
}

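/*
 * skl_clear_module_cnt - reset the usage count of every module in the
 * context's module list.
 */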
void skl_clear_module_cnt(struct sst_dsp *ctx)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry(module, &ctx->module_list, list) {
		module->usage_cnt = 0;
	}
}
EXPORT_SYMBOL_GPL(skl_clear_module_cnt);

static void skl_clear_module_table(struct sst_dsp *ctx)
{
	struct skl_module_table *module, *tmp;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
		list_del(&module->list);
		release_firmware(module->mod_info->fw);
	}
}

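/*
 * Firmware ops tables: SKL and KBL share the D0/D3, base firmware load and
 * module load/unload handlers; KBL additionally supports loading
 * downloadable firmware libraries.
 */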
static const struct skl_dsp_fw_ops skl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

static const struct skl_dsp_fw_ops kbl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_library = kbl_load_library,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.free = skl_dsp_free,
};

static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};

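/*
 * skl_sst_dsp_init - create the SKL DSP context: initialize the sst_dsp
 * instance, set up the SRAM window addresses and the mailbox, initialize
 * IPC, install skl_fw_ops and acquire the DSP IRQ.
 *
 * Rough bring-up sequence as seen from a caller (sketch only; the actual
 * call sites live elsewhere in the Skylake platform driver):
 *
 *	ret = skl_sst_dsp_init(dev, mmio_base, irq, fw_name, dsp_ops, &skl);
 *	if (!ret)
 *		ret = skl_sst_init_fw(dev, skl);   (first boot of base FW)
 */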
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
{
	struct skl_sst *skl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	skl = *dsp;
	sst = skl->dsp;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE;
	sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE;
	sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
	sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;

	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	ret = skl_ipc_init(dev, skl);
	if (ret) {
		skl_dsp_free(sst);
		return ret;
	}

	sst->fw_ops = skl_fw_ops;

	return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);

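/*
 * kbl_sst_dsp_init - same as skl_sst_dsp_init() but installs kbl_fw_ops so
 * that downloadable firmware libraries can be loaded as well.
 */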
int kbl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
		struct skl_sst **dsp)
{
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_dsp_init(dev, mmio_base, irq, fw_name, dsp_ops, dsp);
	if (ret < 0) {
		dev_err(dev, "%s: Init failed %d\n", __func__, ret);
		return ret;
	}

	sst = (*dsp)->dsp;
	sst->fw_ops = kbl_fw_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(kbl_sst_dsp_init);

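/*
 * skl_sst_init_fw - first boot of the DSP: load the base firmware, record
 * the core states, load any firmware libraries and clear the first-boot
 * flag so that later boots skip the UUID parsing.
 */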
int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
{
	int ret;
	struct sst_dsp *sst = ctx->dsp;

	ret = sst->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "Load base fw failed: %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);

	if (ctx->lib_count > 1) {
		ret = sst->fw_ops.load_library(sst, ctx->lib_info,
						ctx->lib_count);
		if (ret < 0) {
			dev_err(dev, "Load Library failed: %x\n", ret);
			return ret;
		}
	}
	ctx->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_init_fw);

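/*
 * skl_sst_dsp_cleanup - release the base firmware, the cached module table,
 * the UUID list and the IPC resources, free the DSP instance and, if the
 * firmware had booted, tear down the code-loader controller.
 */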
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
	if (ctx->dsp->fw)
		release_firmware(ctx->dsp->fw);
	skl_clear_module_table(ctx->dsp);
	skl_freeup_uuid_list(ctx);
	skl_ipc_free(&ctx->ipc);
	ctx->dsp->ops->free(ctx->dsp);
	if (ctx->boot_complete) {
		ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
		skl_cldma_int_disable(ctx->dsp);
	}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");