/*
 * skl-sst.c - HDA DSP library functions for SKL platform
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl-sst-ipc.h"

#define SKL_BASEFW_TIMEOUT	300
#define SKL_INIT_TIMEOUT	1000

/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE	0x8000

/* Firmware status window */
#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)

#define SKL_NUM_MODULES		1

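/* Check whether the firmware status register reports the given boot state */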
static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
	u32 cur_sts;

	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;

	return (cur_sts == status);
}

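/*
 * Copy the base firmware into the code-loader DMA buffer and poll the
 * firmware status register for SKL_FW_RFW_START before stopping the DMA.
 */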
static int skl_transfer_firmware(struct sst_dsp *ctx,
		const void *basefw, u32 base_fw_size)
{
	int ret = 0;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
								true);
	if (ret < 0)
		return ret;

	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_FW_STATUS,
			SKL_FW_STS_MASK,
			SKL_FW_RFW_START,
			SKL_BASEFW_TIMEOUT,
			"Firmware boot");

	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284

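/*
 * Load the base firmware: request the image on first use, parse the module
 * UUIDs, boot the DSP core, wait for the ROM to come up, then stream the
 * (manifest-stripped) image over CL DMA and wait for the FW Ready IPC.
 */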
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
	int ret = 0, i;
	struct skl_sst *skl = ctx->thread_context;
	struct firmware stripped_fw;
	u32 reg;

	skl->boot_complete = false;
	init_waitqueue_head(&skl->boot_wait);

	if (ctx->fw == NULL) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
			return -EIO;
		}
	}

	/* parse UUIDs on first boot */
	if (skl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
		if (ret < 0) {
			dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
			release_firmware(ctx->fw);
			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
			return ret;
		}
	}

	/* check for extended manifest */
	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;

	skl_dsp_strip_extended_manifest(&stripped_fw);

	ret = skl_dsp_boot(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	ret = skl_cldma_prepare(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "CL dma prepare failed : %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	/* enable Interrupt */
	skl_ipc_int_enable(ctx);
	skl_ipc_op_int_enable(ctx);

	/* check ROM Status */
	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
			dev_dbg(ctx->dev,
				"ROM loaded, we can continue with FW loading\n");
			break;
		}
		mdelay(1);
	}
	if (!i) {
		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
		dev_err(ctx->dev,
			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
		ret = -EIO;
		goto transfer_firmware_failed;
	}

	ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
	if (ret < 0) {
		dev_err(ctx->dev, "Transfer firmware failed: %d\n", ret);
		goto transfer_firmware_failed;
	} else {
		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
			ret = -EIO;
			goto transfer_firmware_failed;
		}

		dev_dbg(ctx->dev, "Download firmware successful %d\n", ret);
		skl->fw_loaded = true;
	}
	return 0;
transfer_firmware_failed:
	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	release_firmware(ctx->fw);
	ctx->fw = NULL;
	return ret;
}

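/*
 * Power a DSP core up (D0). Core 0 implies (re)loading the base firmware;
 * any other core is enabled and then moved to D0 with a set_dx IPC.
 */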
static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	/* If core0 is being turned on, we need to load the FW */
	if (core_id == SKL_DSP_CORE0_ID) {
		ret = skl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to load firmware\n");
			return ret;
		}
	}

	/*
	 * If any core other than core 0 is being moved to D0, enable the
	 * core and send the set dx IPC for the core.
	 */
	if (core_id != SKL_DSP_CORE0_ID) {
		ret = skl_dsp_enable_core(ctx, core_mask);
		if (ret < 0)
			return ret;

		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
					SKL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to set dsp to D0:core id= %d\n",
					core_id);
			skl_dsp_disable_core(ctx, core_mask);
		}
	}

	skl->cores.state[core_id] = SKL_DSP_RUNNING;

	return ret;
}

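/*
 * Power a DSP core down (D3): send the set_dx IPC, and for core 0 also tear
 * down the code-loader DMA and disable the IPC interrupts before the core
 * is disabled.
 */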
static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0)
		dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);

	if (core_id == SKL_DSP_CORE0_ID) {
		/* clean up the CL DMA and disable interrupts */
		ctx->cl_dev.ops.cl_cleanup_controller(ctx);
		skl_cldma_int_disable(ctx);
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0)
		return ret;

	skl->cores.state[core_id] = SKL_DSP_RESET;
	return ret;
}

static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}

/*
 * Since the module get/put helpers are called from DAPM context,
 * we don't need a lock for the usage count.
 */
static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return ++module->usage_cnt;
	}

	return -EINVAL;
}

static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return --module->usage_cnt;
	}

	return -EINVAL;
}

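/*
 * Request the module binary from userspace and add a new entry for it to
 * the context's module table.
 */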
static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
						char *mod_name, int mod_id)
{
	const struct firmware *fw;
	struct skl_module_table *skl_module;
	unsigned int size;
	int ret;

	ret = request_firmware(&fw, mod_name, ctx->dev);
	if (ret < 0) {
		dev_err(ctx->dev, "Request Module %s failed :%d\n",
							mod_name, ret);
		return NULL;
	}

	skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
	if (skl_module == NULL) {
		release_firmware(fw);
		return NULL;
	}

	size = sizeof(*skl_module->mod_info);
	skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
	if (skl_module->mod_info == NULL) {
		release_firmware(fw);
		return NULL;
	}

	skl_module->mod_info->mod_id = mod_id;
	skl_module->mod_info->fw = fw;
	list_add(&skl_module->list, &ctx->module_list);

	return skl_module;
}

/* get a module from its unique ID */
static struct skl_module_table *skl_module_get_from_id(
			struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list)) {
		dev_err(ctx->dev, "Module list is empty\n");
		return NULL;
	}

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return module;
	}

	return NULL;
}

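/*
 * Stream a module image to the DSP over the code-loader DMA chunk by chunk;
 * when is_module is set, issue the load-module IPC and wait for the
 * firmware's reply.
 */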
static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
			u32 size, u16 mod_id, u8 table_id, bool is_module)
{
	int ret, bytes_left, curr_pos;
	struct skl_sst *skl = ctx->thread_context;

	skl->mod_load_complete = false;

	bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
	if (bytes_left < 0)
		return bytes_left;

	if (is_module) { /* load module */
		ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to Load module: %d\n", ret);
			goto out;
		}
	}

	/*
	 * If bytes_left > 0, wait for the BDL complete interrupt and copy
	 * the next chunk until bytes_left reaches 0; once it does, wait for
	 * the load module IPC reply.
	 */
	while (bytes_left > 0) {
		curr_pos = size - bytes_left;

		ret = skl_cldma_wait_interruptible(ctx);
		if (ret < 0)
			goto out;

		bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
							data + curr_pos,
							bytes_left, false);
	}

	ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
				msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
	if (ret == 0 || !skl->mod_load_status) {
		dev_err(ctx->dev, "Module Load failed\n");
		ret = -EIO;
	}

out:
	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

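/*
 * Load a module by GUID: look it up in the module table (requesting the
 * firmware file on first use), transfer it to the DSP if it is not already
 * loaded, and bump its usage count.
 */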
static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
{
	struct skl_module_table *module_entry = NULL;
	int ret = 0;
	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */
	uuid_le *uuid_mod;

	uuid_mod = (uuid_le *)guid;
	snprintf(mod_name, sizeof(mod_name), "%s%pUL%s",
				"intel/dsp_fw_", uuid_mod, ".bin");

	module_entry = skl_module_get_from_id(ctx, mod_id);
	if (module_entry == NULL) {
		module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
		if (module_entry == NULL) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return -EINVAL;
		}
	}

	if (!module_entry->usage_cnt) {
		ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
				module_entry->mod_info->fw->size,
				mod_id, 0, true);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return ret;
		}
	}

	ret = skl_get_module(ctx, mod_id);

	return ret;
}

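/*
 * Drop a reference to a module and, when its usage count hits zero, ask the
 * firmware to unload it.
 */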
static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
{
	int usage_cnt;
	struct skl_sst *skl = ctx->thread_context;
	int ret = 0;

	usage_cnt = skl_put_module(ctx, mod_id);
	if (usage_cnt < 0) {
		dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
		return -EIO;
	}

	/* if the module is still used by others, no need to unload */
	if (usage_cnt > 0)
		return 0;

	ret = skl_ipc_unload_modules(&skl->ipc,
			SKL_NUM_MODULES, &mod_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to UnLoad module\n");
		skl_get_module(ctx, mod_id);
		return ret;
	}

	return ret;
}

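/* Reset the usage count of every module in the module table */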
void skl_clear_module_cnt(struct sst_dsp *ctx)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry(module, &ctx->module_list, list) {
		module->usage_cnt = 0;
	}
}
EXPORT_SYMBOL_GPL(skl_clear_module_cnt);

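/* Drop all module table entries and release the module firmware images */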
static void skl_clear_module_table(struct sst_dsp *ctx)
{
	struct skl_module_table *module, *tmp;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
		list_del(&module->list);
		release_firmware(module->mod_info->fw);
	}
}

static struct skl_dsp_fw_ops skl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.free = skl_dsp_free,
};

static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};

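/*
 * Create and initialize the SKL DSP/IPC context: map the SHIM and mailbox
 * windows, hook up the firmware ops and report two DSP cores. The firmware
 * itself is not loaded here; the platform driver is expected to call
 * skl_sst_init_fw() afterwards, roughly:
 *
 *	ret = skl_sst_dsp_init(dev, mmio_base, irq, fw_name, dsp_ops, &skl);
 *	if (!ret)
 *		ret = skl_sst_init_fw(dev, skl);
 *
 * (illustrative sketch only; the actual call sites live in the Skylake
 * platform driver)
 */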
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
{
	struct skl_sst *skl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	skl = *dsp;
	sst = skl->dsp;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	sst->fw_ops = skl_fw_ops;

	skl->cores.count = 2;

	return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);

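/*
 * Load the base firmware, initialize the core state tracking and mark the
 * first boot as done.
 */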
int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
{
	int ret;
	struct sst_dsp *sst = ctx->dsp;

	ret = sst->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "Load base fw failed : %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);
	ctx->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_init_fw);

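/*
 * Tear down the DSP context: release the cached firmware, free the module
 * table, UUID list and IPC, and clean up the code loader if boot completed.
 */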
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
	if (ctx->dsp->fw)
		release_firmware(ctx->dsp->fw);
	skl_clear_module_table(ctx->dsp);
	skl_freeup_uuid_list(ctx);
	skl_ipc_free(&ctx->ipc);
	ctx->dsp->ops->free(ctx->dsp);
	if (ctx->boot_complete) {
		ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
		skl_cldma_int_disable(ctx->dsp);
	}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");