xref: /openbmc/linux/sound/soc/intel/skylake/skl-sst.c (revision f7ea7777)
1 /*
2  * skl-sst.c - HDA DSP library functions for SKL platform
3  *
4  * Copyright (C) 2014-15, Intel Corporation.
5  * Author:Rafal Redzimski <rafal.f.redzimski@intel.com>
6  *	Jeeja KP <jeeja.kp@intel.com>
7  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/module.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/err.h>
23 #include <linux/uuid.h>
24 #include "../common/sst-dsp.h"
25 #include "../common/sst-dsp-priv.h"
26 #include "../common/sst-ipc.h"
27 #include "skl-sst-ipc.h"
28 
29 #define SKL_BASEFW_TIMEOUT	300
30 #define SKL_INIT_TIMEOUT	1000
31 
32 /* Intel HD Audio SRAM Window 0 */
33 #define SKL_ADSP_SRAM0_BASE	0x8000
34 
35 /* Firmware status window */
36 #define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
37 #define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)
38 
39 #define SKL_NUM_MODULES		1
40 
41 static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
42 {
43 	u32 cur_sts;
44 
45 	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
46 
47 	return (cur_sts == status);
48 }
49 
50 static int skl_transfer_firmware(struct sst_dsp *ctx,
51 		const void *basefw, u32 base_fw_size)
52 {
53 	int ret = 0;
54 
55 	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
56 								true);
57 	if (ret < 0)
58 		return ret;
59 
60 	ret = sst_dsp_register_poll(ctx,
61 			SKL_ADSP_FW_STATUS,
62 			SKL_FW_STS_MASK,
63 			SKL_FW_RFW_START,
64 			SKL_BASEFW_TIMEOUT,
65 			"Firmware boot");
66 
67 	ctx->cl_dev.ops.cl_stop_dma(ctx);
68 
69 	return ret;
70 }
71 
72 #define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284
73 
74 static int skl_load_base_firmware(struct sst_dsp *ctx)
75 {
76 	int ret = 0, i;
77 	struct skl_sst *skl = ctx->thread_context;
78 	struct firmware stripped_fw;
79 	u32 reg;
80 
81 	skl->boot_complete = false;
82 	init_waitqueue_head(&skl->boot_wait);
83 
84 	if (ctx->fw == NULL) {
85 		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
86 		if (ret < 0) {
87 			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
88 			return -EIO;
89 		}
90 	}
91 
92 	/* prase uuids on first boot */
93 	if (skl->is_first_boot) {
94 		ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
95 		if (ret < 0) {
96 			dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
97 			release_firmware(ctx->fw);
98 			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
99 			return ret;
100 		}
101 	}
102 
103 	/* check for extended manifest */
104 	stripped_fw.data = ctx->fw->data;
105 	stripped_fw.size = ctx->fw->size;
106 
107 	skl_dsp_strip_extended_manifest(&stripped_fw);
108 
109 	ret = skl_dsp_boot(ctx);
110 	if (ret < 0) {
111 		dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
112 		goto skl_load_base_firmware_failed;
113 	}
114 
115 	ret = skl_cldma_prepare(ctx);
116 	if (ret < 0) {
117 		dev_err(ctx->dev, "CL dma prepare failed : %d\n", ret);
118 		goto skl_load_base_firmware_failed;
119 	}
120 
121 	/* enable Interrupt */
122 	skl_ipc_int_enable(ctx);
123 	skl_ipc_op_int_enable(ctx);
124 
125 	/* check ROM Status */
126 	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
127 		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
128 			dev_dbg(ctx->dev,
129 				"ROM loaded, we can continue with FW loading\n");
130 			break;
131 		}
132 		mdelay(1);
133 	}
134 	if (!i) {
135 		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
136 		dev_err(ctx->dev,
137 			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
138 		ret = -EIO;
139 		goto transfer_firmware_failed;
140 	}
141 
142 	ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
143 	if (ret < 0) {
144 		dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
145 		goto transfer_firmware_failed;
146 	} else {
147 		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
148 					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
149 		if (ret == 0) {
150 			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
151 			ret = -EIO;
152 			goto transfer_firmware_failed;
153 		}
154 
155 		dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
156 		skl->fw_loaded = true;
157 	}
158 	return 0;
159 transfer_firmware_failed:
160 	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
161 skl_load_base_firmware_failed:
162 	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
163 	release_firmware(ctx->fw);
164 	ctx->fw = NULL;
165 	return ret;
166 }
167 
168 static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
169 {
170 	int ret;
171 	struct skl_ipc_dxstate_info dx;
172 	struct skl_sst *skl = ctx->thread_context;
173 	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
174 
175 	/* If core0 is being turned on, we need to load the FW */
176 	if (core_id == SKL_DSP_CORE0_ID) {
177 		ret = skl_load_base_firmware(ctx);
178 		if (ret < 0) {
179 			dev_err(ctx->dev, "unable to load firmware\n");
180 			return ret;
181 		}
182 	}
183 
184 	/*
185 	 * If any core other than core 0 is being moved to D0, enable the
186 	 * core and send the set dx IPC for the core.
187 	 */
188 	if (core_id != SKL_DSP_CORE0_ID) {
189 		ret = skl_dsp_enable_core(ctx, core_mask);
190 		if (ret < 0)
191 			return ret;
192 
193 		dx.core_mask = core_mask;
194 		dx.dx_mask = core_mask;
195 
196 		ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
197 					SKL_BASE_FW_MODULE_ID, &dx);
198 		if (ret < 0) {
199 			dev_err(ctx->dev, "Failed to set dsp to D0:core id= %d\n",
200 					core_id);
201 			skl_dsp_disable_core(ctx, core_mask);
202 		}
203 	}
204 
205 	skl->cores.state[core_id] = SKL_DSP_RUNNING;
206 
207 	return ret;
208 }
209 
/*
 * Move a DSP core to the D3 (powered-off) state.
 *
 * A set-Dx IPC is sent first; even if that IPC fails, the function
 * deliberately continues with interrupt/controller teardown and the
 * core power-down so the hardware ends up disabled regardless.
 *
 * Returns 0 on success or the negative error from the final
 * skl_dsp_disable_core() call (an earlier IPC failure is logged but
 * intentionally overwritten by that result).
 */
static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0)
		dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);

	if (core_id == SKL_DSP_CORE0_ID) {
		/* disable Interrupt */
		ctx->cl_dev.ops.cl_cleanup_controller(ctx);
		skl_cldma_int_disable(ctx);
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0)
		return ret;

	/* power-down succeeded: record the core as held in reset */
	skl->cores.state[core_id] = SKL_DSP_RESET;
	return ret;
}
239 
240 static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
241 {
242 	 return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
243 }
244 
245 /*
246  * since get/set_module are called from DAPM context,
247  * we don't need lock for usage count
248  */
249 static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
250 {
251 	struct skl_module_table *module;
252 
253 	list_for_each_entry(module, &ctx->module_list, list) {
254 		if (module->mod_info->mod_id == mod_id)
255 			return ++module->usage_cnt;
256 	}
257 
258 	return -EINVAL;
259 }
260 
261 static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
262 {
263 	struct skl_module_table *module;
264 
265 	list_for_each_entry(module, &ctx->module_list, list) {
266 		if (module->mod_info->mod_id == mod_id)
267 			return --module->usage_cnt;
268 	}
269 
270 	return -EINVAL;
271 }
272 
273 static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
274 						char *mod_name, int mod_id)
275 {
276 	const struct firmware *fw;
277 	struct skl_module_table *skl_module;
278 	unsigned int size;
279 	int ret;
280 
281 	ret = request_firmware(&fw, mod_name, ctx->dev);
282 	if (ret < 0) {
283 		dev_err(ctx->dev, "Request Module %s failed :%d\n",
284 							mod_name, ret);
285 		return NULL;
286 	}
287 
288 	skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
289 	if (skl_module == NULL) {
290 		release_firmware(fw);
291 		return NULL;
292 	}
293 
294 	size = sizeof(*skl_module->mod_info);
295 	skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
296 	if (skl_module->mod_info == NULL) {
297 		release_firmware(fw);
298 		return NULL;
299 	}
300 
301 	skl_module->mod_info->mod_id = mod_id;
302 	skl_module->mod_info->fw = fw;
303 	list_add(&skl_module->list, &ctx->module_list);
304 
305 	return skl_module;
306 }
307 
308 /* get a module from its unique ID */
309 static struct skl_module_table *skl_module_get_from_id(
310 			struct sst_dsp *ctx, u16 mod_id)
311 {
312 	struct skl_module_table *module;
313 
314 	if (list_empty(&ctx->module_list)) {
315 		dev_err(ctx->dev, "Module list is empty\n");
316 		return NULL;
317 	}
318 
319 	list_for_each_entry(module, &ctx->module_list, list) {
320 		if (module->mod_info->mod_id == mod_id)
321 			return module;
322 	}
323 
324 	return NULL;
325 }
326 
327 static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
328 				u32 size, u16 mod_id)
329 {
330 	int ret, bytes_left, curr_pos;
331 	struct skl_sst *skl = ctx->thread_context;
332 	skl->mod_load_complete = false;
333 	init_waitqueue_head(&skl->mod_load_wait);
334 
335 	bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
336 	if (bytes_left < 0)
337 		return bytes_left;
338 
339 	ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
340 	if (ret < 0) {
341 		dev_err(ctx->dev, "Failed to Load module: %d\n", ret);
342 		goto out;
343 	}
344 
345 	/*
346 	 * if bytes_left > 0 then wait for BDL complete interrupt and
347 	 * copy the next chunk till bytes_left is 0. if bytes_left is
348 	 * is zero, then wait for load module IPC reply
349 	 */
350 	while (bytes_left > 0) {
351 		curr_pos = size - bytes_left;
352 
353 		ret = skl_cldma_wait_interruptible(ctx);
354 		if (ret < 0)
355 			goto out;
356 
357 		bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
358 							data + curr_pos,
359 							bytes_left, false);
360 	}
361 
362 	ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
363 				msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
364 	if (ret == 0 || !skl->mod_load_status) {
365 		dev_err(ctx->dev, "Module Load failed\n");
366 		ret = -EIO;
367 	}
368 
369 out:
370 	ctx->cl_dev.ops.cl_stop_dma(ctx);
371 
372 	return ret;
373 }
374 
375 static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
376 {
377 	struct skl_module_table *module_entry = NULL;
378 	int ret = 0;
379 	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */
380 	uuid_le *uuid_mod;
381 
382 	uuid_mod = (uuid_le *)guid;
383 	snprintf(mod_name, sizeof(mod_name), "%s%pUL%s",
384 				"intel/dsp_fw_", uuid_mod, ".bin");
385 
386 	module_entry = skl_module_get_from_id(ctx, mod_id);
387 	if (module_entry == NULL) {
388 		module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
389 		if (module_entry == NULL) {
390 			dev_err(ctx->dev, "Failed to Load module\n");
391 			return -EINVAL;
392 		}
393 	}
394 
395 	if (!module_entry->usage_cnt) {
396 		ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
397 				module_entry->mod_info->fw->size, mod_id);
398 		if (ret < 0) {
399 			dev_err(ctx->dev, "Failed to Load module\n");
400 			return ret;
401 		}
402 	}
403 
404 	ret = skl_get_module(ctx, mod_id);
405 
406 	return ret;
407 }
408 
409 static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
410 {
411 	int usage_cnt;
412 	struct skl_sst *skl = ctx->thread_context;
413 	int ret = 0;
414 
415 	usage_cnt = skl_put_module(ctx, mod_id);
416 	if (usage_cnt < 0) {
417 		dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
418 		return -EIO;
419 	}
420 
421 	/* if module is used by others return, no need to unload */
422 	if (usage_cnt > 0)
423 		return 0;
424 
425 	ret = skl_ipc_unload_modules(&skl->ipc,
426 			SKL_NUM_MODULES, &mod_id);
427 	if (ret < 0) {
428 		dev_err(ctx->dev, "Failed to UnLoad module\n");
429 		skl_get_module(ctx, mod_id);
430 		return ret;
431 	}
432 
433 	return ret;
434 }
435 
436 void skl_clear_module_cnt(struct sst_dsp *ctx)
437 {
438 	struct skl_module_table *module;
439 
440 	if (list_empty(&ctx->module_list))
441 		return;
442 
443 	list_for_each_entry(module, &ctx->module_list, list) {
444 		module->usage_cnt = 0;
445 	}
446 }
447 EXPORT_SYMBOL_GPL(skl_clear_module_cnt);
448 
449 static void skl_clear_module_table(struct sst_dsp *ctx)
450 {
451 	struct skl_module_table *module, *tmp;
452 
453 	if (list_empty(&ctx->module_list))
454 		return;
455 
456 	list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
457 		list_del(&module->list);
458 		release_firmware(module->mod_info->fw);
459 	}
460 }
461 
/* Firmware/power operations exposed to the generic Skylake DSP layer */
static struct skl_dsp_fw_ops skl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

/* Low-level shim register/memory accessors used by the common SST core */
static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.free = skl_dsp_free,
};

/* DSP device descriptor; thread_context is filled in at init time */
static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};
484 
/*
 * Allocate and initialize the SKL DSP context: device context, mailbox
 * windows, IPC, module/UUID lists and loader ops.
 *
 * Note: the firmware itself is NOT loaded here; callers follow up with
 * skl_sst_init_fw().  On success *dsp (if non-NULL) receives the new
 * context.  Returns 0 or a negative error code.
 */
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
{
	struct skl_sst *skl;
	struct sst_dsp *sst;
	int ret;

	skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
	if (skl == NULL)
		return -ENOMEM;

	skl->dev = dev;
	/* must be set before skl_dsp_ctx_init() so the IRQ thread sees it */
	skl_dev.thread_context = skl;
	INIT_LIST_HEAD(&skl->uuid_list);

	skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
	if (!skl->dsp) {
		dev_err(skl->dev, "%s: no device\n", __func__);
		return -ENODEV;
	}

	sst = skl->dsp;

	sst->fw_name = fw_name;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	/* mailbox lives in SRAM0 right after the FW status window */
	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	INIT_LIST_HEAD(&sst->module_list);
	sst->dsp_ops = dsp_ops;
	sst->fw_ops = skl_fw_ops;

	ret = skl_ipc_init(dev, skl);
	if (ret)
		return ret;

	/* SKL has two DSP cores; presumably fixed for this platform */
	skl->cores.count = 2;
	skl->is_first_boot = true;

	if (dsp)
		*dsp = skl;

	return ret;
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
531 
532 int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
533 {
534 	int ret;
535 	struct sst_dsp *sst = ctx->dsp;
536 
537 	ret = sst->fw_ops.load_fw(sst);
538 	if (ret < 0) {
539 		dev_err(dev, "Load base fw failed : %d\n", ret);
540 		return ret;
541 	}
542 
543 	skl_dsp_init_core_state(sst);
544 	ctx->is_first_boot = false;
545 
546 	return 0;
547 }
548 EXPORT_SYMBOL_GPL(skl_sst_init_fw);
549 
/*
 * Tear down the SKL DSP context: drop the cached firmware, release all
 * module binaries and UUID entries, free the IPC, then free the DSP
 * context and disable the CL DMA if the FW ever booted.
 *
 * NOTE(review): ctx->dsp is dereferenced after ops->free(ctx->dsp);
 * this relies on skl_dsp_free not releasing the sst_dsp allocation
 * itself (e.g. devm-managed) — confirm before reordering.
 */
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{

	if (ctx->dsp->fw)
		release_firmware(ctx->dsp->fw);
	skl_clear_module_table(ctx->dsp);
	skl_freeup_uuid_list(ctx);
	skl_ipc_free(&ctx->ipc);
	ctx->dsp->ops->free(ctx->dsp);
	/* CL DMA was only set up if the firmware boot completed */
	if (ctx->boot_complete) {
		ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
		skl_cldma_int_disable(ctx->dsp);
	}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
565 
566 MODULE_LICENSE("GPL v2");
567 MODULE_DESCRIPTION("Intel Skylake IPC driver");
568