xref: /openbmc/linux/sound/soc/intel/skylake/skl-sst.c (revision a77e393c)
/*
 * skl-sst.c - HDA DSP library functions for SKL platform
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl-sst-ipc.h"

#define SKL_BASEFW_TIMEOUT	300
#define SKL_INIT_TIMEOUT	1000

/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE	0x8000

/* Firmware status window */
#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)

#define SKL_NUM_MODULES		1

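/* Compare the masked firmware status register against the expected status */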
static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
	u32 cur_sts;

	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;

	return (cur_sts == status);
}

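/*
 * DMA the base firmware image to the DSP via the code loader, poll the
 * FW status register for SKL_FW_RFW_START within SKL_BASEFW_TIMEOUT,
 * then stop the code loader DMA.
 */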
static int skl_transfer_firmware(struct sst_dsp *ctx,
		const void *basefw, u32 base_fw_size)
{
	int ret = 0;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size);
	if (ret < 0)
		return ret;

	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_FW_STATUS,
			SKL_FW_STS_MASK,
			SKL_FW_RFW_START,
			SKL_BASEFW_TIMEOUT,
			"Firmware boot");

	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284

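/*
 * Load the base firmware: request the image if it is not already cached,
 * parse the module UUIDs on first boot, strip the extended manifest,
 * power up core 0, prepare the code loader DMA, wait for the ROM to
 * report init done, then transfer the firmware and wait for the
 * FW-ready notification from the DSP.
 */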
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
	int ret = 0, i;
	struct skl_sst *skl = ctx->thread_context;
	struct firmware stripped_fw;
	u32 reg;

	skl->boot_complete = false;
	init_waitqueue_head(&skl->boot_wait);

	if (ctx->fw == NULL) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
			return -EIO;
		}
	}

	/* parse UUIDs on first boot */
	if (skl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw,
					SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
		if (ret < 0) {
			dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
			release_firmware(ctx->fw);
			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
			return ret;
		}
	}

	/* check for extended manifest */
	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;

	skl_dsp_strip_extended_manifest(&stripped_fw);

	ret = skl_dsp_boot(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Boot dsp core failed: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	ret = skl_cldma_prepare(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "CL dma prepare failed: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	/* enable interrupts */
	skl_ipc_int_enable(ctx);
	skl_ipc_op_int_enable(ctx);

	/* check ROM status */
	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
			dev_dbg(ctx->dev,
				"ROM loaded, we can continue with FW loading\n");
			break;
		}
		mdelay(1);
	}
	if (!i) {
		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
		dev_err(ctx->dev,
			"Timeout waiting for ROM init done, reg: 0x%x\n", reg);
		ret = -EIO;
		goto transfer_firmware_failed;
	}

	ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
	if (ret < 0) {
		dev_err(ctx->dev, "Transfer firmware failed: %d\n", ret);
		goto transfer_firmware_failed;
	} else {
		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
			ret = -EIO;
			goto transfer_firmware_failed;
		}

		dev_dbg(ctx->dev, "Download firmware successful: %d\n", ret);
		skl->fw_loaded = true;
	}
	return 0;
transfer_firmware_failed:
	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	release_firmware(ctx->fw);
	ctx->fw = NULL;
	return ret;
}

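/*
 * Move a DSP core to D0. Core 0 additionally (re)loads the base
 * firmware; any other core is powered up and then a set-Dx IPC is sent.
 */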
static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	/* If core0 is being turned on, we need to load the FW */
	if (core_id == SKL_DSP_CORE0_ID) {
		ret = skl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to load firmware\n");
			return ret;
		}
	}

	/*
	 * If any core other than core 0 is being moved to D0, enable the
	 * core and send the set dx IPC for the core.
	 */
	if (core_id != SKL_DSP_CORE0_ID) {
		ret = skl_dsp_enable_core(ctx, core_mask);
		if (ret < 0)
			return ret;

		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
					SKL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to set DSP to D0, core id: %d\n",
					core_id);
			skl_dsp_disable_core(ctx, core_mask);
		}
	}

	skl->cores.state[core_id] = SKL_DSP_RUNNING;

	return ret;
}

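/*
 * Move a DSP core to D3: send the set-Dx IPC, tear down the code loader
 * and interrupts for core 0, and power the core down.
 */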
static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
				SKL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0)
		dev_err(ctx->dev, "set Dx core %d failed: %d\n", core_id, ret);

	if (core_id == SKL_DSP_CORE0_ID) {
		/* clean up the code loader and disable interrupts */
		ctx->cl_dev.ops.cl_cleanup_controller(ctx);
		skl_cldma_int_disable(ctx);
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0)
		return ret;

	skl->cores.state[core_id] = SKL_DSP_RESET;
	return ret;
}

static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}

/*
 * Since get/put_module are called from the DAPM context,
 * no locking is needed for the usage count.
 */
static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return ++module->usage_cnt;
	}

	return -EINVAL;
}

static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return --module->usage_cnt;
	}

	return -EINVAL;
}

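/*
 * Request the module binary from userspace and add a new entry for it
 * to the module table.
 */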
static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
						char *mod_name, int mod_id)
{
	const struct firmware *fw;
	struct skl_module_table *skl_module;
	unsigned int size;
	int ret;

	ret = request_firmware(&fw, mod_name, ctx->dev);
	if (ret < 0) {
		dev_err(ctx->dev, "Request module %s failed: %d\n",
							mod_name, ret);
		return NULL;
	}

	skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
	if (skl_module == NULL) {
		release_firmware(fw);
		return NULL;
	}

	size = sizeof(*skl_module->mod_info);
	skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
	if (skl_module->mod_info == NULL) {
		release_firmware(fw);
		return NULL;
	}

	skl_module->mod_info->mod_id = mod_id;
	skl_module->mod_info->fw = fw;
	list_add(&skl_module->list, &ctx->module_list);

	return skl_module;
}

/* get a module from its unique ID */
static struct skl_module_table *skl_module_get_from_id(
			struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list)) {
		dev_err(ctx->dev, "Module list is empty\n");
		return NULL;
	}

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return module;
	}

	return NULL;
}

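/*
 * Copy a module binary to the code loader DMA buffer and ask the
 * firmware to load it via the load-modules IPC.
 */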
static int skl_transfer_module(struct sst_dsp *ctx,
			struct skl_load_module_info *module)
{
	int ret;
	struct skl_sst *skl = ctx->thread_context;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, module->fw->data,
							module->fw->size);
	if (ret < 0)
		return ret;

	ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES,
						(void *)&module->mod_id);
	if (ret < 0)
		dev_err(ctx->dev, "Failed to load module: %d\n", ret);

	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

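/*
 * Load a module: the firmware file name is derived from the module's
 * GUID, the module table is consulted (and filled on first use), the
 * binary is transferred if the module is not already in use, and the
 * module's usage count is incremented.
 */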
static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
{
	struct skl_module_table *module_entry = NULL;
	int ret = 0;
	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */
	uuid_le *uuid_mod;

	uuid_mod = (uuid_le *)guid;
	snprintf(mod_name, sizeof(mod_name), "%s%pUL%s",
				"intel/dsp_fw_", uuid_mod, ".bin");

	module_entry = skl_module_get_from_id(ctx, mod_id);
	if (module_entry == NULL) {
		module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
		if (module_entry == NULL) {
			dev_err(ctx->dev, "Failed to add module to table\n");
			return -EINVAL;
		}
	}

	if (!module_entry->usage_cnt) {
		ret = skl_transfer_module(ctx, module_entry->mod_info);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to transfer module: %d\n", ret);
			return ret;
		}
	}

	ret = skl_get_module(ctx, mod_id);

	return ret;
}

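/*
 * Drop a reference on a module and send the unload-modules IPC; the
 * reference is re-taken if the IPC fails.
 */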
static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
{
	int usage_cnt;
	struct skl_sst *skl = ctx->thread_context;
	int ret = 0;

	usage_cnt = skl_put_module(ctx, mod_id);
	if (usage_cnt < 0) {
		dev_err(ctx->dev, "Module bad usage count: %d\n", usage_cnt);
		return -EIO;
	}
	ret = skl_ipc_unload_modules(&skl->ipc,
			SKL_NUM_MODULES, &mod_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to unload module\n");
		skl_get_module(ctx, mod_id);
		return ret;
	}

	return ret;
}

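/* Reset the usage count of every module in the module table */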
void skl_clear_module_cnt(struct sst_dsp *ctx)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry(module, &ctx->module_list, list)
		module->usage_cnt = 0;
}
EXPORT_SYMBOL_GPL(skl_clear_module_cnt);

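/* Release all module firmware images and empty the module table */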
static void skl_clear_module_table(struct sst_dsp *ctx)
{
	struct skl_module_table *module, *tmp;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
		list_del(&module->list);
		release_firmware(module->mod_info->fw);
	}
}

static struct skl_dsp_fw_ops skl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.free = skl_dsp_free,
};

static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};

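/*
 * Allocate and initialize the SKL DSP context: set up the SST context,
 * mailbox, loader ops, firmware ops and IPC, and hand the context back
 * to the caller.
 */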
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
		struct skl_sst **dsp)
{
	struct skl_sst *skl;
	struct sst_dsp *sst;
	int ret;

	skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
	if (skl == NULL)
		return -ENOMEM;

	skl->dev = dev;
	skl_dev.thread_context = skl;
	INIT_LIST_HEAD(&skl->uuid_list);

	skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
	if (!skl->dsp) {
		dev_err(skl->dev, "%s: no device\n", __func__);
		return -ENODEV;
	}

	sst = skl->dsp;

	sst->fw_name = fw_name;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	INIT_LIST_HEAD(&sst->module_list);
	sst->dsp_ops = dsp_ops;
	sst->fw_ops = skl_fw_ops;

	ret = skl_ipc_init(dev, skl);
	if (ret)
		return ret;

	skl->cores.count = 2;
	skl->is_first_boot = true;

	if (dsp)
		*dsp = skl;

	return ret;
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);

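/* Download the base firmware and initialize the DSP core states */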
int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
{
	int ret;
	struct sst_dsp *sst = ctx->dsp;

	ret = sst->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "Load base fw failed: %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);
	ctx->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_init_fw);

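/*
 * Tear down the DSP context: drop loaded modules and UUIDs, free the
 * IPC, free the SST context and, if the DSP booted, clean up the code
 * loader and disable its interrupt.
 */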
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
	skl_clear_module_table(ctx->dsp);
	skl_freeup_uuid_list(ctx);
	skl_ipc_free(&ctx->ipc);
	ctx->dsp->ops->free(ctx->dsp);
	if (ctx->boot_complete) {
		ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
		skl_cldma_int_disable(ctx->dsp);
	}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");