/*
 * skl-sst-dsp.c - SKL SST library generic functions
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <sound/pcm.h>

#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl-sst-ipc.h"

/* various timeout values, in milliseconds */
#define SKL_DSP_PU_TO		50
#define SKL_DSP_PD_TO		50
#define SKL_DSP_RESET_TO	50

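/* Update the driver's view of the DSP state, serialized by ctx->mutex */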
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
	mutex_lock(&ctx->mutex);
	ctx->sst_state = state;
	mutex_unlock(&ctx->mutex);
}

/*
 * Initialize core power states and usage counts. To be called after a
 * successful first boot, at which point core 0 is running and all other
 * cores are held in reset.
 */
void skl_dsp_init_core_state(struct sst_dsp *ctx)
{
	struct skl_sst *skl = ctx->thread_context;
	int i;

	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
	skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;

	for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {
		skl->cores.state[i] = SKL_DSP_RESET;
		skl->cores.usage_count[i] = 0;
	}
}

/* Get the mask for all enabled cores */
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
{
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask, en_cores_mask;
	u32 val;

	core_mask = SKL_DSP_CORES_MASK(skl->cores.count);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	/* Cores having CPA bit set */
	en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
			SKL_ADSPCS_CPA_SHIFT;

	/* And cores having CRST bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
			SKL_ADSPCS_CRST_SHIFT;

	/* And cores having CSTALL bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
			SKL_ADSPCS_CSTALL_SHIFT;
	en_cores_mask &= core_mask;

	dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);

	return en_cores_mask;
}

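/*
 * Place the cores selected by core_mask in reset: set their CRST bits in
 * ADSPCS, then poll and re-read the register to confirm the bits latched.
 */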
static int
skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* request reset: set the CRST bits for the cores in core_mask */
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask));

	/* poll with timeout to check if the operation was successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_DSP_RESET_TO,
			"Set reset");
	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				SKL_ADSPCS_CRST_MASK(core_mask)) !=
				SKL_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}

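/*
 * Take the cores selected by core_mask out of reset: clear their CRST bits
 * in ADSPCS and poll until the hardware reports them cleared.
 */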
int skl_dsp_core_unset_reset_state(
		struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	dev_dbg(ctx->dev, "In %s\n", __func__);

	/* release reset: clear the CRST bits for the cores in core_mask */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_CRST_MASK(core_mask), 0);

	/* poll with timeout to check if the operation was successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			0,
			SKL_DSP_RESET_TO,
			"Unset reset");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}

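/*
 * A core is reported enabled when both its SPA (power request) and CPA
 * (power status) bits are set while its CRST (reset) and CSTALL (stall)
 * bits are clear.
 */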
static bool
is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
			(val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));

	dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
						is_enable, core_mask);

	return is_enable;
}

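/* Stall the cores selected by core_mask and then place them in reset */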
static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* stall core */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask),
			SKL_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return skl_dsp_core_set_reset_state(ctx, core_mask);
}

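/*
 * Run the cores selected by core_mask: take them out of reset, clear their
 * stall bits and verify that they report enabled; on failure, put them back
 * in reset.
 */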
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* unset reset state */
	ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask), 0);

	if (!is_skl_dsp_core_enable(ctx, core_mask)) {
		skl_dsp_reset_core(ctx, core_mask);
		dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}

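/*
 * Power up the cores selected by core_mask: request power by setting the
 * SPA bits and poll until the corresponding CPA (power status) bits are set.
 */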
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* request power: set the SPA bits for the cores in core_mask */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_SPA_MASK(core_mask),
			SKL_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if the operation was successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_DSP_PU_TO,
			"Power up");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
			SKL_ADSPCS_CPA_MASK(core_mask)) !=
			SKL_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}

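/*
 * Power down the cores selected by core_mask: clear the SPA bits and poll
 * until the corresponding CPA bits are cleared.
 */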
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* drop power request: clear the SPA bits for core_mask */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_SPA_MASK(core_mask), 0);

	/* poll with timeout to check if the operation was successful */
	return sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			0,
			SKL_DSP_PD_TO,
			"Power down");
}

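/* Power up and then start the cores selected by core_mask */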
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* power up */
	ret = skl_dsp_core_power_up(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	return skl_dsp_start_core(ctx, core_mask);
}

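/*
 * Disable the cores selected by core_mask: stall and reset them, power them
 * down, then confirm that they no longer report enabled.
 */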
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	ret = skl_dsp_reset_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	/* power down core */
	ret = skl_dsp_core_power_down(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
							core_mask, ret);
		return ret;
	}

	if (is_skl_dsp_core_enable(ctx, core_mask)) {
		dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
							core_mask, ret);
		ret = -EIO;
	}

	return ret;
}

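/*
 * Bring core 0 up and running. If it is already enabled it is reset and
 * restarted; otherwise it is power-cycled via disable followed by enable.
 */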
int skl_dsp_boot(struct sst_dsp *ctx)
{
	int ret;

	if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
		ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
			return ret;
		}

		ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
			return ret;
		}
	} else {
		ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
			return ret;
		}
		ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
	}

	return ret;
}

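/*
 * Top-half interrupt handler: snapshot ADSPIS and, for IPC or code-loader
 * DMA interrupts, mask the source and wake the threaded handler. A read of
 * all 1s means the registers are not accessible, so the IRQ is not ours.
 */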
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
	struct sst_dsp *ctx = dev_id;
	u32 val;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&ctx->spinlock);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
	ctx->intr_status = val;

	if (val == 0xffffffff) {
		spin_unlock(&ctx->spinlock);
		return IRQ_NONE;
	}

	if (val & SKL_ADSPIS_IPC) {
		skl_ipc_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	if (val & SKL_ADSPIS_CL_DMA) {
		skl_cldma_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&ctx->spinlock);

	return result;
}

/*
 * skl_dsp_get_core() and skl_dsp_put_core() are called from DAPM context
 * with the dapm mutex held, so no separate locking is used here.
 */
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_sst *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	skl->cores.usage_count[core_id]++;

	if (skl->cores.state[core_id] == SKL_DSP_RESET) {
		ret = ctx->fw_ops.set_state_D0(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to get core%d\n", core_id);
			goto out;
		}
	}

out:
	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_get_core);

int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_sst *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	if ((--skl->cores.usage_count[core_id] == 0) &&
		(skl->cores.state[core_id] != SKL_DSP_RESET)) {
		ret = ctx->fw_ops.set_state_D3(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to put core %d: %d\n",
					core_id, ret);
			skl->cores.usage_count[core_id]++;
		}
	}

	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_put_core);

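/*
 * skl_dsp_wake() and skl_dsp_sleep() are convenience wrappers that take and
 * release a reference on core 0. A typical caller pairs them around a burst
 * of DSP activity, for example (sketch):
 *
 *	ret = skl_dsp_wake(ctx);
 *	if (ret < 0)
 *		return ret;
 *	... talk to the firmware ...
 *	skl_dsp_sleep(ctx);
 */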
int skl_dsp_wake(struct sst_dsp *ctx)
{
	return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);

int skl_dsp_sleep(struct sst_dsp *ctx)
{
	return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);

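/*
 * Allocate and initialize an sst_dsp context: set up the locks, wire in the
 * device, ops and thread context, and run the device's init callback if one
 * is provided. Returns NULL on failure.
 */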
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
		struct sst_dsp_device *sst_dev, int irq)
{
	int ret;
	struct sst_dsp *sst;

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->sst_dev = sst_dev;
	sst->irq = irq;
	sst->ops = sst_dev->ops;
	sst->thread_context = sst_dev->thread_context;

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		ret = sst->ops->init(sst, NULL);
		if (ret < 0)
			return NULL;
	}

	return sst;
}

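/* Register the shared, threaded interrupt handler for the audio DSP */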
int skl_dsp_acquire_irq(struct sst_dsp *sst)
{
	struct sst_dsp_device *sst_dev = sst->sst_dev;
	int ret;

	/* Register the ISR */
	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (ret)
		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
			       sst->irq);

	return ret;
}

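/*
 * Tear down the DSP: mask the IPC interrupt, free the IRQ, mask the IPC
 * operation interrupt and disable core 0.
 */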
void skl_dsp_free(struct sst_dsp *dsp)
{
	skl_ipc_int_disable(dsp);

	free_irq(dsp->irq, dsp);
	skl_ipc_op_int_disable(dsp);
	skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);

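/* Report whether the driver currently considers the DSP to be running */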
bool is_skl_dsp_running(struct sst_dsp *ctx)
{
	return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);