/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

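/*
 * Example (illustrative only, not from a real platform driver): the ops
 * dispatched through sof_ops() come from the platform descriptor that each
 * platform driver registers.  A minimal sketch, with hypothetical names
 * my_dsp_ops/my_platform_desc/my_*() callbacks:
 *
 *	static const struct snd_sof_dsp_ops my_dsp_ops = {
 *		.probe		= my_dsp_probe,
 *		.run		= my_dsp_run,
 *		.block_read	= my_block_read,
 *		.block_write	= my_block_write,
 *		.send_msg	= my_send_msg,
 *		.load_firmware	= my_load_firmware,
 *	};
 *
 *	static const struct sof_dev_desc my_platform_desc = {
 *		.ops = &my_dsp_ops,
 *	};
 */
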
/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline int snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		return sof_ops(sdev)->remove(sdev);

	return 0;
}

static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->shutdown)
		return sof_ops(sdev)->shutdown(sdev);

	return 0;
}

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}
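
/*
 * Example (hypothetical caller, for illustration): a successful boot
 * reports which cores are up, so the return value is either a negative
 * error code or a usable core mask.
 *
 *	ret = snd_sof_dsp_run(sdev);
 *	if (ret < 0)
 *		return ret;
 *	core_mask = ret;
 */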

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core get/put */
static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_get) {
		int ret;

		/* if current ref_count is > 0, increment it and return */
		if (sdev->dsp_core_ref_count[core] > 0) {
			sdev->dsp_core_ref_count[core]++;
			return 0;
		}

		/* power up the core */
		ret = sof_ops(sdev)->core_get(sdev, core);
		if (ret < 0)
			return ret;

		/* increment ref_count */
		sdev->dsp_core_ref_count[core]++;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask |= BIT(core);

		dev_dbg(sdev->dev, "Core %d powered up\n", core);
	}

	return 0;
}

static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_put) {
		int ret;

		/* decrement ref_count and return if it is > 0 */
		if (--(sdev->dsp_core_ref_count[core]) > 0)
			return 0;

		/* power down the core */
		ret = sof_ops(sdev)->core_put(sdev, core);
		if (ret < 0)
			return ret;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask &= ~BIT(core);

		dev_dbg(sdev->dev, "Core %d powered down\n", core);
	}

	return 0;
}
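
/*
 * Example (hypothetical helper, for illustration): the get/put pair above
 * reference-counts DSP cores, so callers typically bracket work on a
 * secondary core like this:
 *
 *	ret = snd_sof_dsp_core_get(sdev, core);
 *	if (ret < 0)
 *		return ret;
 *
 *	... use the powered-up core ...
 *
 *	snd_sof_dsp_core_put(sdev, core);
 */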

/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* parse platform specific extended manifest */
static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
							  const struct sof_ext_man_elem_header *hdr)
{
	if (sof_ops(sdev)->parse_platform_ext_manifest)
		return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);

	return 0;
}

/* misc */

/**
 * snd_sof_dsp_get_bar_index - Maps a section type to a BAR index
 *
 * @sdev: sof device
 * @type: section type as described by snd_sof_fw_blk_type
 *
 * Returns the corresponding BAR index (a non-negative integer) or -EINVAL
 * in case there is no mapping
 */
static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	if (sof_ops(sdev)->get_bar_index)
		return sof_ops(sdev)->get_bar_index(sdev, type);

	return sdev->mmio_bar;
}
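
/*
 * Example (illustrative only; assumes the SOF_FW_BLK_TYPE_SRAM block type):
 * the BAR index returned above is what the register and block I/O helpers
 * below take as their bar argument.
 *
 *	bar = snd_sof_dsp_get_bar_index(sdev, SOF_FW_BLK_TYPE_SRAM);
 *	if (bar < 0)
 *		return bar;
 *	val = snd_sof_dsp_read(sdev, bar, offset);
 */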

static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->get_mailbox_offset)
		return sof_ops(sdev)->get_mailbox_offset(sdev);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
						u32 id)
{
	if (sof_ops(sdev)->get_window_offset)
		return sof_ops(sdev)->get_window_offset(sdev, id);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
				      u32 target_state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, target_state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}

/* debug */
void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);

static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
		enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
		const char *name, enum sof_debugfs_access_type access_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
		return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
							      size, name, access_type);

	return 0;
}

/* register IO */
static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write) {
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64) {
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}
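
/*
 * Example (illustrative read-modify-write; MY_CTRL_REG and MY_CTRL_ENABLE
 * are hypothetical): the accessors above take a BAR index plus an offset
 * relative to that BAR, so a simple bit update can be open-coded as:
 *
 *	val = snd_sof_dsp_read(sdev, bar, MY_CTRL_REG);
 *	val |= MY_CTRL_ENABLE;
 *	snd_sof_dsp_write(sdev, bar, MY_CTRL_REG, val);
 *
 * The snd_sof_dsp_update_bits*() helpers declared at the end of this file
 * wrap the same pattern.
 */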

/* block IO */
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}

static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}

/* mailbox IO */
static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
					    u32 offset, void *dest, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_read)
		sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
}

static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
					     u32 offset, void *src, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_write)
		sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
}
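
/*
 * Example (illustrative only; the reply variable is hypothetical): the
 * mailbox helpers are typically paired with the platform-reported mailbox
 * offset, e.g. to fetch message data from the DSP:
 *
 *	offset = snd_sof_dsp_get_mailbox_offset(sdev);
 *	if (offset < 0)
 *		return offset;
 *	snd_sof_dsp_mailbox_read(sdev, offset, &reply, sizeof(reply));
 */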

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host DMA trace */
static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
					 struct sof_ipc_dma_trace_params_ext *dtrace_params)
{
	if (sof_ops(sdev)->trace_init)
		return sof_ops(sdev)->trace_init(sdev, dtrace_params);

	return 0;
}

static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->trace_release)
		return sof_ops(sdev)->trace_release(sdev);

	return 0;
}

static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
{
	if (sof_ops(sdev)->trace_trigger)
		return sof_ops(sdev)->trace_trigger(sdev, cmd);

	return 0;
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct snd_sof_platform_stream_params *platform_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
						    platform_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* Firmware loading */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}

/* host DSP message data */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_pcm_substream *substream,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}

/* host side configuration of the stream's data offset in stream mailbox area */
static inline int
snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       size_t posn_offset)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
		return sof_ops(sdev)->set_stream_data_offset(sdev, substream,
							     posn_offset);

	return 0;
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

/* pcm ack */
static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
					   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
		return sof_ops(sdev)->pcm_ack(sdev, substream);

	return 0;
}

/* machine driver */
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
		return sof_ops(sdev)->machine_register(sdev, pdata);

	return 0;
}

static inline void
snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
		sof_ops(sdev)->machine_unregister(sdev, pdata);
}

static inline struct snd_soc_acpi_mach *
snd_sof_machine_select(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
		return sof_ops(sdev)->machine_select(sdev);

	return NULL;
}

static inline void
snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
			struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
		sof_ops(sdev)->set_mach_params(mach, sdev);
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a DSP register
 * until a condition is met or a timeout occurs
 * @sdev: sof device
 * @bar: BAR index of the register, as used by snd_sof_dsp_read()
 * @offset: Register offset within @bar
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops).  Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last value read from the register is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) {							\
		(val) = snd_sof_dsp_read(sdev, bar, offset);		\
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timedout\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
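
/*
 * Example (illustrative only; MY_FW_STATUS and MY_FW_READY are
 * hypothetical register/value names): poll a firmware status register
 * every ~500us for at most 10ms:
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, bar, MY_FW_STATUS,
 *					    status, status == MY_FW_READY,
 *					    500, 10000);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "timed out waiting for firmware\n");
 */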

/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);
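
/*
 * Example (illustrative only; MY_CTRL_REG and MY_CTRL_RESET_MASK are
 * hypothetical): the update_bits helpers do a read-modify-write and
 * return true when the register value actually changed.
 *
 *	if (snd_sof_dsp_update_bits(sdev, bar, MY_CTRL_REG,
 *				    MY_CTRL_RESET_MASK, 0))
 *		dev_dbg(sdev->dev, "reset bits cleared\n");
 */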

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);
#endif