xref: /openbmc/linux/sound/soc/sof/trace.c (revision 5e012745)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license.  When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2018 Intel Corporation. All rights reserved.
7 //
8 // Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
9 //
10 
11 #include <linux/debugfs.h>
12 #include <linux/sched/signal.h>
13 #include "sof-priv.h"
14 #include "ops.h"
15 
16 static size_t sof_trace_avail(struct snd_sof_dev *sdev,
17 			      loff_t pos, size_t buffer_size)
18 {
19 	loff_t host_offset = READ_ONCE(sdev->host_offset);
20 
21 	/*
22 	 * If host offset is less than local pos, it means write pointer of
23 	 * host DMA buffer has been wrapped. We should output the trace data
24 	 * at the end of host DMA buffer at first.
25 	 */
26 	if (host_offset < pos)
27 		return buffer_size - pos;
28 
29 	/* If there is available trace data now, it is unnecessary to wait. */
30 	if (host_offset > pos)
31 		return host_offset - pos;
32 
33 	return 0;
34 }
35 
/*
 * Like sof_trace_avail(), but blocks (interruptibly) until data arrives.
 *
 * Sleepers are woken by snd_sof_trace_update_pos() when the DSP reports
 * a new host_offset, by snd_sof_release_trace() when tracing stops, and
 * by snd_sof_trace_notify_for_error() on a DSP-side error.
 *
 * Returns the available byte count, or 0 for EOF once tracing has been
 * disabled and the remaining data has been drained.
 */
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
				   loff_t pos, size_t buffer_size)
{
	wait_queue_entry_t wait;
	size_t ret = sof_trace_avail(sdev, pos, buffer_size);

	/* data immediately available */
	if (ret)
		return ret;

	if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
		/*
		 * tracing has ended and all traces have been
		 * read by client, return EOF
		 */
		sdev->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	/*
	 * NOTE(review): a wake_up() arriving between the availability
	 * check above and add_wait_queue() below would be missed; the
	 * sleeper then waits for the next position update. Confirm this
	 * is acceptable (position IPCs are periodic) or switch to the
	 * wait_event_interruptible() helper.
	 */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&sdev->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&sdev->trace_sleep, &wait);

	/* woken up (or signalled): recompute what can be read now */
	return sof_trace_avail(sdev, pos, buffer_size);
}
68 
/*
 * debugfs read() handler for the "trace" entry.
 *
 * Maps the linear file position onto the circular host DMA buffer,
 * blocks until trace data is available, and copies at most one
 * contiguous run (no wrap within a single read) to userspace.
 *
 * Returns the number of bytes copied, 0 at EOF, or a negative errno.
 */
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
				       size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	/*
	 * NOTE(review): this clears dtrace_error, so an error signalled
	 * before this read began is discarded rather than reported —
	 * confirm that only errors raised while we sleep below matter.
	 */
	sdev->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	/* fold the linear file offset into the ring: lpos %= buffer_size */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
		count = buffer_size - lpos;

	/* get available count based on current host offset */
	avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
	if (sdev->dtrace_error) {
		dev_err(sdev->dev, "error: trace IO error\n");
		return -EIO;
	}

	/* make sure count is <= avail */
	count = avail > count ? count : avail;

	/* copy available trace data to debugfs */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	/* advance the linear position; the modulo above handles wrap */
	*ppos += count;

	/* move debugfs reading position */
	return count;
}
115 
116 static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
117 {
118 	struct snd_sof_dfsentry *dfse = inode->i_private;
119 	struct snd_sof_dev *sdev = dfse->sdev;
120 
121 	/* avoid duplicate traces at next open */
122 	if (!sdev->dtrace_is_enabled)
123 		sdev->host_offset = 0;
124 
125 	return 0;
126 }
127 
128 static const struct file_operations sof_dfs_trace_fops = {
129 	.open = simple_open,
130 	.read = sof_dfsentry_trace_read,
131 	.llseek = default_llseek,
132 	.release = sof_dfsentry_trace_release,
133 };
134 
135 static int trace_debugfs_create(struct snd_sof_dev *sdev)
136 {
137 	struct snd_sof_dfsentry *dfse;
138 
139 	if (!sdev)
140 		return -EINVAL;
141 
142 	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
143 	if (!dfse)
144 		return -ENOMEM;
145 
146 	dfse->type = SOF_DFSENTRY_TYPE_BUF;
147 	dfse->buf = sdev->dmatb.area;
148 	dfse->size = sdev->dmatb.bytes;
149 	dfse->sdev = sdev;
150 
151 	dfse->dfsentry = debugfs_create_file("trace", 0444, sdev->debugfs_root,
152 					     dfse, &sof_dfs_trace_fops);
153 	if (!dfse->dfsentry) {
154 		/* can't rely on debugfs, only log error and keep going */
155 		dev_err(sdev->dev,
156 			"error: cannot create debugfs entry for trace\n");
157 	}
158 
159 	return 0;
160 }
161 
162 int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
163 {
164 	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
165 	struct sof_ipc_fw_version *v = &ready->version;
166 	struct sof_ipc_dma_trace_params_ext params;
167 	struct sof_ipc_reply ipc_reply;
168 	int ret;
169 
170 	if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
171 		return -EINVAL;
172 
173 	/* set IPC parameters */
174 	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
175 	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
176 	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
177 		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
178 		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
179 		params.timestamp_ns = ktime_get(); /* in nanosecond */
180 	} else {
181 		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
182 		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
183 	}
184 	params.buffer.phy_addr = sdev->dmatp.addr;
185 	params.buffer.size = sdev->dmatb.bytes;
186 	params.buffer.pages = sdev->dma_trace_pages;
187 	params.stream_tag = 0;
188 
189 	sdev->host_offset = 0;
190 	sdev->dtrace_draining = false;
191 
192 	ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
193 	if (ret < 0) {
194 		dev_err(sdev->dev,
195 			"error: fail in snd_sof_dma_trace_init %d\n", ret);
196 		return ret;
197 	}
198 	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);
199 
200 	/* send IPC to the DSP */
201 	ret = sof_ipc_tx_message(sdev->ipc,
202 				 params.hdr.cmd, &params, sizeof(params),
203 				 &ipc_reply, sizeof(ipc_reply));
204 	if (ret < 0) {
205 		dev_err(sdev->dev,
206 			"error: can't set params for DMA for trace %d\n", ret);
207 		goto trace_release;
208 	}
209 
210 	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
211 	if (ret < 0) {
212 		dev_err(sdev->dev,
213 			"error: snd_sof_dma_trace_trigger: start: %d\n", ret);
214 		goto trace_release;
215 	}
216 
217 	sdev->dtrace_is_enabled = true;
218 
219 	return 0;
220 
221 trace_release:
222 	snd_sof_dma_trace_release(sdev);
223 	return ret;
224 }
225 
/*
 * One-time trace setup: allocate the DMA page table and trace data
 * buffer, build the compressed page table for the firmware, create the
 * debugfs entry (first boot only) and enable tracing via IPC.
 *
 * Returns 0 on success or a negative errno; all DMA allocations are
 * unwound on failure via the goto-cleanup chain below.
 */
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
	int ret;

	/* set false before start initialization */
	sdev->dtrace_is_enabled = false;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &sdev->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	/* on success the return value is the number of pages mapped */
	ret = snd_sof_create_page_table(sdev, &sdev->dmatb, sdev->dmatp.area,
					sdev->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	sdev->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

	/* debugfs entry persists across reboots, so create it only once */
	if (sdev->first_boot) {
		ret = trace_debugfs_create(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&sdev->trace_sleep);

	ret = snd_sof_init_trace_ipc(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	sdev->dma_trace_pages = 0;
	snd_dma_free_pages(&sdev->dmatb);
page_err:
	snd_dma_free_pages(&sdev->dmatp);
	return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);
281 
282 int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
283 			     struct sof_ipc_dma_trace_posn *posn)
284 {
285 	if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
286 		sdev->host_offset = posn->host_offset;
287 		wake_up(&sdev->trace_sleep);
288 	}
289 
290 	if (posn->overflow != 0)
291 		dev_err(sdev->dev,
292 			"error: DSP trace buffer overflow %u bytes. Total messages %d\n",
293 			posn->overflow, posn->messages);
294 
295 	return 0;
296 }
297 
298 /* an error has occurred within the DSP that prevents further trace */
299 void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
300 {
301 	if (sdev->dtrace_is_enabled) {
302 		dev_err(sdev->dev, "error: waking up any trace sleepers\n");
303 		sdev->dtrace_error = true;
304 		wake_up(&sdev->trace_sleep);
305 	}
306 }
307 EXPORT_SYMBOL(snd_sof_trace_notify_for_error);
308 
309 void snd_sof_release_trace(struct snd_sof_dev *sdev)
310 {
311 	int ret;
312 
313 	if (!sdev->dtrace_is_enabled)
314 		return;
315 
316 	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
317 	if (ret < 0)
318 		dev_err(sdev->dev,
319 			"error: snd_sof_dma_trace_trigger: stop: %d\n", ret);
320 
321 	ret = snd_sof_dma_trace_release(sdev);
322 	if (ret < 0)
323 		dev_err(sdev->dev,
324 			"error: fail in snd_sof_dma_trace_release %d\n", ret);
325 
326 	sdev->dtrace_is_enabled = false;
327 	sdev->dtrace_draining = true;
328 	wake_up(&sdev->trace_sleep);
329 }
330 EXPORT_SYMBOL(snd_sof_release_trace);
331 
332 void snd_sof_free_trace(struct snd_sof_dev *sdev)
333 {
334 	snd_sof_release_trace(sdev);
335 
336 	snd_dma_free_pages(&sdev->dmatb);
337 	snd_dma_free_pages(&sdev->dmatp);
338 }
339 EXPORT_SYMBOL(snd_sof_free_trace);
340