// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include <sound/sof/ipc4/header.h>
#include "sof-priv.h"
#include "ipc4-priv.h"

/*
 * debug info window is organized in 16 (equal sized) pages:
 *
 *	------------------------
 *	| Page0  - descriptors |
 *	------------------------
 *	| Page1  - slot0       |
 *	------------------------
 *	| Page2  - slot1       |
 *	------------------------
 *	|  ...                 |
 *	------------------------
 *	| Page14 - slot13      |
 *	------------------------
 *	| Page15 - slot14      |
 *	------------------------
 *
 * The slot size == page size
 *
 * The first page contains descriptors for the remaining 15 slots.
 * The slot descriptor is:
 * u32 res_id;
 * u32 type;
 * u32 vma;
 *
 * Log buffer slots have the following layout:
 * u32 host_read_ptr;
 * u32 dsp_write_ptr;
 * u8 buffer[];
 *
 * The two pointers are offsets within the buffer.
 */
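/*
 * For reference (matching sof_mtrace_find_core_slots() below):
 * the descriptor of slot N is located at
 *	debug_box.offset + N * SOF_MTRACE_DESCRIPTOR_SIZE
 * and slot N itself starts at
 *	debug_box.offset + (N + 1) * SOF_MTRACE_SLOT_SIZE
 */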

#define SOF_MTRACE_DESCRIPTOR_SIZE		12 /* 3 x u32 */

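/* seconds between the Unix epoch (1970-01-01) and the FW epoch (1601-01-01) */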
#define FW_EPOCH_DELTA				11644473600LL

#define INVALID_SLOT_OFFSET			0xffffffff
#define MAX_ALLOWED_LIBRARIES			16
#define MAX_MTRACE_SLOTS			15

#define SOF_MTRACE_PAGE_SIZE			0x1000
#define SOF_MTRACE_SLOT_SIZE			SOF_MTRACE_PAGE_SIZE

/* debug log slot types */
#define SOF_MTRACE_SLOT_UNUSED			0x00000000
#define SOF_MTRACE_SLOT_CRITICAL_LOG		0x54524300 /* byte 0: core ID */
#define SOF_MTRACE_SLOT_DEBUG_LOG		0x474f4c00 /* byte 0: core ID */
#define SOF_MTRACE_SLOT_GDB_STUB		0x42444700
#define SOF_MTRACE_SLOT_TELEMETRY		0x4c455400
#define SOF_MTRACE_SLOT_BROKEN			0x44414544
/* for debug and critical types */
#define SOF_MTRACE_SLOT_CORE_MASK		GENMASK(7, 0)
#define SOF_MTRACE_SLOT_TYPE_MASK		GENMASK(31, 8)

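/* default log transfer timer periods, in milliseconds (256 ms and 4096 ms) */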
#define DEFAULT_AGING_TIMER_PERIOD_MS		0x100
#define DEFAULT_FIFO_FULL_TIMER_PERIOD_MS	0x1000

/* ipc4 log level and source definitions for logs_priorities_mask */
#define SOF_MTRACE_LOG_LEVEL_CRITICAL		BIT(0)
#define SOF_MTRACE_LOG_LEVEL_ERROR		BIT(1)
#define SOF_MTRACE_LOG_LEVEL_WARNING		BIT(2)
#define SOF_MTRACE_LOG_LEVEL_INFO		BIT(3)
#define SOF_MTRACE_LOG_LEVEL_VERBOSE		BIT(4)
#define SOF_MTRACE_LOG_SOURCE_INFRA		BIT(5) /* log source 0 */
#define SOF_MTRACE_LOG_SOURCE_HAL		BIT(6)
#define SOF_MTRACE_LOG_SOURCE_MODULE		BIT(7)
#define SOF_MTRACE_LOG_SOURCE_AUDIO		BIT(8)
#define SOF_MTRACE_LOG_SOURCE_SCHEDULER		BIT(9)
#define SOF_MTRACE_LOG_SOURCE_ULP_INFRA		BIT(10)
#define SOF_MTRACE_LOG_SOURCE_ULP_MODULE	BIT(11)
#define SOF_MTRACE_LOG_SOURCE_VISION		BIT(12) /* log source 7 */
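/*
 * The default mask (0x1ef) enables the CRITICAL, ERROR, WARNING and INFO
 * levels for the infra, HAL, module and audio log sources.
 */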
#define DEFAULT_LOGS_PRIORITIES_MASK	(SOF_MTRACE_LOG_LEVEL_CRITICAL | \
					 SOF_MTRACE_LOG_LEVEL_ERROR |	 \
					 SOF_MTRACE_LOG_LEVEL_WARNING |	 \
					 SOF_MTRACE_LOG_LEVEL_INFO |	 \
					 SOF_MTRACE_LOG_SOURCE_INFRA |	 \
					 SOF_MTRACE_LOG_SOURCE_HAL |	 \
					 SOF_MTRACE_LOG_SOURCE_MODULE |	 \
					 SOF_MTRACE_LOG_SOURCE_AUDIO)

struct sof_log_state_info {
	u32 aging_timer_period;
	u32 fifo_full_timer_period;
	u32 enable;
	u32 logs_priorities_mask[MAX_ALLOWED_LIBRARIES];
} __packed;

enum sof_mtrace_state {
	SOF_MTRACE_DISABLED,
	SOF_MTRACE_INITIALIZING,
	SOF_MTRACE_ENABLED,
};

struct sof_mtrace_core_data {
	struct snd_sof_dev *sdev;

	int id;
	u32 slot_offset;
	void *log_buffer;
	struct mutex buffer_lock; /* for log_buffer alloc/free */
	u32 host_read_ptr;
	u32 dsp_write_ptr;
	/* set when a position update IPC arrived before the slot offset was known */
	bool delayed_pos_update;
	wait_queue_head_t trace_sleep;
};

struct sof_mtrace_priv {
	struct snd_sof_dev *sdev;
	enum sof_mtrace_state mtrace_state;
	struct sof_log_state_info state_info;

	struct sof_mtrace_core_data cores[];
};

static int sof_ipc4_mtrace_dfs_open(struct inode *inode, struct file *file)
{
	struct sof_mtrace_core_data *core_data = inode->i_private;
	int ret;

	mutex_lock(&core_data->buffer_lock);

	if (core_data->log_buffer) {
		ret = -EBUSY;
		goto out;
	}

	ret = debugfs_file_get(file->f_path.dentry);
	if (unlikely(ret))
		goto out;

	core_data->log_buffer = kmalloc(SOF_MTRACE_SLOT_SIZE, GFP_KERNEL);
	if (!core_data->log_buffer) {
		debugfs_file_put(file->f_path.dentry);
		ret = -ENOMEM;
		goto out;
	}

	ret = simple_open(inode, file);
	if (ret) {
		kfree(core_data->log_buffer);
		debugfs_file_put(file->f_path.dentry);
	}

out:
	mutex_unlock(&core_data->buffer_lock);

	return ret;
}

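/*
 * Wait until the DSP advances the write pointer for this core's log slot.
 * The wait is interruptible and is woken up either by a position update
 * (sof_ipc4_mtrace_update_pos()) or by ipc4_mtrace_disable().
 */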
static bool sof_wait_mtrace_avail(struct sof_mtrace_core_data *core_data)
{
	wait_queue_entry_t wait;

	/* data immediately available */
	if (core_data->host_read_ptr != core_data->dsp_write_ptr)
		return true;

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&core_data->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&core_data->trace_sleep, &wait);

	if (core_data->host_read_ptr != core_data->dsp_write_ptr)
		return true;

	return false;
}

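/*
 * Read protocol towards user space: each read() returns a u32 byte count
 * followed by that many bytes of log data copied from the slot; a count of
 * 0 means that no data was available (or the wait was interrupted).
 */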
static ssize_t sof_ipc4_mtrace_dfs_read(struct file *file, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct sof_mtrace_core_data *core_data = file->private_data;
	u32 log_buffer_offset, log_buffer_size, read_ptr, write_ptr;
	struct snd_sof_dev *sdev = core_data->sdev;
	struct sof_mtrace_priv *priv = sdev->fw_trace_data;
	void *log_buffer = core_data->log_buffer;
	loff_t lpos = *ppos;
	u32 avail;
	int ret;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count || count < sizeof(avail))
		return 0;

	/* get available count based on current host offset */
	if (!sof_wait_mtrace_avail(core_data)) {
		/* No data available */
		avail = 0;
		if (copy_to_user(buffer, &avail, sizeof(avail)))
			return -EFAULT;

		return 0;
	}

	if (core_data->slot_offset == INVALID_SLOT_OFFSET)
		return 0;

	/* The log data buffer starts after the two pointers in the slot */
	log_buffer_offset = core_data->slot_offset + (sizeof(u32) * 2);
	/* The log data size excludes the pointers */
	log_buffer_size = SOF_MTRACE_SLOT_SIZE - (sizeof(u32) * 2);

	read_ptr = core_data->host_read_ptr;
	write_ptr = core_data->dsp_write_ptr;

	if (read_ptr < write_ptr)
		avail = write_ptr - read_ptr;
	else
		avail = log_buffer_size - read_ptr + write_ptr;

	if (!avail)
		return 0;

	if (avail > log_buffer_size)
		avail = log_buffer_size;

	/* Reserve space for the leading u32 that carries the avail value */
	if (avail > count - sizeof(avail))
		avail = count - sizeof(avail);

	if (sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
		dev_dbg(sdev->dev,
			"core%d, host read: %#x, dsp write: %#x, avail: %#x\n",
			core_data->id, read_ptr, write_ptr, avail);

	if (read_ptr < write_ptr) {
		/* Read data between read pointer and write pointer */
		sof_mailbox_read(sdev, log_buffer_offset + read_ptr, log_buffer, avail);
	} else {
		/* read from read pointer to end of the slot */
		sof_mailbox_read(sdev, log_buffer_offset + read_ptr, log_buffer,
				 avail - write_ptr);
		/* read from slot start to write pointer */
		if (write_ptr)
			sof_mailbox_read(sdev, log_buffer_offset,
					 (u8 *)(log_buffer) + avail - write_ptr,
					 write_ptr);
	}

	/* first write the number of bytes we have gathered */
	ret = copy_to_user(buffer, &avail, sizeof(avail));
	if (ret)
		return -EFAULT;

	/* Followed by the data itself */
	ret = copy_to_user(buffer + sizeof(avail), log_buffer, avail);
	if (ret)
		return -EFAULT;

	/* Update the host_read_ptr in the slot for this core */
	read_ptr += avail;
	if (read_ptr >= log_buffer_size)
		read_ptr -= log_buffer_size;
	sof_mailbox_write(sdev, core_data->slot_offset, &read_ptr, sizeof(read_ptr));

	/* Only update the host_read_ptr if mtrace is enabled */
	if (priv->mtrace_state != SOF_MTRACE_DISABLED)
		core_data->host_read_ptr = read_ptr;

	/*
	 * Advance the position by the full count so that user space issues a
	 * fresh read() for the next chunk; the data cannot be treated as a
	 * continuous stream because of the leading byte-count value.
	 */
	*ppos += count;

	return count;
}

static int sof_ipc4_mtrace_dfs_release(struct inode *inode, struct file *file)
{
	struct sof_mtrace_core_data *core_data = inode->i_private;

	debugfs_file_put(file->f_path.dentry);

	mutex_lock(&core_data->buffer_lock);
	kfree(core_data->log_buffer);
	core_data->log_buffer = NULL;
	mutex_unlock(&core_data->buffer_lock);

	return 0;
}

static const struct file_operations sof_dfs_mtrace_fops = {
	.open = sof_ipc4_mtrace_dfs_open,
	.read = sof_ipc4_mtrace_dfs_read,
	.llseek = default_llseek,
	.release = sof_ipc4_mtrace_dfs_release,

	.owner = THIS_MODULE,
};

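/*
 * logs_priorities_mask debugfs file: reading it prints one line per library
 * slot in the form "<index>: 0x<mask>"; see the write handler below for how
 * to update an entry.
 */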
static ssize_t sof_ipc4_priority_mask_dfs_read(struct file *file, char __user *to,
					       size_t count, loff_t *ppos)
{
	struct sof_mtrace_priv *priv = file->private_data;
	int i, ret, offset, remaining;
	char *buf;

	/*
	 * one entry is 14 characters + new line = 15:
	 * " 0: 0x000001ef"
	 *
	 * 16 * 15 + 1 = 241
	 */
	buf = kzalloc(241, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < MAX_ALLOWED_LIBRARIES; i++) {
		offset = strlen(buf);
		remaining = 241 - offset;
		snprintf(buf + offset, remaining, "%2d: 0x%08x\n", i,
			 priv->state_info.logs_priorities_mask[i]);
	}

	ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));

	kfree(buf);
	return ret;
}

static ssize_t sof_ipc4_priority_mask_dfs_write(struct file *file,
						const char __user *from,
						size_t count, loff_t *ppos)
{
	struct sof_mtrace_priv *priv = file->private_data;
	unsigned int id;
	char *buf;
	u32 mask;
	int ret;

	/*
	 * To update the Nth mask entry, write "N,0x1234" or "N,1234" to the
	 * debugfs file; the mask is interpreted as a hexadecimal number.
	 */
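	/*
	 * Example (assuming the SOF debugfs root is /sys/kernel/debug/sof):
	 *   echo "0,0x1ff" > /sys/kernel/debug/sof/mtrace/logs_priorities_mask
	 * adds the VERBOSE level to the default mask of the base firmware
	 * (library 0). The updated mask is sent to the firmware with the next
	 * ENABLE_LOGS message, e.g. when mtrace is (re)enabled on resume.
	 */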
	buf = memdup_user_nul(from, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = sscanf(buf, "%u,0x%x", &id, &mask);
	if (ret != 2) {
		ret = sscanf(buf, "%u,%x", &id, &mask);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (id >= MAX_ALLOWED_LIBRARIES) {
		ret = -EINVAL;
		goto out;
	}

	priv->state_info.logs_priorities_mask[id] = mask;
	ret = count;

out:
	kfree(buf);
	return ret;
}

static const struct file_operations sof_dfs_priority_mask_fops = {
	.open = simple_open,
	.read = sof_ipc4_priority_mask_dfs_read,
	.write = sof_ipc4_priority_mask_dfs_write,
	.llseek = default_llseek,

	.owner = THIS_MODULE,
};

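/*
 * Debugfs layout created below, under the "mtrace" directory in
 * sdev->debugfs_root:
 *	aging_timer_period
 *	fifo_full_timer_period
 *	logs_priorities_mask
 *	core0 ... core<num_cores - 1>	(one log file per DSP core)
 */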
static int mtrace_debugfs_create(struct snd_sof_dev *sdev)
{
	struct sof_mtrace_priv *priv = sdev->fw_trace_data;
	struct dentry *dfs_root;
	char dfs_name[100];
	int i;

	dfs_root = debugfs_create_dir("mtrace", sdev->debugfs_root);
	if (IS_ERR_OR_NULL(dfs_root))
		return 0;

	/* Create files for the logging parameters */
	debugfs_create_u32("aging_timer_period", 0644, dfs_root,
			   &priv->state_info.aging_timer_period);
	debugfs_create_u32("fifo_full_timer_period", 0644, dfs_root,
			   &priv->state_info.fifo_full_timer_period);
	debugfs_create_file("logs_priorities_mask", 0644, dfs_root, priv,
			    &sof_dfs_priority_mask_fops);

	/* Separate log files per core */
	for (i = 0; i < sdev->num_cores; i++) {
		snprintf(dfs_name, sizeof(dfs_name), "core%d", i);
		debugfs_create_file(dfs_name, 0444, dfs_root, &priv->cores[i],
				    &sof_dfs_mtrace_fops);
	}

	return 0;
}

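/*
 * Enabling is a two-step IPC sequence: first the current system time is
 * sent (SOF_IPC4_FW_PARAM_SYSTEM_TIME, converted to the firmware's
 * 1601-based epoch), then the logging configuration in state_info is sent
 * via SOF_IPC4_FW_PARAM_ENABLE_LOGS with enable set to 1.
 */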
static int ipc4_mtrace_enable(struct snd_sof_dev *sdev)
{
	struct sof_mtrace_priv *priv = sdev->fw_trace_data;
	const struct sof_ipc_ops *iops = sdev->ipc->ops;
	struct sof_ipc4_msg msg;
	u64 system_time;
	ktime_t kt;
	int ret;

	if (priv->mtrace_state != SOF_MTRACE_DISABLED)
		return 0;

	msg.primary = SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MOD_ID(SOF_IPC4_MOD_INIT_BASEFW_MOD_ID);
	msg.primary |= SOF_IPC4_MOD_INSTANCE(SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID);
	msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_SYSTEM_TIME);

	/* The system time is in usec, UTC, epoch is 1601-01-01 00:00:00 */
	kt = ktime_add_us(ktime_get_real(), FW_EPOCH_DELTA * USEC_PER_SEC);
	system_time = ktime_to_us(kt);
	msg.data_size = sizeof(system_time);
	msg.data_ptr = &system_time;
	ret = iops->set_get_data(sdev, &msg, msg.data_size, true);
	if (ret)
		return ret;

	msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_ENABLE_LOGS);

	priv->state_info.enable = 1;

	msg.data_size = sizeof(priv->state_info);
	msg.data_ptr = &priv->state_info;

	priv->mtrace_state = SOF_MTRACE_INITIALIZING;
	ret = iops->set_get_data(sdev, &msg, msg.data_size, true);
	if (ret) {
		priv->mtrace_state = SOF_MTRACE_DISABLED;
		return ret;
	}

	priv->mtrace_state = SOF_MTRACE_ENABLED;

	return 0;
}

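/*
 * Disabling sends ENABLE_LOGS with enable set to 0, then resets the per-core
 * read/write pointers and wakes up any reader blocked in
 * sof_wait_mtrace_avail() so pending read() calls can return.
 */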
static void ipc4_mtrace_disable(struct snd_sof_dev *sdev)
{
	struct sof_mtrace_priv *priv = sdev->fw_trace_data;
	const struct sof_ipc_ops *iops = sdev->ipc->ops;
	struct sof_ipc4_msg msg;
	int i;

	if (priv->mtrace_state == SOF_MTRACE_DISABLED)
		return;

	msg.primary = SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MOD_ID(SOF_IPC4_MOD_INIT_BASEFW_MOD_ID);
	msg.primary |= SOF_IPC4_MOD_INSTANCE(SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID);
	msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_ENABLE_LOGS);

	priv->state_info.enable = 0;

	msg.data_size = sizeof(priv->state_info);
	msg.data_ptr = &priv->state_info;
	iops->set_get_data(sdev, &msg, msg.data_size, true);

	priv->mtrace_state = SOF_MTRACE_DISABLED;

	for (i = 0; i < sdev->num_cores; i++) {
		struct sof_mtrace_core_data *core_data = &priv->cores[i];

		core_data->host_read_ptr = 0;
		core_data->dsp_write_ptr = 0;
		wake_up(&core_data->trace_sleep);
	}
}

/*
 * Each DSP core logs to a dedicated slot.
 * Parse the slot descriptors at debug_box offset to find the debug log slots
 * and map them to cores.
 * There are 15 slots and therefore 15 descriptors to check (MAX_MTRACE_SLOTS)
 */
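/*
 * For example, a descriptor type of 0x474f4c01 marks a debug log slot
 * (SOF_MTRACE_SLOT_DEBUG_LOG in bits 31:8) used by core 1 (bits 7:0).
 */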
static void sof_mtrace_find_core_slots(struct snd_sof_dev *sdev)
{
	struct sof_mtrace_priv *priv = sdev->fw_trace_data;
	struct sof_mtrace_core_data *core_data;
	u32 slot_desc_type_offset, type, core;
	int i;

	for (i = 0; i < MAX_MTRACE_SLOTS; i++) {
		/* The type is the second u32 in the slot descriptor */
		slot_desc_type_offset = sdev->debug_box.offset;
		slot_desc_type_offset += SOF_MTRACE_DESCRIPTOR_SIZE * i + sizeof(u32);
		sof_mailbox_read(sdev, slot_desc_type_offset, &type, sizeof(type));

		if ((type & SOF_MTRACE_SLOT_TYPE_MASK) == SOF_MTRACE_SLOT_DEBUG_LOG) {
			core = type & SOF_MTRACE_SLOT_CORE_MASK;

			if (core >= sdev->num_cores) {
				dev_dbg(sdev->dev, "core%u is invalid for slot%d\n",
					core, i);
				continue;
			}

			core_data = &priv->cores[core];
			/*
			 * The area reserved for descriptors has the same size
			 * as a slot.
			 * In other words: slot0 starts at
			 * debug_box + SOF_MTRACE_SLOT_SIZE offset
			 */
			core_data->slot_offset = sdev->debug_box.offset;
			core_data->slot_offset += SOF_MTRACE_SLOT_SIZE * (i + 1);
			dev_dbg(sdev->dev, "slot%d is used for core%u\n", i, core);
			if (core_data->delayed_pos_update) {
				sof_ipc4_mtrace_update_pos(sdev, core);
				core_data->delayed_pos_update = false;
			}
		} else if (type) {
			dev_dbg(sdev->dev, "slot%d is not a log slot (%#x)\n", i, type);
		}
	}
}

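/*
 * Init allocates the per-core bookkeeping, programs the default logging
 * parameters, enables mtrace in the firmware, locates the per-core log
 * slots and finally creates the debugfs interface. A failure to enable
 * only marks firmware tracing as unsupported and returns 0 so the rest of
 * the audio stack is not blocked.
 */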
static int ipc4_mtrace_init(struct snd_sof_dev *sdev)
{
	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
	struct sof_mtrace_priv *priv;
	int i, ret;

	if (sdev->fw_trace_data) {
		dev_err(sdev->dev, "fw_trace_data has already been allocated\n");
		return -EBUSY;
	}

	if (!ipc4_data->mtrace_log_bytes ||
	    ipc4_data->mtrace_type != SOF_IPC4_MTRACE_INTEL_CAVS_2) {
		sdev->fw_trace_is_supported = false;
		return 0;
	}

	priv = devm_kzalloc(sdev->dev, struct_size(priv, cores, sdev->num_cores),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sdev->fw_trace_data = priv;

	/* Set initial values for mtrace parameters */
	priv->state_info.aging_timer_period = DEFAULT_AGING_TIMER_PERIOD_MS;
	priv->state_info.fifo_full_timer_period = DEFAULT_FIFO_FULL_TIMER_PERIOD_MS;
	/* Only enable basefw logs initially (index 0 is always basefw) */
	priv->state_info.logs_priorities_mask[0] = DEFAULT_LOGS_PRIORITIES_MASK;

	for (i = 0; i < sdev->num_cores; i++) {
		struct sof_mtrace_core_data *core_data = &priv->cores[i];

		init_waitqueue_head(&core_data->trace_sleep);
		mutex_init(&core_data->buffer_lock);
		core_data->sdev = sdev;
		core_data->id = i;
	}

	ret = ipc4_mtrace_enable(sdev);
	if (ret) {
		/*
		 * Mark firmware tracing as not supported and return 0 to not
		 * block the whole audio stack
		 */
		sdev->fw_trace_is_supported = false;
		dev_dbg(sdev->dev, "initialization failed, fw tracing is disabled\n");
		return 0;
	}

	sof_mtrace_find_core_slots(sdev);

	ret = mtrace_debugfs_create(sdev);
	if (ret)
		ipc4_mtrace_disable(sdev);

	return ret;
}

static void ipc4_mtrace_free(struct snd_sof_dev *sdev)
{
	ipc4_mtrace_disable(sdev);
}

static int sof_ipc4_mtrace_update_pos_all_cores(struct snd_sof_dev *sdev)
{
	int i;

	for (i = 0; i < sdev->num_cores; i++)
		sof_ipc4_mtrace_update_pos(sdev, i);

	return 0;
}

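/*
 * Called when the firmware reports new log data for @core (typically from
 * the SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS notification handler, or for all
 * cores from ipc4_mtrace_fw_crashed()): read the DSP write pointer from the
 * slot and wake up any reader waiting in sof_wait_mtrace_avail().
 */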
int sof_ipc4_mtrace_update_pos(struct snd_sof_dev *sdev, int core)
{
	struct sof_mtrace_priv *priv = sdev->fw_trace_data;
	struct sof_mtrace_core_data *core_data;

	if (!sdev->fw_trace_is_supported ||
	    priv->mtrace_state == SOF_MTRACE_DISABLED)
		return 0;

	if (core >= sdev->num_cores)
		return -EINVAL;

	core_data = &priv->cores[core];

	if (core_data->slot_offset == INVALID_SLOT_OFFSET) {
		core_data->delayed_pos_update = true;
		return 0;
	}

	/* Read out the dsp_write_ptr from the slot for this core */
	sof_mailbox_read(sdev, core_data->slot_offset + sizeof(u32),
			 &core_data->dsp_write_ptr, 4);
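	/* align the pointer down to a 4-byte boundary (presumably whole 32-bit words are logged) */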
	core_data->dsp_write_ptr -= core_data->dsp_write_ptr % 4;

	if (sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
		dev_dbg(sdev->dev, "core%d, host read: %#x, dsp write: %#x",
			core, core_data->host_read_ptr, core_data->dsp_write_ptr);

	wake_up(&core_data->trace_sleep);

	return 0;
}

static void ipc4_mtrace_fw_crashed(struct snd_sof_dev *sdev)
{
	/*
	 * The DSP might not be able to send SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS
	 * messages anymore, so check the log buffer status on all
	 * cores and process any pending messages.
	 */
	sof_ipc4_mtrace_update_pos_all_cores(sdev);
}

static int ipc4_mtrace_resume(struct snd_sof_dev *sdev)
{
	return ipc4_mtrace_enable(sdev);
}

static void ipc4_mtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
{
	ipc4_mtrace_disable(sdev);
}

const struct sof_ipc_fw_tracing_ops ipc4_mtrace_ops = {
	.init = ipc4_mtrace_init,
	.free = ipc4_mtrace_free,
	.fw_crashed = ipc4_mtrace_fw_crashed,
	.suspend = ipc4_mtrace_suspend,
	.resume = ipc4_mtrace_resume,
};