// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/devcoredump.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"

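/*
 * Build a skl_log_state_info payload carrying per-core enable flags and
 * minimum log priorities, then hand it to the firmware with
 * avs_ipc_set_enable_logs().
 */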
static int __maybe_unused
skl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable, u32 aging_period,
		u32 fifo_full_period, unsigned long resource_mask, u32 *priorities)
{
	struct skl_log_state_info *info;
	u32 size, num_cores = adev->hw_cfg.dsp_cores;
	int ret, i;

	if (fls_long(resource_mask) > num_cores)
		return -EINVAL;
	size = struct_size(info, logs_core, num_cores);
	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->core_mask = resource_mask;
	if (enable)
		for_each_set_bit(i, &resource_mask, num_cores) {
			info->logs_core[i].enable = enable;
			info->logs_core[i].min_priority = *priorities++;
		}
	else
		for_each_set_bit(i, &resource_mask, num_cores)
			info->logs_core[i].enable = enable;

	ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size);
	kfree(info);
	if (ret)
		return AVS_IPC_RET(ret);

	return 0;
}

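/* Per-core log buffers are laid out consecutively, each avs_log_buffer_size() bytes long. */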
int skl_log_buffer_offset(struct avs_dev *adev, u32 core)
{
	return core * avs_log_buffer_size(adev);
}

/* fw DbgLogWp registers */
#define FW_REGS_DBG_LOG_WP(core)	(0x30 + 0x4 * core)

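/*
 * Firmware fills each core's log buffer in halves. On a buffer-status
 * notification, dump the half opposite to the current write pointer.
 */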
static int
skl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg)
{
	void __iomem *buf;
	u16 size, write, offset;

	if (!avs_logging_fw(adev))
		return 0;

	size = avs_log_buffer_size(adev) / 2;
	write = readl(avs_sram_addr(adev, AVS_FW_REGS_WINDOW) + FW_REGS_DBG_LOG_WP(msg->log.core));
	/* determine buffer half */
	offset = (write < size) ? size : 0;

	/* Address is guaranteed to exist in SRAM2. */
	buf = avs_log_buffer_addr(adev, msg->log.core) + offset;
	avs_dump_fw_log_wakeup(adev, buf, size);

	return 0;
}

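/* Snapshot the firmware registers window from SRAM and expose it via devcoredump. */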
static int skl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
{
	u8 *dump;

	dump = vzalloc(AVS_FW_REGS_SIZE);
	if (!dump)
		return -ENOMEM;

	memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE);
	dev_coredumpv(adev->dev, dump, AVS_FW_REGS_SIZE, GFP_KERNEL);

	return 0;
}

static bool
skl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake)
{
	/* unsupported on cAVS 1.5 hw */
	return false;
}

static int skl_set_d0ix(struct avs_dev *adev, bool enable)
{
	/* unsupported on cAVS 1.5 hw */
	return 0;
}

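/* DSP operations used on cAVS 1.5 hardware. */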
const struct avs_dsp_ops skl_dsp_ops = {
	.power = avs_dsp_core_power,
	.reset = avs_dsp_core_reset,
	.stall = avs_dsp_core_stall,
	.irq_handler = avs_dsp_irq_handler,
	.irq_thread = avs_dsp_irq_thread,
	.int_control = avs_dsp_interrupt_control,
	.load_basefw = avs_cldma_load_basefw,
	.load_lib = avs_cldma_load_library,
	.transfer_mods = avs_cldma_transfer_modules,
	.log_buffer_offset = skl_log_buffer_offset,
	.log_buffer_status = skl_log_buffer_status,
	.coredump = skl_coredump,
	.d0ix_toggle = skl_d0ix_toggle,
	.set_d0ix = skl_set_d0ix,
	AVS_SET_ENABLE_LOGS_OP(skl)
};