// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2020 Intel Corporation. All rights reserved.
//
// Author: Fred Oh <fred.oh@linux.intel.com>
//

/*
 * Hardware interface for audio DSP on IceLake.
 */

#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/export.h>
#include <linux/bits.h>
#include "../ipc4-priv.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"
#include "../sof-audio.h"

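/*
 * When the HPRO clock configuration is in use (as opposed to LPRO), core 3
 * must be powered up and kept stalled after firmware boot; see
 * icl_dsp_post_fw_run() below.
 */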
#define ICL_DSP_HPRO_CORE_ID 3

static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
	{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"pp", HDA_DSP_PP_BAR,  0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"dsp", HDA_DSP_BAR,  0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};

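/*
 * Implements the .stall op: set the CSTALL bits in ADSPCS for the requested
 * cores so they halt execution without being powered down.
 */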
static int icl_dsp_core_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	/* make sure core_mask only covers host managed cores */
	core_mask &= chip->host_managed_cores_mask;
	if (!core_mask) {
		dev_err(sdev->dev, "error: core_mask is not in host managed cores\n");
		return -EINVAL;
	}

	/* stall core */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

	return 0;
}

/*
 * Post-firmware-run operation for ICL.
 * Core 3 will be powered up and kept in stall when HPRO is enabled.
 */
static int icl_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int ret;

	if (sdev->first_boot) {
		struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;

		ret = hda_sdw_startup(sdev);
		if (ret < 0) {
			dev_err(sdev->dev, "error: could not startup SoundWire links\n");
			return ret;
		}

		/* Check if IMR boot is usable */
		if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
		    sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT)
			hdev->imrboot_supported = true;
	}

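	/* enable SoundWire interrupts; unlike the link startup above, this runs on every firmware boot */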
	hda_sdw_int_enable(sdev, true);

	/*
	 * The recommended HW programming sequence for ICL is to
	 * power up core 3 and keep it in stall if HPRO is enabled.
	 */
	if (!hda->clk_config_lpro) {
		ret = hda_dsp_enable_core(sdev, BIT(ICL_DSP_HPRO_CORE_ID));
		if (ret < 0) {
			dev_err(sdev->dev, "error: dsp core power up failed on core %d\n",
				ICL_DSP_HPRO_CORE_ID);
			return ret;
		}

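		/*
		 * Book-keep core 3 as enabled and take a reference so the
		 * common core power management does not power it down again.
		 */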
		sdev->enabled_cores_mask |= BIT(ICL_DSP_HPRO_CORE_ID);
		sdev->dsp_core_ref_count[ICL_DSP_HPRO_CORE_ID]++;

		snd_sof_dsp_stall(sdev, BIT(ICL_DSP_HPRO_CORE_ID));
	}

	/* re-enable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, true);
}

/* Icelake ops */
struct snd_sof_dsp_ops sof_icl_ops;
EXPORT_SYMBOL_NS(sof_icl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);

int sof_icl_ops_init(struct snd_sof_dev *sdev)
{
	/* common defaults */
	memcpy(&sof_icl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));

	/* probe/remove/shutdown */
	sof_icl_ops.shutdown	= hda_dsp_shutdown;

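	/* ICL reuses the CNL IPC register layout, hence the cnl_* handlers */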
	if (sdev->pdata->ipc_type == SOF_IPC) {
		/* doorbell */
		sof_icl_ops.irq_thread	= cnl_ipc_irq_thread;

		/* ipc */
		sof_icl_ops.send_msg	= cnl_ipc_send_msg;

		/* debug */
		sof_icl_ops.ipc_dump	= cnl_ipc_dump;
	}

	if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
		struct sof_ipc4_fw_data *ipc4_data;

		sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
		if (!sdev->private)
			return -ENOMEM;

		ipc4_data = sdev->private;
		ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;

		ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;

		/* doorbell */
		sof_icl_ops.irq_thread	= cnl_ipc4_irq_thread;

		/* ipc */
		sof_icl_ops.send_msg	= cnl_ipc4_send_msg;

		/* debug */
		sof_icl_ops.ipc_dump	= cnl_ipc4_dump;
	}

	/* debug */
	sof_icl_ops.debug_map	= icl_dsp_debugfs;
	sof_icl_ops.debug_map_count	= ARRAY_SIZE(icl_dsp_debugfs);

	/* pre/post fw run */
	sof_icl_ops.post_fw_run = icl_dsp_post_fw_run;

	/* firmware run */
	sof_icl_ops.run = hda_dsp_cl_boot_firmware_iccmax;
	sof_icl_ops.stall = icl_dsp_core_stall;

	/* dsp core get/put */
	sof_icl_ops.core_get = hda_dsp_core_get;

	/* set DAI driver ops */
	hda_set_dai_drv_ops(sdev, &sof_icl_ops);

	return 0;
}
EXPORT_SYMBOL_NS(sof_icl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);

const struct sof_intel_dsp_desc icl_chip_info = {
	/* Icelake */
	.cores_num = 4,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(3, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout	= 300,
	.ssp_count = ICL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.check_sdw_irq	= hda_common_check_sdw_irq,
	.check_ipc_irq	= hda_dsp_check_ipc_irq,
	.cl_init = cl_dsp_init,
	.power_down_dsp = hda_power_down_dsp,
	.disable_interrupts = hda_dsp_disable_interrupts,
	.hw_ip_version = SOF_INTEL_CAVS_2_0,
};
EXPORT_SYMBOL_NS(icl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
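
/*
 * For reference, a minimal sketch (not part of this file) of how a PCI
 * platform descriptor could consume the symbols exported above. The field
 * names follow struct sof_dev_desc, but the descriptor is illustrative only
 * and omits the firmware/topology paths a real entry needs:
 *
 *	static const struct sof_dev_desc icl_desc = {
 *		.chip_info	= &icl_chip_info,
 *		.ops		= &sof_icl_ops,
 *		.ops_init	= sof_icl_ops_init,
 *	};
 */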