// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for audio DSP on Cannonlake.
 */

#include <sound/sof/ext_manifest4.h>
#include <sound/sof/ipc4/header.h>
#include "../ipc4-priv.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"
#include "../sof-audio.h"

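/* debugfs windows exposed for the Cannonlake DSP: HDA, PP and DSP BAR regions */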
static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
	{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"pp", HDA_DSP_PP_BAR,  0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"dsp", HDA_DSP_BAR,  0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};

static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);

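/*
 * IPC4 interrupt thread: handle the DONE interrupt (the DSP has accepted
 * the host's message) and the BUSY interrupt (the DSP has sent the host a
 * reply or a notification).
 */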
irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
{
	struct sof_ipc4_msg notification_data = {{ 0 }};
	struct snd_sof_dev *sdev = context;
	bool ipc_irq = false;
	u32 hipcida, hipctdr;

	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
		/* DSP received the message */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					CNL_DSP_REG_HIPCCTL,
					CNL_DSP_REG_HIPCCTL_DONE, 0);
		cnl_ipc_dsp_done(sdev);

		ipc_irq = true;
	}

	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
		/* Message from DSP (reply or notification) */
		u32 hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
					       CNL_DSP_REG_HIPCTDD);
		u32 primary = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
		u32 extension = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;

		if (primary & SOF_IPC4_MSG_DIR_MASK) {
			/* Reply received */
			if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
				struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;

				data->primary = primary;
				data->extension = extension;

				spin_lock_irq(&sdev->ipc_lock);

				snd_sof_ipc_get_reply(sdev);
				snd_sof_ipc_reply(sdev, data->primary);

				spin_unlock_irq(&sdev->ipc_lock);
			} else {
				dev_dbg_ratelimited(sdev->dev,
						    "IPC reply before FW_READY: %#x|%#x\n",
						    primary, extension);
			}
		} else {
			/* Notification received */
			notification_data.primary = primary;
			notification_data.extension = extension;

			sdev->ipc->msg.rx_data = &notification_data;
			snd_sof_ipc_msgs_rx(sdev);
			sdev->ipc->msg.rx_data = NULL;
		}

		/* Let DSP know that we have finished processing the message */
		cnl_ipc_host_done(sdev);

		ipc_irq = true;
	}

	if (!ipc_irq)
		/* This interrupt is not shared so no need to return IRQ_NONE. */
		dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");

	return IRQ_HANDLED;
}

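/*
 * IPC3 interrupt thread: handle firmware replies (DONE), new firmware
 * messages (BUSY) and firmware panic notifications.
 */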
irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	u32 hipci;
	u32 hipcida;
	u32 hipctdr;
	u32 hipctdd;
	u32 msg;
	u32 msg_ext;
	bool ipc_irq = false;

	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
	hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);

	/* reply message from DSP */
	if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
		msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
		msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;

		dev_vdbg(sdev->dev,
			 "ipc: firmware response, msg:0x%x, msg_ext:0x%x\n",
			 msg, msg_ext);

		/* mask Done interrupt */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					CNL_DSP_REG_HIPCCTL,
					CNL_DSP_REG_HIPCCTL_DONE, 0);

		if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
			spin_lock_irq(&sdev->ipc_lock);

			/* handle immediate reply from DSP core */
			hda_dsp_ipc_get_reply(sdev);
			snd_sof_ipc_reply(sdev, msg);

			cnl_ipc_dsp_done(sdev);

			spin_unlock_irq(&sdev->ipc_lock);
		} else {
			dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
					    msg);
		}

		ipc_irq = true;
	}

	/* new message from DSP */
	if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
		msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
		msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;

		dev_vdbg(sdev->dev,
			 "ipc: firmware initiated, msg:0x%x, msg_ext:0x%x\n",
			 msg, msg_ext);

		/* handle messages from DSP */
		if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
			struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
			bool non_recoverable = true;

			/*
			 * This is a PANIC message!
			 *
			 * If it arrives during firmware boot and this is not
			 * the last boot attempt, clear non_recoverable since
			 * the DSP may still boot successfully in a later
			 * iteration.
			 */
			if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
			    hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
				non_recoverable = false;

			snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
					  non_recoverable);
		} else {
			snd_sof_ipc_msgs_rx(sdev);
		}

		cnl_ipc_host_done(sdev);

		ipc_irq = true;
	}

	if (!ipc_irq) {
		/*
		 * This interrupt is not shared so no need to return IRQ_NONE.
		 */
		dev_dbg_ratelimited(sdev->dev,
				    "nothing to do in IPC IRQ thread\n");
	}

	return IRQ_HANDLED;
}

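/* complete a DSP-initiated message: clear BUSY, then signal DONE back to the DSP */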
static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
{
	/*
	 * Clear the BUSY interrupt to tell the DSP controller that this
	 * interrupt has been accepted and should not be triggered again.
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCTDR,
				       CNL_DSP_REG_HIPCTDR_BUSY,
				       CNL_DSP_REG_HIPCTDR_BUSY);
	/*
	 * Set the DONE bit to acknowledge to the DSP that the message has
	 * been processed and to signal the reply back to the DSP.
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCTDA,
				       CNL_DSP_REG_HIPCTDA_DONE,
				       CNL_DSP_REG_HIPCTDA_DONE);
}

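/* complete a host-initiated message: ack the DSP's reply and re-enable the DONE interrupt */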
static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
{
	/*
	 * Set the DONE bit to tell the DSP that the host has received and
	 * processed its reply, so it does not need to send it again.
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCIDA,
				       CNL_DSP_REG_HIPCIDA_DONE,
				       CNL_DSP_REG_HIPCIDA_DONE);

	/* unmask Done interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
				CNL_DSP_REG_HIPCCTL,
				CNL_DSP_REG_HIPCCTL_DONE,
				CNL_DSP_REG_HIPCCTL_DONE);
}

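/*
 * Compress an IPC message into the compact, register-only form if possible.
 * Only the PM_GATE IPC qualifies; on success the primary word is returned in
 * *dr and the payload in *dd.
 */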
static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
				     u32 *dr, u32 *dd)
{
	struct sof_ipc_pm_gate *pm_gate = msg->msg_data;

	if (pm_gate->hdr.cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
		/* send the compact message via the primary register */
		*dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;

		/* send payload via the extended data register */
		*dd = pm_gate->flags;

		return true;
	}

	return false;
}

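/*
 * Send an IPC4 message: the optional payload goes through the mailbox, the
 * extension and primary words go through HIPCIDD/HIPCIDR; setting the BUSY
 * bit rings the doorbell.
 */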
int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_ipc4_msg *msg_data = msg->msg_data;

	/* send the message via mailbox */
	if (msg_data->data_size)
		sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
				  msg_data->data_size);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD, msg_data->extension);
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
			  msg_data->primary | CNL_DSP_REG_HIPCIDR_BUSY);

	return 0;
}

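/*
 * Send an IPC3 message: use the compact register form for PM_GATE, otherwise
 * write the payload to the mailbox and ring the doorbell, then (re)arm the
 * delayed D0i3 work unless this is the CTX_SAVE message sent before D3 entry.
 */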
int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
	struct sof_ipc_cmd_hdr *hdr;
	u32 dr = 0;
	u32 dd = 0;

	/*
	 * Currently the only compact IPC supported is the PM_GATE
	 * IPC, which is used for transitioning the DSP between the
	 * D0I0 and D0I3 states, and it is sent only from the
	 * set_power_state() op. Therefore, a compact IPC can never
	 * cause the DSP to exit D0I3 with the host and FW out of
	 * sync.
	 */
	if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
		/* send the message via IPC registers */
		snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
				  dd);
		snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
				  CNL_DSP_REG_HIPCIDR_BUSY | dr);
		return 0;
	}

	/* send the message via mailbox */
	sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
			  msg->msg_size);
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
			  CNL_DSP_REG_HIPCIDR_BUSY);

	hdr = msg->msg_data;

	/*
	 * Use mod_delayed_work() to schedule the delayed work
	 * to avoid scheduling multiple workqueue items when
	 * IPCs are sent at a high rate. mod_delayed_work()
	 * modifies the timer if the work is pending.
	 * Also, a new delayed work should not be queued after the
	 * CTX_SAVE IPC, which is sent before the DSP enters D3.
	 */
	if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
		mod_delayed_work(system_wq, &hdev->d0i3_work,
				 msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));

	return 0;
}

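/* dump the IPC register state when an IPC error or timeout occurs */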
void cnl_ipc_dump(struct snd_sof_dev *sdev)
{
	u32 hipcctl;
	u32 hipcida;
	u32 hipctdr;

	hda_ipc_irq_dump(sdev);

	/* read IPC status */
	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);

	/* dump the IPC regs */
	/* TODO: parse the raw msg */
	dev_err(sdev->dev,
		"error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
		hipcida, hipctdr, hipcctl);
}

/* cannonlake ops */
struct snd_sof_dsp_ops sof_cnl_ops;
EXPORT_SYMBOL_NS(sof_cnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);

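/*
 * Populate sof_cnl_ops: start from the common HDA ops and override the
 * shutdown, debug, firmware boot and core power callbacks; the IPC doorbell
 * and message send handlers depend on whether IPC3 or IPC4 firmware is used.
 */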
int sof_cnl_ops_init(struct snd_sof_dev *sdev)
{
	/* common defaults */
	memcpy(&sof_cnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));

	/* probe/remove/shutdown */
	sof_cnl_ops.shutdown	= hda_dsp_shutdown;

	/* ipc */
	if (sdev->pdata->ipc_type == SOF_IPC) {
		/* doorbell */
		sof_cnl_ops.irq_thread	= cnl_ipc_irq_thread;

		/* ipc */
		sof_cnl_ops.send_msg	= cnl_ipc_send_msg;
	}

	if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
		struct sof_ipc4_fw_data *ipc4_data;

		sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
		if (!sdev->private)
			return -ENOMEM;

		ipc4_data = sdev->private;
		ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;

		/* doorbell */
		sof_cnl_ops.irq_thread	= cnl_ipc4_irq_thread;

		/* ipc */
		sof_cnl_ops.send_msg	= cnl_ipc4_send_msg;
	}

	/* set DAI driver ops */
	hda_set_dai_drv_ops(sdev, &sof_cnl_ops);

	/* debug */
	sof_cnl_ops.debug_map	= cnl_dsp_debugfs;
	sof_cnl_ops.debug_map_count	= ARRAY_SIZE(cnl_dsp_debugfs);
	sof_cnl_ops.ipc_dump	= cnl_ipc_dump;

	/* pre/post fw run */
	sof_cnl_ops.post_fw_run = hda_dsp_post_fw_run;

	/* firmware run */
	sof_cnl_ops.run = hda_dsp_cl_boot_firmware;

	/* dsp core get/put */
	sof_cnl_ops.core_get = hda_dsp_core_get;

	return 0;
}
EXPORT_SYMBOL_NS(sof_cnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);

const struct sof_intel_dsp_desc cnl_chip_info = {
	/* Cannonlake */
	.cores_num = 4,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(3, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout	= 300,
	.ssp_count = CNL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.check_sdw_irq	= hda_common_check_sdw_irq,
	.check_ipc_irq	= hda_dsp_check_ipc_irq,
	.cl_init = cl_dsp_init,
	.hw_ip_version = SOF_INTEL_CAVS_1_8,
};
EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * JasperLake is technically derived from IceLake and should be described
 * in icl.c. However, since JasperLake was designed with only two cores,
 * it cannot support the IceLake-specific power-up sequences, which rely
 * on core3. To simplify, JasperLake uses the CannonLake ops and is
 * described in cnl.c.
 */
const struct sof_intel_dsp_desc jsl_chip_info = {
	/* Jasperlake */
	.cores_num = 2,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(1, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout	= 300,
	.ssp_count = ICL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.check_sdw_irq	= hda_common_check_sdw_irq,
	.check_ipc_irq	= hda_dsp_check_ipc_irq,
	.cl_init = cl_dsp_init,
	.hw_ip_version = SOF_INTEL_CAVS_2_0,
};
EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);