1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 /*
5  * Soundwire Intel Master Driver
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/debugfs.h>
10 #include <linux/delay.h>
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/auxiliary_bus.h>
15 #include <sound/pcm_params.h>
16 #include <linux/pm_runtime.h>
17 #include <sound/soc.h>
18 #include <linux/soundwire/sdw_registers.h>
19 #include <linux/soundwire/sdw.h>
20 #include <linux/soundwire/sdw_intel.h>
21 #include "cadence_master.h"
22 #include "bus.h"
23 #include "intel.h"
24 
25 /* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */
26 #define INTEL_DEV_NUM_IDA_MIN           4
27 
28 #define INTEL_MASTER_SUSPEND_DELAY_MS	3000
29 #define INTEL_MASTER_RESET_ITERATIONS	10
30 
31 /*
32  * debug/config flags for the Intel SoundWire Master.
33  *
34  * Since we may have multiple masters active, each master gets one byte of
35  * up to 8 flags, with master0 using the least-significant byte, etc.
36  */
37 
38 #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME		BIT(0)
39 #define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP		BIT(1)
40 #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE	BIT(2)
41 #define SDW_INTEL_MASTER_DISABLE_MULTI_LINK		BIT(3)
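/* e.g. sdw_md_flags=0x0101 applies SDW_INTEL_MASTER_DISABLE_PM_RUNTIME to links 0 and 1 */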
42 
43 static int md_flags;
44 module_param_named(sdw_md_flags, md_flags, int, 0444);
45 MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
46 
47 enum intel_pdi_type {
48 	INTEL_PDI_IN = 0,
49 	INTEL_PDI_OUT = 1,
50 	INTEL_PDI_BD = 2,
51 };
52 
53 #define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
54 
55 /*
56  * Read, write helpers for HW registers
57  */
58 static inline int intel_readl(void __iomem *base, int offset)
59 {
60 	return readl(base + offset);
61 }
62 
63 static inline void intel_writel(void __iomem *base, int offset, int value)
64 {
65 	writel(value, base + offset);
66 }
67 
68 static inline u16 intel_readw(void __iomem *base, int offset)
69 {
70 	return readw(base + offset);
71 }
72 
73 static inline void intel_writew(void __iomem *base, int offset, u16 value)
74 {
75 	writew(value, base + offset);
76 }
77 
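/*
 * Poll until (reg & mask) == target; gives up after 10 iterations with a
 * 50-100us sleep each (roughly 0.5-1 ms total) and returns -EAGAIN on timeout.
 */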
78 static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
79 {
80 	int timeout = 10;
81 	u32 reg_read;
82 
83 	do {
84 		reg_read = readl(base + offset);
85 		if ((reg_read & mask) == target)
86 			return 0;
87 
88 		timeout--;
89 		usleep_range(50, 100);
90 	} while (timeout != 0);
91 
92 	return -EAGAIN;
93 }
94 
95 static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
96 {
97 	writel(value, base + offset);
98 	return intel_wait_bit(base, offset, mask, 0);
99 }
100 
101 static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
102 {
103 	writel(value, base + offset);
104 	return intel_wait_bit(base, offset, mask, mask);
105 }
106 
107 /*
108  * debugfs
109  */
110 #ifdef CONFIG_DEBUG_FS
111 
112 #define RD_BUF (2 * PAGE_SIZE)
113 
114 static ssize_t intel_sprintf(void __iomem *mem, bool l,
115 			     char *buf, size_t pos, unsigned int reg)
116 {
117 	int value;
118 
119 	if (l)
120 		value = intel_readl(mem, reg);
121 	else
122 		value = intel_readw(mem, reg);
123 
124 	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
125 }
126 
127 static int intel_reg_show(struct seq_file *s_file, void *data)
128 {
129 	struct sdw_intel *sdw = s_file->private;
130 	void __iomem *s = sdw->link_res->shim;
131 	void __iomem *a = sdw->link_res->alh;
132 	char *buf;
133 	ssize_t ret;
134 	int i, j;
135 	unsigned int links, reg;
136 
137 	buf = kzalloc(RD_BUF, GFP_KERNEL);
138 	if (!buf)
139 		return -ENOMEM;
140 
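	/* LCAP reports the number of links implemented by the controller */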
141 	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
142 
143 	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
144 	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
145 
146 	for (i = 0; i < links; i++) {
147 		reg = SDW_SHIM_LCAP + i * 4;
148 		ret += intel_sprintf(s, true, buf, ret, reg);
149 	}
150 
151 	for (i = 0; i < links; i++) {
152 		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
153 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
154 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
155 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
156 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
157 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
158 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
159 
160 		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
161 
162 		/*
163 		 * the value 10 is the number of PDIs. We will need a
164 		 * cleanup to remove hard-coded Intel configurations
165 		 * from cadence_master.c
166 		 */
167 		for (j = 0; j < 10; j++) {
168 			ret += intel_sprintf(s, false, buf, ret,
169 					SDW_SHIM_PCMSYCHM(i, j));
170 			ret += intel_sprintf(s, false, buf, ret,
171 					SDW_SHIM_PCMSYCHC(i, j));
172 		}
173 		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
174 
175 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
176 		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
177 	}
178 
179 	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
180 	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
181 	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
182 
183 	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
184 	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
185 		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
186 
187 	seq_printf(s_file, "%s", buf);
188 	kfree(buf);
189 
190 	return 0;
191 }
192 DEFINE_SHOW_ATTRIBUTE(intel_reg);
193 
194 static int intel_set_m_datamode(void *data, u64 value)
195 {
196 	struct sdw_intel *sdw = data;
197 	struct sdw_bus *bus = &sdw->cdns.bus;
198 
199 	if (value > SDW_PORT_DATA_MODE_STATIC_1)
200 		return -EINVAL;
201 
202 	/* Userspace changed the hardware state behind the kernel's back */
203 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
204 
205 	bus->params.m_data_mode = value;
206 
207 	return 0;
208 }
209 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
210 			 intel_set_m_datamode, "%llu\n");
211 
212 static int intel_set_s_datamode(void *data, u64 value)
213 {
214 	struct sdw_intel *sdw = data;
215 	struct sdw_bus *bus = &sdw->cdns.bus;
216 
217 	if (value > SDW_PORT_DATA_MODE_STATIC_1)
218 		return -EINVAL;
219 
220 	/* Userspace changed the hardware state behind the kernel's back */
221 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
222 
223 	bus->params.s_data_mode = value;
224 
225 	return 0;
226 }
227 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
228 			 intel_set_s_datamode, "%llu\n");
229 
230 static void intel_debugfs_init(struct sdw_intel *sdw)
231 {
232 	struct dentry *root = sdw->cdns.bus.debugfs;
233 
234 	if (!root)
235 		return;
236 
237 	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
238 
239 	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
240 			    &intel_reg_fops);
241 
242 	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
243 			    &intel_set_m_datamode_fops);
244 
245 	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
246 			    &intel_set_s_datamode_fops);
247 
248 	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
249 }
250 
251 static void intel_debugfs_exit(struct sdw_intel *sdw)
252 {
253 	debugfs_remove_recursive(sdw->debugfs);
254 }
255 #else
256 static void intel_debugfs_init(struct sdw_intel *sdw) {}
257 static void intel_debugfs_exit(struct sdw_intel *sdw) {}
258 #endif /* CONFIG_DEBUG_FS */
259 
260 /*
261  * shim ops
262  */
263 /* this needs to be called with shim_lock */
264 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
265 {
266 	void __iomem *shim = sdw->link_res->shim;
267 	unsigned int link_id = sdw->instance;
268 	u16 ioctl;
269 
270 	/* Switch to MIP from Glue logic */
271 	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
272 
273 	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
274 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
275 	usleep_range(10, 15);
276 
277 	ioctl &= ~(SDW_SHIM_IOCTL_DO);
278 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
279 	usleep_range(10, 15);
280 
281 	ioctl |= (SDW_SHIM_IOCTL_MIF);
282 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
283 	usleep_range(10, 15);
284 
285 	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
286 	ioctl &= ~(SDW_SHIM_IOCTL_COE);
287 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
288 	usleep_range(10, 15);
289 
290 	/* at this point Master IP has full control of the I/Os */
291 }
292 
293 /* this needs to be called with shim_lock */
294 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
295 {
296 	unsigned int link_id = sdw->instance;
297 	void __iomem *shim = sdw->link_res->shim;
298 	u16 ioctl;
299 
300 	/* Glue logic */
301 	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
302 	ioctl |= SDW_SHIM_IOCTL_BKE;
303 	ioctl |= SDW_SHIM_IOCTL_COE;
304 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
305 	usleep_range(10, 15);
306 
307 	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
308 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
309 	usleep_range(10, 15);
310 
311 	/* at this point Integration Glue has full control of the I/Os */
312 }
313 
314 /* this needs to be called with shim_lock */
315 static void intel_shim_init(struct sdw_intel *sdw)
316 {
317 	void __iomem *shim = sdw->link_res->shim;
318 	unsigned int link_id = sdw->instance;
319 	u16 ioctl = 0, act = 0;
320 
321 	/* Initialize Shim */
322 	ioctl |= SDW_SHIM_IOCTL_BKE;
323 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
324 	usleep_range(10, 15);
325 
326 	ioctl |= SDW_SHIM_IOCTL_WPDD;
327 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
328 	usleep_range(10, 15);
329 
330 	ioctl |= SDW_SHIM_IOCTL_DO;
331 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
332 	usleep_range(10, 15);
333 
334 	ioctl |= SDW_SHIM_IOCTL_DOE;
335 	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
336 	usleep_range(10, 15);
337 
338 	intel_shim_glue_to_master_ip(sdw);
339 
340 	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
341 	act |= SDW_SHIM_CTMCTL_DACTQE;
342 	act |= SDW_SHIM_CTMCTL_DODS;
343 	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
344 	usleep_range(10, 15);
345 }
346 
347 static int intel_shim_check_wake(struct sdw_intel *sdw)
348 {
349 	void __iomem *shim;
350 	u16 wake_sts;
351 
352 	shim = sdw->link_res->shim;
353 	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
354 
355 	return wake_sts & BIT(sdw->instance);
356 }
357 
358 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
359 {
360 	void __iomem *shim = sdw->link_res->shim;
361 	unsigned int link_id = sdw->instance;
362 	u16 wake_en, wake_sts;
363 
364 	mutex_lock(sdw->link_res->shim_lock);
365 	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
366 
367 	if (wake_enable) {
368 		/* Enable the wakeup */
369 		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
370 		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
371 	} else {
372 		/* Disable the wake up interrupt */
373 		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
374 		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
375 
376 		/* Clear wake status */
377 		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
378 		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
379 		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
380 	}
381 	mutex_unlock(sdw->link_res->shim_lock);
382 }
383 
384 static int intel_link_power_up(struct sdw_intel *sdw)
385 {
386 	unsigned int link_id = sdw->instance;
387 	void __iomem *shim = sdw->link_res->shim;
388 	u32 *shim_mask = sdw->link_res->shim_mask;
389 	struct sdw_bus *bus = &sdw->cdns.bus;
390 	struct sdw_master_prop *prop = &bus->prop;
391 	u32 spa_mask, cpa_mask;
392 	u32 link_control;
393 	int ret = 0;
394 	u32 syncprd;
395 	u32 sync_reg;
396 
397 	mutex_lock(sdw->link_res->shim_lock);
398 
399 	/*
400 	 * The hardware relies on an internal counter, typically 4kHz,
401 	 * to generate the SoundWire SSP - which defines a 'safe'
402 	 * synchronization point between commands and audio transport
403 	 * and allows for multi-link synchronization. The SYNCPRD value
404 	 * is only dependent on the oscillator clock provided to
405 	 * the IP, so adjust based on _DSD properties reported in DSDT
406 	 * tables. The values reported are based on either 24MHz
407 	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
408 	 */
409 	if (prop->mclk_freq % 6000000)
410 		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
411 	else
412 		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
413 
414 	if (!*shim_mask) {
415 		dev_dbg(sdw->cdns.dev, "powering up all links\n");
416 
417 		/* we first need to program the SyncPRD/CPU registers */
418 		dev_dbg(sdw->cdns.dev,
419 			"first link up, programming SYNCPRD\n");
420 
421 		/* set SyncPRD period */
422 		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
423 		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
424 
425 		/* Set SyncCPU bit */
426 		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
427 		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
428 
429 		/* Link power up sequence */
430 		link_control = intel_readl(shim, SDW_SHIM_LCTL);
431 
432 		/* only power-up enabled links */
433 		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
434 		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
435 
436 		link_control |= spa_mask;
437 
438 		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
439 		if (ret < 0) {
440 			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
441 			goto out;
442 		}
443 
444 		/* SyncCPU will change once link is active */
445 		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
446 				     SDW_SHIM_SYNC_SYNCCPU, 0);
447 		if (ret < 0) {
448 			dev_err(sdw->cdns.dev,
449 				"Failed to set SHIM_SYNC: %d\n", ret);
450 			goto out;
451 		}
452 	}
453 
454 	*shim_mask |= BIT(link_id);
455 
456 	sdw->cdns.link_up = true;
457 
458 	intel_shim_init(sdw);
459 
460 out:
461 	mutex_unlock(sdw->link_res->shim_lock);
462 
463 	return ret;
464 }
465 
466 static int intel_link_power_down(struct sdw_intel *sdw)
467 {
468 	u32 link_control, spa_mask, cpa_mask;
469 	unsigned int link_id = sdw->instance;
470 	void __iomem *shim = sdw->link_res->shim;
471 	u32 *shim_mask = sdw->link_res->shim_mask;
472 	int ret = 0;
473 
474 	mutex_lock(sdw->link_res->shim_lock);
475 
476 	if (!(*shim_mask & BIT(link_id)))
477 		dev_err(sdw->cdns.dev,
478 			"%s: Unbalanced power-up/down calls\n", __func__);
479 
480 	sdw->cdns.link_up = false;
481 
482 	intel_shim_master_ip_to_glue(sdw);
483 
484 	*shim_mask &= ~BIT(link_id);
485 
486 	if (!*shim_mask) {
487 
488 		dev_dbg(sdw->cdns.dev, "powering down all links\n");
489 
490 		/* Link power down sequence */
491 		link_control = intel_readl(shim, SDW_SHIM_LCTL);
492 
493 		/* only power-down enabled links */
494 		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
495 		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
496 
497 		link_control &= spa_mask;
498 
499 		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
500 		if (ret < 0) {
501 			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
502 
503 			/*
504 			 * we leave the sdw->cdns.link_up flag as false since we've disabled
505 			 * the link at this point and cannot handle interrupts any longer.
506 			 */
507 		}
508 	}
509 
510 	mutex_unlock(sdw->link_res->shim_lock);
511 
512 	return ret;
513 }
514 
515 static void intel_shim_sync_arm(struct sdw_intel *sdw)
516 {
517 	void __iomem *shim = sdw->link_res->shim;
518 	u32 sync_reg;
519 
520 	mutex_lock(sdw->link_res->shim_lock);
521 
522 	/* update SYNC register */
523 	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
524 	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
525 	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
526 
527 	mutex_unlock(sdw->link_res->shim_lock);
528 }
529 
530 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
531 {
532 	void __iomem *shim = sdw->link_res->shim;
533 	u32 sync_reg;
534 	int ret;
535 
536 	/* Read SYNC register */
537 	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
538 
539 	/*
540 	 * Set SyncGO bit to synchronously trigger a bank switch for
541 	 * all the masters. A write to the SYNCGO bit clears the CMDSYNC bit
542 	 * for all the Masters.
543 	 */
544 	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
545 
546 	ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
547 			      SDW_SHIM_SYNC_SYNCGO);
548 
549 	if (ret < 0)
550 		dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
551 
552 	return ret;
553 }
554 
555 static int intel_shim_sync_go(struct sdw_intel *sdw)
556 {
557 	int ret;
558 
559 	mutex_lock(sdw->link_res->shim_lock);
560 
561 	ret = intel_shim_sync_go_unlocked(sdw);
562 
563 	mutex_unlock(sdw->link_res->shim_lock);
564 
565 	return ret;
566 }
567 
568 /*
569  * PDI routines
570  */
571 static void intel_pdi_init(struct sdw_intel *sdw,
572 			   struct sdw_cdns_stream_config *config)
573 {
574 	void __iomem *shim = sdw->link_res->shim;
575 	unsigned int link_id = sdw->instance;
576 	int pcm_cap;
577 
578 	/* PCM Stream Capability */
579 	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
580 
581 	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
582 	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
583 	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
584 
585 	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
586 		config->pcm_bd, config->pcm_in, config->pcm_out);
587 }
588 
589 static int
590 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
591 {
592 	void __iomem *shim = sdw->link_res->shim;
593 	unsigned int link_id = sdw->instance;
594 	int count;
595 
596 	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
597 
598 	/*
599 	 * WORKAROUND: on all existing Intel controllers, pdi
600 	 * number 2 reports channel count as 1 even though it
601 	 * supports 8 channels. Hardcode the channel count for pdi
602 	 * number 2.
603 	 */
604 	if (pdi_num == 2)
605 		count = 7;
606 
607 	/* the channel count reported in the register is zero-based */
608 	count++;
609 
610 	return count;
611 }
612 
613 static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
614 				   struct sdw_cdns_pdi *pdi,
615 				   unsigned int num_pdi,
616 				   unsigned int *num_ch)
617 {
618 	int i, ch_count = 0;
619 
620 	for (i = 0; i < num_pdi; i++) {
621 		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
622 		ch_count += pdi->ch_count;
623 		pdi++;
624 	}
625 
626 	*num_ch = ch_count;
627 	return 0;
628 }
629 
630 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
631 				      struct sdw_cdns_streams *stream)
632 {
633 	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
634 				&stream->num_ch_bd);
635 
636 	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
637 				&stream->num_ch_in);
638 
639 	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
640 				&stream->num_ch_out);
641 
642 	return 0;
643 }
644 
645 static int intel_pdi_ch_update(struct sdw_intel *sdw)
646 {
647 	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
648 
649 	return 0;
650 }
651 
652 static void
653 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
654 {
655 	void __iomem *shim = sdw->link_res->shim;
656 	unsigned int link_id = sdw->instance;
657 	int pdi_conf = 0;
658 
659 	/* the Bulk and PCM streams are not contiguous */
660 	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
661 	if (pdi->num >= 2)
662 		pdi->intel_alh_id += 2;
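	/* e.g. link 0: pdi 0 -> ALH stream 3, pdi 2 -> 7; link 1: pdi 0 -> 19 */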
663 
664 	/*
665 	 * Program stream parameters to stream SHIM register
666 	 * This is applicable to PCM streams only.
667 	 */
668 	if (pdi->type != SDW_STREAM_PCM)
669 		return;
670 
671 	if (pdi->dir == SDW_DATA_DIR_RX)
672 		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
673 	else
674 		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
675 
676 	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
677 	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
678 	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
679 
680 	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
681 }
682 
683 static void
684 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
685 {
686 	void __iomem *alh = sdw->link_res->alh;
687 	unsigned int link_id = sdw->instance;
688 	unsigned int conf;
689 
690 	/* the Bulk and PCM streams are not contiguous */
691 	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
692 	if (pdi->num >= 2)
693 		pdi->intel_alh_id += 2;
694 
695 	/* Program Stream config ALH register */
696 	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
697 
698 	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
699 	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
700 
701 	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
702 }
703 
704 static int intel_params_stream(struct sdw_intel *sdw,
705 			       int stream,
706 			       struct snd_soc_dai *dai,
707 			       struct snd_pcm_hw_params *hw_params,
708 			       int link_id, int alh_stream_id)
709 {
710 	struct sdw_intel_link_res *res = sdw->link_res;
711 	struct sdw_intel_stream_params_data params_data;
712 
713 	params_data.stream = stream; /* direction */
714 	params_data.dai = dai;
715 	params_data.hw_params = hw_params;
716 	params_data.link_id = link_id;
717 	params_data.alh_stream_id = alh_stream_id;
718 
719 	if (res->ops && res->ops->params_stream && res->dev)
720 		return res->ops->params_stream(res->dev,
721 					       &params_data);
722 	return -EIO;
723 }
724 
725 static int intel_free_stream(struct sdw_intel *sdw,
726 			     int stream,
727 			     struct snd_soc_dai *dai,
728 			     int link_id)
729 {
730 	struct sdw_intel_link_res *res = sdw->link_res;
731 	struct sdw_intel_stream_free_data free_data;
732 
733 	free_data.stream = stream; /* direction */
734 	free_data.dai = dai;
735 	free_data.link_id = link_id;
736 
737 	if (res->ops && res->ops->free_stream && res->dev)
738 		return res->ops->free_stream(res->dev,
739 					     &free_data);
740 
741 	return 0;
742 }
743 
744 /*
745  * bank switch routines
746  */
747 
748 static int intel_pre_bank_switch(struct sdw_bus *bus)
749 {
750 	struct sdw_cdns *cdns = bus_to_cdns(bus);
751 	struct sdw_intel *sdw = cdns_to_intel(cdns);
752 
753 	/* Write to register only for multi-link */
754 	if (!bus->multi_link)
755 		return 0;
756 
757 	intel_shim_sync_arm(sdw);
758 
759 	return 0;
760 }
761 
762 static int intel_post_bank_switch(struct sdw_bus *bus)
763 {
764 	struct sdw_cdns *cdns = bus_to_cdns(bus);
765 	struct sdw_intel *sdw = cdns_to_intel(cdns);
766 	void __iomem *shim = sdw->link_res->shim;
767 	int sync_reg, ret;
768 
769 	/* Write to register only for multi-link */
770 	if (!bus->multi_link)
771 		return 0;
772 
773 	mutex_lock(sdw->link_res->shim_lock);
774 
775 	/* Read SYNC register */
776 	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
777 
778 	/*
779 	 * The post_bank_switch() op is called by the bus in a loop for
780 	 * all the Masters in the stream, with the expectation that
781 	 * we trigger the bank switch only for the first Master in the list
782 	 * and do nothing for the other Masters.
783 	 *
784 	 * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
785 	 */
786 	if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
787 		ret = 0;
788 		goto unlock;
789 	}
790 
791 	ret = intel_shim_sync_go_unlocked(sdw);
792 unlock:
793 	mutex_unlock(sdw->link_res->shim_lock);
794 
795 	if (ret < 0)
796 		dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
797 
798 	return ret;
799 }
800 
801 /*
802  * DAI routines
803  */
804 
805 static int intel_startup(struct snd_pcm_substream *substream,
806 			 struct snd_soc_dai *dai)
807 {
808 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
809 	int ret;
810 
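	/*
	 * -EACCES is returned when runtime PM is disabled for the device, e.g.
	 * with the SDW_INTEL_MASTER_DISABLE_PM_RUNTIME flag, and is not treated
	 * as an error here.
	 */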
811 	ret = pm_runtime_resume_and_get(cdns->dev);
812 	if (ret < 0 && ret != -EACCES) {
813 		dev_err_ratelimited(cdns->dev,
814 				    "pm_runtime_resume_and_get failed in %s, ret %d\n",
815 				    __func__, ret);
816 		return ret;
817 	}
818 	return 0;
819 }
820 
821 static int intel_hw_params(struct snd_pcm_substream *substream,
822 			   struct snd_pcm_hw_params *params,
823 			   struct snd_soc_dai *dai)
824 {
825 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
826 	struct sdw_intel *sdw = cdns_to_intel(cdns);
827 	struct sdw_cdns_dma_data *dma;
828 	struct sdw_cdns_pdi *pdi;
829 	struct sdw_stream_config sconfig;
830 	struct sdw_port_config *pconfig;
831 	int ch, dir;
832 	int ret;
833 
834 	dma = snd_soc_dai_get_dma_data(dai, substream);
835 	if (!dma)
836 		return -EIO;
837 
838 	ch = params_channels(params);
839 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
840 		dir = SDW_DATA_DIR_RX;
841 	else
842 		dir = SDW_DATA_DIR_TX;
843 
844 	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
845 
846 	if (!pdi) {
847 		ret = -EINVAL;
848 		goto error;
849 	}
850 
851 	/* do run-time configurations for SHIM, ALH and PDI/PORT */
852 	intel_pdi_shim_configure(sdw, pdi);
853 	intel_pdi_alh_configure(sdw, pdi);
854 	sdw_cdns_config_stream(cdns, ch, dir, pdi);
855 
856 	/* store pdi and hw_params, may be needed in prepare step */
857 	dma->paused = false;
858 	dma->suspended = false;
859 	dma->pdi = pdi;
860 	dma->hw_params = params;
861 
862 	/* Inform DSP about PDI stream number */
863 	ret = intel_params_stream(sdw, substream->stream, dai, params,
864 				  sdw->instance,
865 				  pdi->intel_alh_id);
866 	if (ret)
867 		goto error;
868 
869 	sconfig.direction = dir;
870 	sconfig.ch_count = ch;
871 	sconfig.frame_rate = params_rate(params);
872 	sconfig.type = dma->stream_type;
873 
874 	sconfig.bps = snd_pcm_format_width(params_format(params));
875 
876 	/* Port configuration */
877 	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
878 	if (!pconfig) {
879 		ret = -ENOMEM;
880 		goto error;
881 	}
882 
883 	pconfig->num = pdi->num;
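	/* map the stream onto the lowest 'ch' channels of the port */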
884 	pconfig->ch_mask = (1 << ch) - 1;
885 
886 	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
887 				    pconfig, 1, dma->stream);
888 	if (ret)
889 		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
890 
891 	kfree(pconfig);
892 error:
893 	return ret;
894 }
895 
896 static int intel_prepare(struct snd_pcm_substream *substream,
897 			 struct snd_soc_dai *dai)
898 {
899 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
900 	struct sdw_intel *sdw = cdns_to_intel(cdns);
901 	struct sdw_cdns_dma_data *dma;
902 	int ch, dir;
903 	int ret = 0;
904 
905 	dma = snd_soc_dai_get_dma_data(dai, substream);
906 	if (!dma) {
907 		dev_err(dai->dev, "failed to get dma data in %s\n",
908 			__func__);
909 		return -EIO;
910 	}
911 
912 	if (dma->suspended) {
913 		dma->suspended = false;
914 
915 		/*
916 		 * .prepare() is called after system resume, where we
917 		 * need to reinitialize the SHIM/ALH/Cadence IP.
918 		 * .prepare() is also called to deal with underflows,
919 		 * but in those cases we cannot touch ALH/SHIM
920 		 * registers
921 		 */
922 
923 		/* configure stream */
924 		ch = params_channels(dma->hw_params);
925 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
926 			dir = SDW_DATA_DIR_RX;
927 		else
928 			dir = SDW_DATA_DIR_TX;
929 
930 		intel_pdi_shim_configure(sdw, dma->pdi);
931 		intel_pdi_alh_configure(sdw, dma->pdi);
932 		sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
933 
934 		/* Inform DSP about PDI stream number */
935 		ret = intel_params_stream(sdw, substream->stream, dai,
936 					  dma->hw_params,
937 					  sdw->instance,
938 					  dma->pdi->intel_alh_id);
939 	}
940 
941 	return ret;
942 }
943 
944 static int
945 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
946 {
947 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
948 	struct sdw_intel *sdw = cdns_to_intel(cdns);
949 	struct sdw_cdns_dma_data *dma;
950 	int ret;
951 
952 	dma = snd_soc_dai_get_dma_data(dai, substream);
953 	if (!dma)
954 		return -EIO;
955 
956 	/*
957 	 * The sdw stream state will transition to RELEASED when stream->
958 	 * master_list is empty. So the stream state will transition to
959 	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
960 	 * cpu-dai.
961 	 */
962 	ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
963 	if (ret < 0) {
964 		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
965 			dma->stream->name, ret);
966 		return ret;
967 	}
968 
969 	ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
970 	if (ret < 0) {
971 		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
972 		return ret;
973 	}
974 
975 	dma->hw_params = NULL;
976 	dma->pdi = NULL;
977 
978 	return 0;
979 }
980 
981 static void intel_shutdown(struct snd_pcm_substream *substream,
982 			   struct snd_soc_dai *dai)
983 {
984 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
985 
986 	pm_runtime_mark_last_busy(cdns->dev);
987 	pm_runtime_put_autosuspend(cdns->dev);
988 }
989 
990 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
991 				    void *stream, int direction)
992 {
993 	return cdns_set_sdw_stream(dai, stream, direction);
994 }
995 
996 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
997 				  int direction)
998 {
999 	struct sdw_cdns_dma_data *dma;
1000 
1001 	if (direction == SNDRV_PCM_STREAM_PLAYBACK)
1002 		dma = dai->playback_dma_data;
1003 	else
1004 		dma = dai->capture_dma_data;
1005 
1006 	if (!dma)
1007 		return ERR_PTR(-EINVAL);
1008 
1009 	return dma->stream;
1010 }
1011 
1012 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
1013 {
1014 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1015 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1016 	struct sdw_intel_link_res *res = sdw->link_res;
1017 	struct sdw_cdns_dma_data *dma;
1018 	int ret = 0;
1019 
1020 	/*
1021 	 * The .trigger callback is used to send required IPC to audio
1022 	 * firmware. The .free_stream callback will still be called
1023 	 * by intel_free_stream() in the TRIGGER_SUSPEND case.
1024 	 */
1025 	if (res->ops && res->ops->trigger)
1026 		res->ops->trigger(dai, cmd, substream->stream);
1027 
1028 	dma = snd_soc_dai_get_dma_data(dai, substream);
1029 	if (!dma) {
1030 		dev_err(dai->dev, "failed to get dma data in %s\n",
1031 			__func__);
1032 		return -EIO;
1033 	}
1034 
1035 	switch (cmd) {
1036 	case SNDRV_PCM_TRIGGER_SUSPEND:
1037 
1038 		/*
1039 		 * The .prepare callback is used to deal with xruns and resume operations.
1040 		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
1041 		 * but for resume operations the DMAs and SHIM registers need to be initialized.
1042 		 * The .trigger callback is used to track the suspend case only.
1043 		 */
1044 
1045 		dma->suspended = true;
1046 
1047 		ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
1048 		break;
1049 
1050 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1051 		dma->paused = true;
1052 		break;
1053 	case SNDRV_PCM_TRIGGER_STOP:
1054 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1055 		dma->paused = false;
1056 		break;
1057 	default:
1058 		break;
1059 	}
1060 
1061 	return ret;
1062 }
1063 
1064 static int intel_component_probe(struct snd_soc_component *component)
1065 {
1066 	int ret;
1067 
1068 	/*
1069 	 * make sure the device is pm_runtime_active before initiating
1070 	 * bus transactions during the card registration.
1071 	 * We use pm_runtime_resume() here rather than taking a reference
1072 	 * and releasing it immediately.
1073 	 */
1074 	ret = pm_runtime_resume(component->dev);
1075 	if (ret < 0 && ret != -EACCES)
1076 		return ret;
1077 
1078 	return 0;
1079 }
1080 
1081 static int intel_component_dais_suspend(struct snd_soc_component *component)
1082 {
1083 	struct snd_soc_dai *dai;
1084 
1085 	/*
1086 	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
1087 	 * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
1088 	 * Since the component suspend is called last, we can trap this corner case
1089 	 * and force the DAIs to release their resources.
1090 	 */
1091 	for_each_component_dais(component, dai) {
1092 		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1093 		struct sdw_intel *sdw = cdns_to_intel(cdns);
1094 		struct sdw_cdns_dma_data *dma;
1095 		int stream;
1096 		int ret;
1097 
1098 		dma = dai->playback_dma_data;
1099 		stream = SNDRV_PCM_STREAM_PLAYBACK;
1100 		if (!dma) {
1101 			dma = dai->capture_dma_data;
1102 			stream = SNDRV_PCM_STREAM_CAPTURE;
1103 		}
1104 
1105 		if (!dma)
1106 			continue;
1107 
1108 		if (dma->suspended)
1109 			continue;
1110 
1111 		if (dma->paused) {
1112 			dma->suspended = true;
1113 
1114 			ret = intel_free_stream(sdw, stream, dai, sdw->instance);
1115 			if (ret < 0)
1116 				return ret;
1117 		}
1118 	}
1119 
1120 	return 0;
1121 }
1122 
1123 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1124 	.startup = intel_startup,
1125 	.hw_params = intel_hw_params,
1126 	.prepare = intel_prepare,
1127 	.hw_free = intel_hw_free,
1128 	.trigger = intel_trigger,
1129 	.shutdown = intel_shutdown,
1130 	.set_stream = intel_pcm_set_sdw_stream,
1131 	.get_stream = intel_get_sdw_stream,
1132 };
1133 
1134 static const struct snd_soc_component_driver dai_component = {
1135 	.name			= "soundwire",
1136 	.probe			= intel_component_probe,
1137 	.suspend		= intel_component_dais_suspend,
1138 	.legacy_dai_naming	= 1,
1139 };
1140 
1141 static int intel_create_dai(struct sdw_cdns *cdns,
1142 			    struct snd_soc_dai_driver *dais,
1143 			    enum intel_pdi_type type,
1144 			    u32 num, u32 off, u32 max_ch)
1145 {
1146 	int i;
1147 
1148 	if (num == 0)
1149 		return 0;
1150 
1151 	 /* TODO: Read supported rates/formats from hardware */
1152 	for (i = off; i < (off + num); i++) {
1153 		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1154 					      "SDW%d Pin%d",
1155 					      cdns->instance, i);
1156 		if (!dais[i].name)
1157 			return -ENOMEM;
1158 
1159 		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1160 			dais[i].playback.channels_min = 1;
1161 			dais[i].playback.channels_max = max_ch;
1162 			dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1163 			dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1164 		}
1165 
1166 		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1167 			dais[i].capture.channels_min = 1;
1168 			dais[i].capture.channels_max = max_ch;
1169 			dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1170 			dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1171 		}
1172 
1173 		dais[i].ops = &intel_pcm_dai_ops;
1174 	}
1175 
1176 	return 0;
1177 }
1178 
1179 static int intel_register_dai(struct sdw_intel *sdw)
1180 {
1181 	struct sdw_cdns_stream_config config;
1182 	struct sdw_cdns *cdns = &sdw->cdns;
1183 	struct sdw_cdns_streams *stream;
1184 	struct snd_soc_dai_driver *dais;
1185 	int num_dai, ret, off = 0;
1186 
1187 	/* Read the PDI config and initialize cadence PDI */
1188 	intel_pdi_init(sdw, &config);
1189 	ret = sdw_cdns_pdi_init(cdns, config);
1190 	if (ret)
1191 		return ret;
1192 
1193 	intel_pdi_ch_update(sdw);
1194 
1195 	/* DAIs are created based on total number of PDIs supported */
1196 	num_dai = cdns->pcm.num_pdi;
1197 
1198 	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1199 	if (!dais)
1200 		return -ENOMEM;
1201 
1202 	/* Create PCM DAIs */
1203 	stream = &cdns->pcm;
1204 
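	/* DAI entries are laid out as input PDIs first, then output PDIs, then bidirectional PDIs */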
1205 	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1206 			       off, stream->num_ch_in);
1207 	if (ret)
1208 		return ret;
1209 
1210 	off += cdns->pcm.num_in;
1211 	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1212 			       off, stream->num_ch_out);
1213 	if (ret)
1214 		return ret;
1215 
1216 	off += cdns->pcm.num_out;
1217 	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1218 			       off, stream->num_ch_bd);
1219 	if (ret)
1220 		return ret;
1221 
1222 	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1223 					       dais, num_dai);
1224 }
1225 
1226 static int intel_start_bus(struct sdw_intel *sdw)
1227 {
1228 	struct device *dev = sdw->cdns.dev;
1229 	struct sdw_cdns *cdns = &sdw->cdns;
1230 	struct sdw_bus *bus = &cdns->bus;
1231 	int ret;
1232 
1233 	ret = sdw_cdns_enable_interrupt(cdns, true);
1234 	if (ret < 0) {
1235 		dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1236 		return ret;
1237 	}
1238 
1239 	/*
1240 	 * follow recommended programming flows to avoid timeouts when
1241 	 * gsync is enabled
1242 	 */
1243 	if (bus->multi_link)
1244 		intel_shim_sync_arm(sdw);
1245 
1246 	ret = sdw_cdns_init(cdns);
1247 	if (ret < 0) {
1248 		dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
1249 		goto err_interrupt;
1250 	}
1251 
1252 	ret = sdw_cdns_exit_reset(cdns);
1253 	if (ret < 0) {
1254 		dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
1255 		goto err_interrupt;
1256 	}
1257 
1258 	if (bus->multi_link) {
1259 		ret = intel_shim_sync_go(sdw);
1260 		if (ret < 0) {
1261 			dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
1262 			goto err_interrupt;
1263 		}
1264 	}
1265 	sdw_cdns_check_self_clearing_bits(cdns, __func__,
1266 					  true, INTEL_MASTER_RESET_ITERATIONS);
1267 
1268 	return 0;
1269 
1270 err_interrupt:
1271 	sdw_cdns_enable_interrupt(cdns, false);
1272 	return ret;
1273 }
1274 
1275 static int intel_start_bus_after_reset(struct sdw_intel *sdw)
1276 {
1277 	struct device *dev = sdw->cdns.dev;
1278 	struct sdw_cdns *cdns = &sdw->cdns;
1279 	struct sdw_bus *bus = &cdns->bus;
1280 	bool clock_stop0;
1281 	int status;
1282 	int ret;
1283 
1284 	/*
1285 	 * An exception condition occurs for the CLK_STOP_BUS_RESET
1286 	 * case if one or more masters remain active. In this condition,
1287 	 * all the masters are powered on since they are in the same power
1288 	 * domain. A Master can preserve its context for clock stop0, so
1289 	 * there is no need to clear the slave status and reset the bus.
1290 	 */
1291 	clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1292 
1293 	if (!clock_stop0) {
1294 
1295 		/*
1296 		 * make sure all Slaves are tagged as UNATTACHED and
1297 		 * provide reason for reinitialization
1298 		 */
1299 
1300 		status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1301 		sdw_clear_slave_status(bus, status);
1302 
1303 		ret = sdw_cdns_enable_interrupt(cdns, true);
1304 		if (ret < 0) {
1305 			dev_err(dev, "cannot enable interrupts during resume\n");
1306 			return ret;
1307 		}
1308 
1309 		/*
1310 		 * follow recommended programming flows to avoid
1311 		 * timeouts when gsync is enabled
1312 		 */
1313 		if (bus->multi_link)
1314 			intel_shim_sync_arm(sdw);
1315 
1316 		/*
1317 		 * Re-initialize the IP since it was powered-off
1318 		 */
1319 		sdw_cdns_init(&sdw->cdns);
1320 
1321 	} else {
1322 		ret = sdw_cdns_enable_interrupt(cdns, true);
1323 		if (ret < 0) {
1324 			dev_err(dev, "cannot enable interrupts during resume\n");
1325 			return ret;
1326 		}
1327 	}
1328 
1329 	ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1330 	if (ret < 0) {
1331 		dev_err(dev, "unable to restart clock during resume\n");
1332 		goto err_interrupt;
1333 	}
1334 
1335 	if (!clock_stop0) {
1336 		ret = sdw_cdns_exit_reset(cdns);
1337 		if (ret < 0) {
1338 			dev_err(dev, "unable to exit bus reset sequence during resume\n");
1339 			goto err_interrupt;
1340 		}
1341 
1342 		if (bus->multi_link) {
1343 			ret = intel_shim_sync_go(sdw);
1344 			if (ret < 0) {
1345 				dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1346 				goto err_interrupt;
1347 			}
1348 		}
1349 	}
1350 	sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
1351 
1352 	return 0;
1353 
1354 err_interrupt:
1355 	sdw_cdns_enable_interrupt(cdns, false);
1356 	return ret;
1357 }
1358 
1359 static void intel_check_clock_stop(struct sdw_intel *sdw)
1360 {
1361 	struct device *dev = sdw->cdns.dev;
1362 	bool clock_stop0;
1363 
1364 	clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1365 	if (!clock_stop0)
1366 		dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1367 }
1368 
1369 static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
1370 {
1371 	struct device *dev = sdw->cdns.dev;
1372 	struct sdw_cdns *cdns = &sdw->cdns;
1373 	int ret;
1374 
1375 	ret = sdw_cdns_enable_interrupt(cdns, true);
1376 	if (ret < 0) {
1377 		dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1378 		return ret;
1379 	}
1380 
1381 	ret = sdw_cdns_clock_restart(cdns, false);
1382 	if (ret < 0) {
1383 		dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
1384 		sdw_cdns_enable_interrupt(cdns, false);
1385 		return ret;
1386 	}
1387 
1388 	sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
1389 					  true, INTEL_MASTER_RESET_ITERATIONS);
1390 
1391 	return 0;
1392 }
1393 
1394 static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
1395 {
1396 	struct device *dev = sdw->cdns.dev;
1397 	struct sdw_cdns *cdns = &sdw->cdns;
1398 	bool wake_enable = false;
1399 	int ret;
1400 
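	/* the WAKEEN logic is only armed below if the clock was successfully stopped */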
1401 	if (clock_stop) {
1402 		ret = sdw_cdns_clock_stop(cdns, true);
1403 		if (ret < 0)
1404 			dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
1405 		else
1406 			wake_enable = true;
1407 	}
1408 
1409 	ret = sdw_cdns_enable_interrupt(cdns, false);
1410 	if (ret < 0) {
1411 		dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
1412 		return ret;
1413 	}
1414 
1415 	ret = intel_link_power_down(sdw);
1416 	if (ret) {
1417 		dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
1418 		return ret;
1419 	}
1420 
1421 	intel_shim_wake(sdw, wake_enable);
1422 
1423 	return 0;
1424 }
1425 
1426 static int sdw_master_read_intel_prop(struct sdw_bus *bus)
1427 {
1428 	struct sdw_master_prop *prop = &bus->prop;
1429 	struct fwnode_handle *link;
1430 	char name[32];
1431 	u32 quirk_mask;
1432 
1433 	/* Find master handle */
1434 	snprintf(name, sizeof(name),
1435 		 "mipi-sdw-link-%d-subproperties", bus->link_id);
1436 
1437 	link = device_get_named_child_node(bus->dev, name);
1438 	if (!link) {
1439 		dev_err(bus->dev, "Master node %s not found\n", name);
1440 		return -EIO;
1441 	}
1442 
1443 	fwnode_property_read_u32(link,
1444 				 "intel-sdw-ip-clock",
1445 				 &prop->mclk_freq);
1446 
1447 	/* the values reported by BIOS are the 2x clock, not the bus clock */
1448 	prop->mclk_freq /= 2;
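	/* e.g. a reported 2x clock of 38.4 MHz results in an mclk_freq of 19.2 MHz */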
1449 
1450 	fwnode_property_read_u32(link,
1451 				 "intel-quirk-mask",
1452 				 &quirk_mask);
1453 
1454 	if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
1455 		prop->hw_disabled = true;
1456 
1457 	prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
1458 		SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
1459 
1460 	return 0;
1461 }
1462 
1463 static int intel_prop_read(struct sdw_bus *bus)
1464 {
1465 	/* Initialize with default handler to read all DisCo properties */
1466 	sdw_master_read_prop(bus);
1467 
1468 	/* read Intel-specific properties */
1469 	sdw_master_read_intel_prop(bus);
1470 
1471 	return 0;
1472 }
1473 
1474 static struct sdw_master_ops sdw_intel_ops = {
1475 	.read_prop = intel_prop_read,
1476 	.override_adr = sdw_dmi_override_adr,
1477 	.xfer_msg = cdns_xfer_msg,
1478 	.xfer_msg_defer = cdns_xfer_msg_defer,
1479 	.reset_page_addr = cdns_reset_page_addr,
1480 	.set_bus_conf = cdns_bus_conf,
1481 	.pre_bank_switch = intel_pre_bank_switch,
1482 	.post_bank_switch = intel_post_bank_switch,
1483 	.read_ping_status = cdns_read_ping_status,
1484 };
1485 
1486 /*
1487  * probe and init (the aux_dev_id argument is required by the function prototype but not used)
1488  */
1489 static int intel_link_probe(struct auxiliary_device *auxdev,
1490 			    const struct auxiliary_device_id *aux_dev_id)
1491 
1492 {
1493 	struct device *dev = &auxdev->dev;
1494 	struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
1495 	struct sdw_intel *sdw;
1496 	struct sdw_cdns *cdns;
1497 	struct sdw_bus *bus;
1498 	int ret;
1499 
1500 	sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
1501 	if (!sdw)
1502 		return -ENOMEM;
1503 
1504 	cdns = &sdw->cdns;
1505 	bus = &cdns->bus;
1506 
1507 	sdw->instance = auxdev->id;
1508 	sdw->link_res = &ldev->link_res;
1509 	cdns->dev = dev;
1510 	cdns->registers = sdw->link_res->registers;
1511 	cdns->instance = sdw->instance;
1512 	cdns->msg_count = 0;
1513 
1514 	bus->link_id = auxdev->id;
1515 	bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
1516 
1517 	sdw_cdns_probe(cdns);
1518 
1519 	/* Set ops */
1520 	bus->ops = &sdw_intel_ops;
1521 
1522 	/* set driver data, accessed by snd_soc_dai_get_drvdata() */
1523 	auxiliary_set_drvdata(auxdev, cdns);
1524 
1525 	/* use generic bandwidth allocation algorithm */
1526 	sdw->cdns.bus.compute_params = sdw_compute_params;
1527 
1528 	/* avoid resuming from pm_runtime suspend if it's not required */
1529 	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
1530 
1531 	ret = sdw_bus_master_add(bus, dev, dev->fwnode);
1532 	if (ret) {
1533 		dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
1534 		return ret;
1535 	}
1536 
1537 	if (bus->prop.hw_disabled)
1538 		dev_info(dev,
1539 			 "SoundWire master %d is disabled, will be ignored\n",
1540 			 bus->link_id);
1541 	/*
1542 	 * Ignore the BIOS err_threshold; it's a really bad idea when dealing
1543 	 * with multiple hardware-synchronized links.
1544 	 */
1545 	bus->prop.err_threshold = 0;
1546 
1547 	return 0;
1548 }
1549 
1550 int intel_link_startup(struct auxiliary_device *auxdev)
1551 {
1552 	struct device *dev = &auxdev->dev;
1553 	struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
1554 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1555 	struct sdw_bus *bus = &cdns->bus;
1556 	int link_flags;
1557 	bool multi_link;
1558 	u32 clock_stop_quirks;
1559 	int ret;
1560 
1561 	if (bus->prop.hw_disabled) {
1562 		dev_info(dev,
1563 			 "SoundWire master %d is disabled, ignoring\n",
1564 			 sdw->instance);
1565 		return 0;
1566 	}
1567 
1568 	link_flags = md_flags >> (bus->link_id * 8);
1569 	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1570 	if (!multi_link) {
1571 		dev_dbg(dev, "Multi-link is disabled\n");
1572 	} else {
1573 		/*
1574 		 * hardware-based synchronization is required regardless
1575 		 * of the number of segments used by a stream: SSP-based
1576 		 * synchronization is gated by gsync when the multi-master
1577 		 * mode is set.
1578 		 */
1579 		bus->hw_sync_min_links = 1;
1580 	}
1581 	bus->multi_link = multi_link;
1582 
1583 	/* Initialize shim, controller */
1584 	ret = intel_link_power_up(sdw);
1585 	if (ret)
1586 		goto err_init;
1587 
1588 	/* Register DAIs */
1589 	ret = intel_register_dai(sdw);
1590 	if (ret) {
1591 		dev_err(dev, "DAI registration failed: %d\n", ret);
1592 		goto err_power_up;
1593 	}
1594 
1595 	intel_debugfs_init(sdw);
1596 
1597 	/* start bus */
1598 	ret = intel_start_bus(sdw);
1599 	if (ret) {
1600 		dev_err(dev, "bus start failed: %d\n", ret);
1601 		goto err_power_up;
1602 	}
1603 
1604 	/* Enable runtime PM */
1605 	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
1606 		pm_runtime_set_autosuspend_delay(dev,
1607 						 INTEL_MASTER_SUSPEND_DELAY_MS);
1608 		pm_runtime_use_autosuspend(dev);
1609 		pm_runtime_mark_last_busy(dev);
1610 
1611 		pm_runtime_set_active(dev);
1612 		pm_runtime_enable(dev);
1613 	}
1614 
1615 	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1616 	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
1617 		/*
1618 		 * To keep the clock running we need to prevent
1619 		 * pm_runtime suspend from happening by increasing the
1620 		 * reference count.
1621 		 * This quirk is specified by the parent PCI device in
1622 		 * case of specific latency requirements. It will have
1623 		 * no effect if pm_runtime is disabled by the user via
1624 		 * a module parameter for testing purposes.
1625 		 */
1626 		pm_runtime_get_noresume(dev);
1627 	}
1628 
1629 	/*
1630 	 * The runtime PM status of Slave devices is "Unsupported"
1631 	 * until they report as ATTACHED. If they don't, e.g. because
1632 	 * there are no Slave devices populated or if the power-on is
1633 	 * delayed or dependent on a power switch, the Master will
1634 	 * remain active and prevent its parent from suspending.
1635 	 *
1636 	 * Conditionally force the pm_runtime core to re-evaluate the
1637 	 * Master status in the absence of any Slave activity. A quirk
1638 	 * is provided to e.g. deal with Slaves that may be powered on
1639 	 * with a delay. A more complete solution would require the
1640 	 * definition of Master properties.
1641 	 */
1642 	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1643 		pm_runtime_idle(dev);
1644 
1645 	sdw->startup_done = true;
1646 	return 0;
1647 
1648 err_power_up:
1649 	intel_link_power_down(sdw);
1650 err_init:
1651 	return ret;
1652 }
1653 
1654 static void intel_link_remove(struct auxiliary_device *auxdev)
1655 {
1656 	struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
1657 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1658 	struct sdw_bus *bus = &cdns->bus;
1659 
1660 	/*
1661 	 * Since pm_runtime is already disabled, we don't decrease
1662 	 * the refcount when the clock_stop_quirk is
1663 	 * SDW_INTEL_CLK_STOP_NOT_ALLOWED
1664 	 */
1665 	if (!bus->prop.hw_disabled) {
1666 		intel_debugfs_exit(sdw);
1667 		sdw_cdns_enable_interrupt(cdns, false);
1668 	}
1669 	sdw_bus_master_delete(bus);
1670 }
1671 
1672 int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
1673 {
1674 	struct device *dev = &auxdev->dev;
1675 	struct sdw_intel *sdw;
1676 	struct sdw_bus *bus;
1677 
1678 	sdw = auxiliary_get_drvdata(auxdev);
1679 	bus = &sdw->cdns.bus;
1680 
1681 	if (bus->prop.hw_disabled || !sdw->startup_done) {
1682 		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1683 			bus->link_id);
1684 		return 0;
1685 	}
1686 
1687 	if (!intel_shim_check_wake(sdw))
1688 		return 0;
1689 
1690 	/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
1691 	intel_shim_wake(sdw, false);
1692 
1693 	/*
1694 	 * resume the Master, which will generate a bus reset and result in
1695 	 * Slaves re-attaching and being re-enumerated. The SoundWire physical
1696 	 * device which generated the wake will trigger an interrupt, which
1697 	 * will in turn cause the corresponding Linux Slave device to be
1698 	 * resumed and the Slave codec driver to check the status.
1699 	 */
1700 	pm_request_resume(dev);
1701 
1702 	return 0;
1703 }
1704 
1705 /*
1706  * PM calls
1707  */
1708 
1709 static int intel_resume_child_device(struct device *dev, void *data)
1710 {
1711 	int ret;
1712 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
1713 
1714 	if (!slave->probed) {
1715 		dev_dbg(dev, "skipping device, no probed driver\n");
1716 		return 0;
1717 	}
1718 	if (!slave->dev_num_sticky) {
1719 		dev_dbg(dev, "skipping device, never detected on bus\n");
1720 		return 0;
1721 	}
1722 
1723 	ret = pm_request_resume(dev);
1724 	if (ret < 0)
1725 		dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
1726 
1727 	return ret;
1728 }
1729 
1730 static int __maybe_unused intel_pm_prepare(struct device *dev)
1731 {
1732 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1733 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1734 	struct sdw_bus *bus = &cdns->bus;
1735 	u32 clock_stop_quirks;
1736 	int ret;
1737 
1738 	if (bus->prop.hw_disabled || !sdw->startup_done) {
1739 		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1740 			bus->link_id);
1741 		return 0;
1742 	}
1743 
1744 	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1745 
1746 	if (pm_runtime_suspended(dev) &&
1747 	    pm_runtime_suspended(dev->parent) &&
1748 	    ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
1749 	     !clock_stop_quirks)) {
1750 		/*
1751 		 * If we've enabled clock stop and the parent is suspended, the SHIM registers
1752 		 * are not accessible and the shim wake cannot be disabled.
1753 		 * The only solution is to resume the entire bus to full power.
1754 		 */
1755 
1756 		/*
1757 		 * If any operation in this block fails, we keep going since we don't want
1758 		 * to prevent system suspend from happening and errors should be recoverable
1759 		 * on resume.
1760 		 */
1761 
1762 		/*
1763 		 * first resume the device for this link. This will also by construction
1764 		 * resume the PCI parent device.
1765 		 */
1766 		ret = pm_request_resume(dev);
1767 		if (ret < 0) {
1768 			dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
1769 			return 0;
1770 		}
1771 
1772 		/*
1773 		 * Continue resuming the entire bus (parent + child devices) to exit
1774 		 * the clock stop mode. If there are no devices connected on this link
1775 		 * this is a no-op.
1776 		 * The resume to full power could have been implemented with a .prepare
1777 		 * step in SoundWire codec drivers. This would however require a lot
1778 		 * of code to handle an Intel-specific corner case. It is simpler in
1779 		 * practice to add a loop at the link level.
1780 		 */
1781 		ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device);
1782 
1783 		if (ret < 0)
1784 			dev_err(dev, "%s: intel_resume_child_device failed: %d\n", __func__, ret);
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static int __maybe_unused intel_suspend(struct device *dev)
1791 {
1792 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1793 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1794 	struct sdw_bus *bus = &cdns->bus;
1795 	u32 clock_stop_quirks;
1796 	int ret;
1797 
1798 	if (bus->prop.hw_disabled || !sdw->startup_done) {
1799 		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1800 			bus->link_id);
1801 		return 0;
1802 	}
1803 
1804 	if (pm_runtime_suspended(dev)) {
1805 		dev_dbg(dev, "pm_runtime status: suspended\n");
1806 
1807 		clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1808 
1809 		if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
1810 		    !clock_stop_quirks) {
1811 
1812 			if (pm_runtime_suspended(dev->parent)) {
1813 				/*
1814 				 * paranoia check: this should not happen with the .prepare
1815 				 * resume to full power
1816 				 */
1817 				dev_err(dev, "%s: invalid config: parent is suspended\n", __func__);
1818 			} else {
1819 				intel_shim_wake(sdw, false);
1820 			}
1821 		}
1822 
1823 		return 0;
1824 	}
1825 
1826 	ret = intel_stop_bus(sdw, false);
1827 	if (ret < 0) {
1828 		dev_err(dev, "%s: cannot stop bus: %d\n", __func__, ret);
1829 		return ret;
1830 	}
1831 
1832 	return 0;
1833 }
1834 
1835 static int __maybe_unused intel_suspend_runtime(struct device *dev)
1836 {
1837 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1838 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1839 	struct sdw_bus *bus = &cdns->bus;
1840 	u32 clock_stop_quirks;
1841 	int ret;
1842 
1843 	if (bus->prop.hw_disabled || !sdw->startup_done) {
1844 		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1845 			bus->link_id);
1846 		return 0;
1847 	}
1848 
1849 	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1850 
1851 	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1852 		ret = intel_stop_bus(sdw, false);
1853 		if (ret < 0) {
1854 			dev_err(dev, "%s: cannot stop bus during teardown: %d\n",
1855 				__func__, ret);
1856 			return ret;
1857 		}
1858 	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || !clock_stop_quirks) {
1859 		ret = intel_stop_bus(sdw, true);
1860 		if (ret < 0) {
1861 			dev_err(dev, "%s: cannot stop bus during clock_stop: %d\n",
1862 				__func__, ret);
1863 			return ret;
1864 		}
1865 	} else {
1866 		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1867 			__func__, clock_stop_quirks);
1868 		ret = -EINVAL;
1869 	}
1870 
1871 	return ret;
1872 }
1873 
1874 static int __maybe_unused intel_resume(struct device *dev)
1875 {
1876 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1877 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1878 	struct sdw_bus *bus = &cdns->bus;
1879 	int link_flags;
1880 	int ret;
1881 
1882 	if (bus->prop.hw_disabled || !sdw->startup_done) {
1883 		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1884 			bus->link_id);
1885 		return 0;
1886 	}
1887 
1888 	link_flags = md_flags >> (bus->link_id * 8);
1889 
1890 	if (pm_runtime_suspended(dev)) {
1891 		dev_dbg(dev, "pm_runtime status was suspended, forcing active\n");
1892 
1893 		/* follow required sequence from runtime_pm.rst */
1894 		pm_runtime_disable(dev);
1895 		pm_runtime_set_active(dev);
1896 		pm_runtime_mark_last_busy(dev);
1897 		pm_runtime_enable(dev);
1898 
1899 		link_flags = md_flags >> (bus->link_id * 8);
1900 
1901 		if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1902 			pm_runtime_idle(dev);
1903 	}
1904 
1905 	ret = intel_link_power_up(sdw);
1906 	if (ret) {
1907 		dev_err(dev, "%s failed: %d\n", __func__, ret);
1908 		return ret;
1909 	}
1910 
1911 	/*
1912 	 * make sure all Slaves are tagged as UNATTACHED and provide
1913 	 * reason for reinitialization
1914 	 */
1915 	sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1916 
1917 	ret = intel_start_bus(sdw);
1918 	if (ret < 0) {
1919 		dev_err(dev, "cannot start bus during resume\n");
1920 		intel_link_power_down(sdw);
1921 		return ret;
1922 	}
1923 
1924 	/*
1925 	 * after system resume, the pm_runtime suspend() may kick in
1926 	 * during the enumeration, before any child device forces the
1927 	 * master device to remain active.  Using pm_runtime_get()
1928 	 * routines is not really possible, since it'd prevent the
1929 	 * master from suspending.
1930 	 * A reasonable compromise is to update the pm_runtime
1931 	 * counters and delay the pm_runtime suspend by several
1932 	 * seconds, by which time all enumeration should be complete.
1933 	 */
1934 	pm_runtime_mark_last_busy(dev);
1935 
1936 	return 0;
1937 }
1938 
1939 static int __maybe_unused intel_resume_runtime(struct device *dev)
1940 {
1941 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1942 	struct sdw_intel *sdw = cdns_to_intel(cdns);
1943 	struct sdw_bus *bus = &cdns->bus;
1944 	u32 clock_stop_quirks;
1945 	int ret;
1946 
1947 	if (bus->prop.hw_disabled || !sdw->startup_done) {
1948 		dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1949 			bus->link_id);
1950 		return 0;
1951 	}
1952 
1953 	/* unconditionally disable WAKEEN interrupt */
1954 	intel_shim_wake(sdw, false);
1955 
1956 	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1957 
1958 	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1959 		ret = intel_link_power_up(sdw);
1960 		if (ret) {
1961 			dev_err(dev, "%s: power_up failed after teardown: %d\n", __func__, ret);
1962 			return ret;
1963 		}
1964 
1965 		/*
1966 		 * make sure all Slaves are tagged as UNATTACHED and provide
1967 		 * reason for reinitialization
1968 		 */
1969 		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1970 
1971 		ret = intel_start_bus(sdw);
1972 		if (ret < 0) {
1973 			dev_err(dev, "%s: cannot start bus after teardown: %d\n", __func__, ret);
1974 			intel_link_power_down(sdw);
1975 			return ret;
1976 		}
1977 
1978 
1979 	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
1980 		ret = intel_link_power_up(sdw);
1981 		if (ret) {
1982 			dev_err(dev, "%s: power_up failed after bus reset: %d\n", __func__, ret);
1983 			return ret;
1984 		}
1985 
1986 		ret = intel_start_bus_after_reset(sdw);
1987 		if (ret < 0) {
1988 			dev_err(dev, "%s: cannot start bus after reset: %d\n", __func__, ret);
1989 			intel_link_power_down(sdw);
1990 			return ret;
1991 		}
1992 	} else if (!clock_stop_quirks) {
1993 
1994 		intel_check_clock_stop(sdw);
1995 
1996 		ret = intel_link_power_up(sdw);
1997 		if (ret) {
1998 			dev_err(dev, "%s: power_up failed: %d\n", __func__, ret);
1999 			return ret;
2000 		}
2001 
2002 		ret = intel_start_bus_after_clock_stop(sdw);
2003 		if (ret < 0) {
2004 			dev_err(dev, "%s: cannot start bus after clock stop: %d\n", __func__, ret);
2005 			intel_link_power_down(sdw);
2006 			return ret;
2007 		}
2008 	} else {
2009 		dev_err(dev, "%s: clock_stop_quirks %x unsupported\n",
2010 			__func__, clock_stop_quirks);
2011 		ret = -EINVAL;
2012 	}
2013 
2014 	return ret;
2015 }
2016 
2017 static const struct dev_pm_ops intel_pm = {
2018 	.prepare = intel_pm_prepare,
2019 	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
2020 	SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
2021 };
2022 
2023 static const struct auxiliary_device_id intel_link_id_table[] = {
2024 	{ .name = "soundwire_intel.link" },
2025 	{},
2026 };
2027 MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table);
2028 
2029 static struct auxiliary_driver sdw_intel_drv = {
2030 	.probe = intel_link_probe,
2031 	.remove = intel_link_remove,
2032 	.driver = {
2033 		/* auxiliary_driver_register() sets .name to be the modname */
2034 		.pm = &intel_pm,
2035 	},
2036 	.id_table = intel_link_id_table
2037 };
2038 module_auxiliary_driver(sdw_intel_drv);
2039 
2040 MODULE_LICENSE("Dual BSD/GPL");
2041 MODULE_DESCRIPTION("Intel Soundwire Link Driver");
2042