xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision f17f06a0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Implementation of the primary ALSA driver code base for Intel HD Audio.
5  *
6  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
7  *
8  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9  *                     PeiSen Hou <pshou@realtek.com.tw>
10  */
11 
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 
20 #ifdef CONFIG_X86
21 /* for art-tsc conversion */
22 #include <asm/tsc.h>
23 #endif
24 
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include "hda_controller_trace.h"
31 
32 /* DSP lock helpers */
33 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
34 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
35 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
36 
37 /* assign a stream for the PCM */
38 static inline struct azx_dev *
39 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
40 {
41 	struct hdac_stream *s;
42 
43 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
44 	if (!s)
45 		return NULL;
46 	return stream_to_azx_dev(s);
47 }
48 
49 /* release the assigned stream */
50 static inline void azx_release_device(struct azx_dev *azx_dev)
51 {
52 	snd_hdac_stream_release(azx_stream(azx_dev));
53 }
54 
55 static inline struct hda_pcm_stream *
56 to_hda_pcm_stream(struct snd_pcm_substream *substream)
57 {
58 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
59 	return &apcm->info->stream[substream->stream];
60 }
61 
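/*
 * Adjust a link timestamp by the codec-side delay reported via
 * hinfo->ops.get_delay(): the delay is added for capture streams and
 * subtracted (clamped at zero) for playback streams.
 */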
62 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
63 				u64 nsec)
64 {
65 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
66 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
67 	u64 codec_frames, codec_nsecs;
68 
69 	if (!hinfo->ops.get_delay)
70 		return nsec;
71 
72 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
73 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
74 			      substream->runtime->rate);
75 
76 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
77 		return nsec + codec_nsecs;
78 
79 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
80 }
81 
82 /*
83  * PCM ops
84  */
85 
86 static int azx_pcm_close(struct snd_pcm_substream *substream)
87 {
88 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
89 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
90 	struct azx *chip = apcm->chip;
91 	struct azx_dev *azx_dev = get_azx_dev(substream);
92 
93 	trace_azx_pcm_close(chip, azx_dev);
94 	mutex_lock(&chip->open_mutex);
95 	azx_release_device(azx_dev);
96 	if (hinfo->ops.close)
97 		hinfo->ops.close(hinfo, apcm->codec, substream);
98 	snd_hda_power_down(apcm->codec);
99 	mutex_unlock(&chip->open_mutex);
100 	snd_hda_codec_pcm_put(apcm->info);
101 	return 0;
102 }
103 
104 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
105 			     struct snd_pcm_hw_params *hw_params)
106 {
107 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
108 	struct azx *chip = apcm->chip;
109 	struct azx_dev *azx_dev = get_azx_dev(substream);
110 	int ret = 0;
111 
112 	trace_azx_pcm_hw_params(chip, azx_dev);
113 	dsp_lock(azx_dev);
114 	if (dsp_is_locked(azx_dev)) {
115 		ret = -EBUSY;
116 		goto unlock;
117 	}
118 
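	/* clear the cached stream parameters so that the next prepare
	 * call recalculates the format and reprograms the BDL
	 */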
119 	azx_dev->core.bufsize = 0;
120 	azx_dev->core.period_bytes = 0;
121 	azx_dev->core.format_val = 0;
122 
123 unlock:
124 	dsp_unlock(azx_dev);
125 	return ret;
126 }
127 
128 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
129 {
130 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
131 	struct azx_dev *azx_dev = get_azx_dev(substream);
132 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
133 
134 	/* reset BDL address */
135 	dsp_lock(azx_dev);
136 	if (!dsp_is_locked(azx_dev))
137 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
138 
139 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
140 
141 	azx_stream(azx_dev)->prepared = 0;
142 	dsp_unlock(azx_dev);
143 	return 0;
144 }
145 
146 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
147 {
148 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
149 	struct azx *chip = apcm->chip;
150 	struct azx_dev *azx_dev = get_azx_dev(substream);
151 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
152 	struct snd_pcm_runtime *runtime = substream->runtime;
153 	unsigned int format_val, stream_tag;
154 	int err;
155 	struct hda_spdif_out *spdif =
156 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
157 	unsigned short ctls = spdif ? spdif->ctls : 0;
158 
159 	trace_azx_pcm_prepare(chip, azx_dev);
160 	dsp_lock(azx_dev);
161 	if (dsp_is_locked(azx_dev)) {
162 		err = -EBUSY;
163 		goto unlock;
164 	}
165 
166 	snd_hdac_stream_reset(azx_stream(azx_dev));
167 	format_val = snd_hdac_calc_stream_format(runtime->rate,
168 						runtime->channels,
169 						runtime->format,
170 						hinfo->maxbps,
171 						ctls);
172 	if (!format_val) {
173 		dev_err(chip->card->dev,
174 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
175 			runtime->rate, runtime->channels, runtime->format);
176 		err = -EINVAL;
177 		goto unlock;
178 	}
179 
180 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
181 	if (err < 0)
182 		goto unlock;
183 
184 	snd_hdac_stream_setup(azx_stream(azx_dev));
185 
186 	stream_tag = azx_dev->core.stream_tag;
187 	/* CA-IBG chips need the playback stream starting from 1 */
188 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
189 	    stream_tag > chip->capture_streams)
190 		stream_tag -= chip->capture_streams;
191 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
192 				     azx_dev->core.format_val, substream);
193 
194  unlock:
195 	if (!err)
196 		azx_stream(azx_dev)->prepared = 1;
197 	dsp_unlock(azx_dev);
198 	return err;
199 }
200 
201 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
202 {
203 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
204 	struct azx *chip = apcm->chip;
205 	struct hdac_bus *bus = azx_bus(chip);
206 	struct azx_dev *azx_dev;
207 	struct snd_pcm_substream *s;
208 	struct hdac_stream *hstr;
209 	bool start;
210 	int sbits = 0;
211 	int sync_reg;
212 
213 	azx_dev = get_azx_dev(substream);
214 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
215 
216 	hstr = azx_stream(azx_dev);
217 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
218 		sync_reg = AZX_REG_OLD_SSYNC;
219 	else
220 		sync_reg = AZX_REG_SSYNC;
221 
222 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
223 		return -EPIPE;
224 
225 	switch (cmd) {
226 	case SNDRV_PCM_TRIGGER_START:
227 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
228 	case SNDRV_PCM_TRIGGER_RESUME:
229 		start = true;
230 		break;
231 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
232 	case SNDRV_PCM_TRIGGER_SUSPEND:
233 	case SNDRV_PCM_TRIGGER_STOP:
234 		start = false;
235 		break;
236 	default:
237 		return -EINVAL;
238 	}
239 
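	/* collect the SD indices of all substreams linked to this one
	 * so that they can be started/stopped in sync below
	 */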
240 	snd_pcm_group_for_each_entry(s, substream) {
241 		if (s->pcm->card != substream->pcm->card)
242 			continue;
243 		azx_dev = get_azx_dev(s);
244 		sbits |= 1 << azx_dev->core.index;
245 		snd_pcm_trigger_done(s, substream);
246 	}
247 
248 	spin_lock(&bus->reg_lock);
249 
250 	/* first, set SYNC bits of corresponding streams */
251 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
252 
253 	snd_pcm_group_for_each_entry(s, substream) {
254 		if (s->pcm->card != substream->pcm->card)
255 			continue;
256 		azx_dev = get_azx_dev(s);
257 		if (start) {
258 			azx_dev->insufficient = 1;
259 			snd_hdac_stream_start(azx_stream(azx_dev), true);
260 		} else {
261 			snd_hdac_stream_stop(azx_stream(azx_dev));
262 		}
263 	}
264 	spin_unlock(&bus->reg_lock);
265 
266 	snd_hdac_stream_sync(hstr, start, sbits);
267 
268 	spin_lock(&bus->reg_lock);
269 	/* reset SYNC bits */
270 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
271 	if (start)
272 		snd_hdac_stream_timecounter_init(hstr, sbits);
273 	spin_unlock(&bus->reg_lock);
274 	return 0;
275 }
276 
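/* read the current stream position from the LPIB register */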
277 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
278 {
279 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
280 }
281 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
282 
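/* read the current stream position from the DMA position buffer */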
283 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
284 {
285 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
286 }
287 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
288 
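/*
 * Return the current DMA position in bytes (wrapped to 0 at the buffer
 * size) and update runtime->delay with the controller- and codec-side
 * delays.
 */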
289 unsigned int azx_get_position(struct azx *chip,
290 			      struct azx_dev *azx_dev)
291 {
292 	struct snd_pcm_substream *substream = azx_dev->core.substream;
293 	unsigned int pos;
294 	int stream = substream->stream;
295 	int delay = 0;
296 
297 	if (chip->get_position[stream])
298 		pos = chip->get_position[stream](chip, azx_dev);
299 	else /* use the position buffer as default */
300 		pos = azx_get_pos_posbuf(chip, azx_dev);
301 
302 	if (pos >= azx_dev->core.bufsize)
303 		pos = 0;
304 
305 	if (substream->runtime) {
306 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
307 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
308 
309 		if (chip->get_delay[stream])
310 			delay += chip->get_delay[stream](chip, azx_dev, pos);
311 		if (hinfo->ops.get_delay)
312 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
313 						      substream);
314 		substream->runtime->delay = delay;
315 	}
316 
317 	trace_azx_get_position(chip, azx_dev, pos, delay);
318 	return pos;
319 }
320 EXPORT_SYMBOL_GPL(azx_get_position);
321 
322 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
323 {
324 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
325 	struct azx *chip = apcm->chip;
326 	struct azx_dev *azx_dev = get_azx_dev(substream);
327 	return bytes_to_frames(substream->runtime,
328 			       azx_get_position(chip, azx_dev));
329 }
330 
331 /*
332  * azx_scale64: Scale base by num/den without overflowing
333  *
334  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
335  *
336  *  The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48000
337  *  seconds, which is about 384307 seconds, i.e. ~4.5 days.
338  *
339  * This scales the calculation so that overflow does not happen until after
340  * 2^64 / 48000 seconds, which is pretty large!
341  *
342  * In the calculation below:
343  *	base may overflow, but since there isn't any additional division
344  *	performed on base it's OK
345  *	rem can't overflow because both rem and num are 32-bit values
346  */
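/*
 * For example, azx_get_sync_time() below uses
 * azx_scale64(ll_counter, NSEC_PER_SEC, runtime->rate) to convert the
 * link position counter into nanoseconds without the intermediate
 * product overflowing for realistic stream lengths.
 */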
347 
348 #ifdef CONFIG_X86
349 static u64 azx_scale64(u64 base, u32 num, u32 den)
350 {
351 	u64 rem;
352 
353 	rem = do_div(base, den);
354 
355 	base *= num;
356 	rem *= num;
357 
358 	do_div(rem, den);
359 
360 	return base + rem;
361 }
362 
363 static int azx_get_sync_time(ktime_t *device,
364 		struct system_counterval_t *system, void *ctx)
365 {
366 	struct snd_pcm_substream *substream = ctx;
367 	struct azx_dev *azx_dev = get_azx_dev(substream);
368 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
369 	struct azx *chip = apcm->chip;
370 	struct snd_pcm_runtime *runtime;
371 	u64 ll_counter, ll_counter_l, ll_counter_h;
372 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
373 	u32 wallclk_ctr, wallclk_cycles;
374 	bool direction;
375 	u32 dma_select;
376 	u32 timeout = 200;
377 	u32 retry_count = 0;
378 
379 	runtime = substream->runtime;
380 
381 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
382 		direction = 1;
383 	else
384 		direction = 0;
385 
386 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
387 	do {
388 		timeout = 100;
389 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
390 					(azx_dev->core.stream_tag - 1);
391 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
392 
393 		/* Enable the timestamp capture */
394 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
395 
396 		while (timeout) {
397 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
398 						GTSCC_TSCCD_MASK)
399 				break;
400 
401 			timeout--;
402 		}
403 
404 		if (!timeout) {
405 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
406 			return -EIO;
407 		}
408 
409 		/* Read wall clock counter */
410 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
411 
412 		/* Read TSC counter */
413 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
414 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
415 
416 		/* Read Link counter */
417 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
418 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
419 
420 		/* Ack: registers read done */
421 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
422 
423 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
424 						tsc_counter_l;
425 
426 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
427 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
428 
429 		/*
430 		 * An error can occur near a frame "rollover".  The
431 		 * clocks-in-frame value indicates whether this error may
432 		 * have occurred.  Here we use a margin of 10, i.e.
433 		 * HDA_MAX_CYCLE_OFFSET.
434 		 */
435 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
436 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
437 			break;
438 
439 		/*
440 		 * Sleep before reading again, or we may again get a
441 		 * value near MAX_CYCLE.  Sleep for a different amount
442 		 * of time on each retry so we don't hit the same number again.
443 		 */
444 		udelay(retry_count++);
445 
446 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
447 
448 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
449 		dev_err_ratelimited(chip->card->dev,
450 			"Error in WALFCC cycle count\n");
451 		return -EIO;
452 	}
453 
454 	*device = ns_to_ktime(azx_scale64(ll_counter,
455 				NSEC_PER_SEC, runtime->rate));
456 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
457 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
458 
459 	*system = convert_art_to_tsc(tsc_counter);
460 
461 	return 0;
462 }
463 
464 #else
465 static int azx_get_sync_time(ktime_t *device,
466 		struct system_counterval_t *system, void *ctx)
467 {
468 	return -ENXIO;
469 }
470 #endif
471 
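/* get a cross-timestamp of the HDA link clock and the system clock */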
472 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
473 			      struct system_device_crosststamp *xtstamp)
474 {
475 	return get_device_system_crosststamp(azx_get_sync_time,
476 					substream, NULL, xtstamp);
477 }
478 
479 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
480 				struct snd_pcm_audio_tstamp_config *ts)
481 {
482 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
483 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
484 			return true;
485 
486 	return false;
487 }
488 
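/*
 * PCM get_time_info callback: report the audio timestamp either from
 * the wall-clock timecounter (LINK) or from an ART/TSC cross-timestamp
 * (LINK_SYNCHRONIZED), falling back to the default timestamp otherwise.
 */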
489 static int azx_get_time_info(struct snd_pcm_substream *substream,
490 			struct timespec64 *system_ts, struct timespec64 *audio_ts,
491 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
492 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
493 {
494 	struct azx_dev *azx_dev = get_azx_dev(substream);
495 	struct snd_pcm_runtime *runtime = substream->runtime;
496 	struct system_device_crosststamp xtstamp;
497 	int ret;
498 	u64 nsec;
499 
500 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
501 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
502 
503 		snd_pcm_gettime(substream->runtime, system_ts);
504 
505 		nsec = timecounter_read(&azx_dev->core.tc);
506 		nsec = div_u64(nsec, 3); /* can be optimized */
507 		if (audio_tstamp_config->report_delay)
508 			nsec = azx_adjust_codec_delay(substream, nsec);
509 
510 		*audio_ts = ns_to_timespec64(nsec);
511 
512 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
513 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
514 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
515 
516 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
517 
518 		ret = azx_get_crosststamp(substream, &xtstamp);
519 		if (ret)
520 			return ret;
521 
522 		switch (runtime->tstamp_type) {
523 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
524 			return -EINVAL;
525 
526 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
527 			*system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
528 			break;
529 
530 		default:
531 			*system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
532 			break;
533 
534 		}
535 
536 		*audio_ts = ktime_to_timespec64(xtstamp.device);
537 
538 		audio_tstamp_report->actual_type =
539 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
540 		audio_tstamp_report->accuracy_report = 1;
541 		/* 24 MHz WallClock == 42ns resolution */
542 		audio_tstamp_report->accuracy = 42;
543 
544 	} else {
545 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
546 	}
547 
548 	return 0;
549 }
550 
551 static const struct snd_pcm_hardware azx_pcm_hw = {
552 	.info =			(SNDRV_PCM_INFO_MMAP |
553 				 SNDRV_PCM_INFO_INTERLEAVED |
554 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
555 				 SNDRV_PCM_INFO_MMAP_VALID |
556 				 /* No full-resume yet implemented */
557 				 /* SNDRV_PCM_INFO_RESUME |*/
558 				 SNDRV_PCM_INFO_PAUSE |
559 				 SNDRV_PCM_INFO_SYNC_START |
560 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
561 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
562 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
563 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
564 	.rates =		SNDRV_PCM_RATE_48000,
565 	.rate_min =		48000,
566 	.rate_max =		48000,
567 	.channels_min =		2,
568 	.channels_max =		2,
569 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
570 	.period_bytes_min =	128,
571 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
572 	.periods_min =		2,
573 	.periods_max =		AZX_MAX_FRAG,
574 	.fifo_size =		0,
575 };
576 
577 static int azx_pcm_open(struct snd_pcm_substream *substream)
578 {
579 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
580 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
581 	struct azx *chip = apcm->chip;
582 	struct azx_dev *azx_dev;
583 	struct snd_pcm_runtime *runtime = substream->runtime;
584 	int err;
585 	int buff_step;
586 
587 	snd_hda_codec_pcm_get(apcm->info);
588 	mutex_lock(&chip->open_mutex);
589 	azx_dev = azx_assign_device(chip, substream);
590 	trace_azx_pcm_open(chip, azx_dev);
591 	if (azx_dev == NULL) {
592 		err = -EBUSY;
593 		goto unlock;
594 	}
595 	runtime->private_data = azx_dev;
596 
597 	runtime->hw = azx_pcm_hw;
598 	if (chip->gts_present)
599 		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
600 	runtime->hw.channels_min = hinfo->channels_min;
601 	runtime->hw.channels_max = hinfo->channels_max;
602 	runtime->hw.formats = hinfo->formats;
603 	runtime->hw.rates = hinfo->rates;
604 	snd_pcm_limit_hw_rates(runtime);
605 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
606 
607 	/* avoid wrap-around with the 24 MHz wall clock, which wraps after ~178.9 s */
608 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
609 				     20,
610 				     178000000);
611 
612 	/* For some reason, the playback stream stalls on PulseAudio with
613 	 * tsched=1 when a capture stream triggers.  Until we figure out the
614 	 * real cause, disable tsched mode by setting the BATCH PCM info flag.
615 	 */
616 	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
617 		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
618 
619 	if (chip->align_buffer_size)
620 		/* Constrain buffer sizes to be a multiple of 128
621 		   bytes. This is more efficient in terms of memory
622 		   access but isn't required by the HDA spec, and it
623 		   prevents users from specifying exact period/buffer
624 		   sizes. For example, at 44.1 kHz a period size set
625 		   to 20 ms will be rounded to 19.59 ms. */
626 		buff_step = 128;
627 	else
628 		/* Don't enforce steps on buffer sizes; they still need
629 		   to be a multiple of 4 bytes (HDA spec).  Disabling the
630 		   alignment was tested on Intel HDA controllers but may
631 		   not work on all devices. */
632 		buff_step = 4;
633 
634 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
635 				   buff_step);
636 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
637 				   buff_step);
638 	snd_hda_power_up(apcm->codec);
639 	if (hinfo->ops.open)
640 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
641 	else
642 		err = -ENODEV;
643 	if (err < 0) {
644 		azx_release_device(azx_dev);
645 		goto powerdown;
646 	}
647 	snd_pcm_limit_hw_rates(runtime);
648 	/* sanity check */
649 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
650 	    snd_BUG_ON(!runtime->hw.channels_max) ||
651 	    snd_BUG_ON(!runtime->hw.formats) ||
652 	    snd_BUG_ON(!runtime->hw.rates)) {
653 		azx_release_device(azx_dev);
654 		if (hinfo->ops.close)
655 			hinfo->ops.close(hinfo, apcm->codec, substream);
656 		err = -EINVAL;
657 		goto powerdown;
658 	}
659 
660 	/* disable LINK_ATIME timestamps for capture streams
661 	   until we figure out how to handle digital inputs */
662 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
663 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
664 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
665 	}
666 
667 	snd_pcm_set_sync(substream);
668 	mutex_unlock(&chip->open_mutex);
669 	return 0;
670 
671  powerdown:
672 	snd_hda_power_down(apcm->codec);
673  unlock:
674 	mutex_unlock(&chip->open_mutex);
675 	snd_hda_codec_pcm_put(apcm->info);
676 	return err;
677 }
678 
679 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
680 			struct vm_area_struct *area)
681 {
682 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
683 	struct azx *chip = apcm->chip;
684 	if (chip->ops->pcm_mmap_prepare)
685 		chip->ops->pcm_mmap_prepare(substream, area);
686 	return snd_pcm_lib_default_mmap(substream, area);
687 }
688 
689 static const struct snd_pcm_ops azx_pcm_ops = {
690 	.open = azx_pcm_open,
691 	.close = azx_pcm_close,
692 	.hw_params = azx_pcm_hw_params,
693 	.hw_free = azx_pcm_hw_free,
694 	.prepare = azx_pcm_prepare,
695 	.trigger = azx_pcm_trigger,
696 	.pointer = azx_pcm_pointer,
697 	.get_time_info =  azx_get_time_info,
698 	.mmap = azx_pcm_mmap,
699 };
700 
701 static void azx_pcm_free(struct snd_pcm *pcm)
702 {
703 	struct azx_pcm *apcm = pcm->private_data;
704 	if (apcm) {
705 		list_del(&apcm->list);
706 		apcm->info->pcm = NULL;
707 		kfree(apcm);
708 	}
709 }
710 
711 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
712 
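/*
 * Create an ALSA PCM device for the given hda_pcm, attach the azx PCM
 * ops and pre-allocate the DMA buffers.
 */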
713 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
714 			      struct hda_pcm *cpcm)
715 {
716 	struct hdac_bus *bus = &_bus->core;
717 	struct azx *chip = bus_to_azx(bus);
718 	struct snd_pcm *pcm;
719 	struct azx_pcm *apcm;
720 	int pcm_dev = cpcm->device;
721 	unsigned int size;
722 	int s, err;
723 	int type = SNDRV_DMA_TYPE_DEV_SG;
724 
725 	list_for_each_entry(apcm, &chip->pcm_list, list) {
726 		if (apcm->pcm->device == pcm_dev) {
727 			dev_err(chip->card->dev, "PCM %d already exists\n",
728 				pcm_dev);
729 			return -EBUSY;
730 		}
731 	}
732 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
733 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
734 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
735 			  &pcm);
736 	if (err < 0)
737 		return err;
738 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
739 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
740 	if (apcm == NULL) {
741 		snd_device_free(chip->card, pcm);
742 		return -ENOMEM;
743 	}
744 	apcm->chip = chip;
745 	apcm->pcm = pcm;
746 	apcm->codec = codec;
747 	apcm->info = cpcm;
748 	pcm->private_data = apcm;
749 	pcm->private_free = azx_pcm_free;
750 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
751 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
752 	list_add_tail(&apcm->list, &chip->pcm_list);
753 	cpcm->pcm = pcm;
754 	for (s = 0; s < 2; s++) {
755 		if (cpcm->stream[s].substreams)
756 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
757 	}
758 	/* buffer pre-allocation */
759 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
760 	if (size > MAX_PREALLOC_SIZE)
761 		size = MAX_PREALLOC_SIZE;
762 	if (chip->uc_buffer)
763 		type = SNDRV_DMA_TYPE_DEV_UC_SG;
764 	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
765 				       size, MAX_PREALLOC_SIZE);
766 	return 0;
767 }
768 
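/* extract the codec address (bits 31:28) from a verb */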
769 static unsigned int azx_command_addr(u32 cmd)
770 {
771 	unsigned int addr = cmd >> 28;
772 
773 	if (addr >= AZX_MAX_CODECS) {
774 		snd_BUG();
775 		addr = 0;
776 	}
777 
778 	return addr;
779 }
780 
781 /* receive a response; on timeout, fall back to polling mode, then disable MSI, then reset the bus or switch to single_cmd mode */
782 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
783 				 unsigned int *res)
784 {
785 	struct azx *chip = bus_to_azx(bus);
786 	struct hda_bus *hbus = &chip->bus;
787 	int err;
788 
789  again:
790 	err = snd_hdac_bus_get_response(bus, addr, res);
791 	if (!err)
792 		return 0;
793 
794 	if (hbus->no_response_fallback)
795 		return -EIO;
796 
797 	if (!bus->polling_mode) {
798 		dev_warn(chip->card->dev,
799 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
800 			 bus->last_cmd[addr]);
801 		bus->polling_mode = 1;
802 		goto again;
803 	}
804 
805 	if (chip->msi) {
806 		dev_warn(chip->card->dev,
807 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
808 			 bus->last_cmd[addr]);
809 		if (chip->ops->disable_msi_reset_irq &&
810 		    chip->ops->disable_msi_reset_irq(chip) < 0)
811 			return -EIO;
812 		goto again;
813 	}
814 
815 	if (chip->probing) {
816 		/* If this critical timeout happens during the codec probing
817 		 * phase, this is likely an access to a non-existing codec
818 		 * slot.  Better to return an error and reset the system.
819 		 */
820 		return -EIO;
821 	}
822 
823 	/* no fallback mechanism? */
824 	if (!chip->fallback_to_single_cmd)
825 		return -EIO;
826 
827 	/* a fatal communication error; we need either to reset the bus or
828 	 * to fall back to the single_cmd mode
829 	 */
830 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
831 		hbus->response_reset = 1;
832 		dev_err(chip->card->dev,
833 			"No response from codec, resetting bus: last cmd=0x%08x\n",
834 			bus->last_cmd[addr]);
835 		return -EAGAIN; /* give a chance to retry */
836 	}
837 
838 	dev_err(chip->card->dev,
839 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
840 		bus->last_cmd[addr]);
841 	chip->single_cmd = 1;
842 	hbus->response_reset = 0;
843 	snd_hdac_bus_stop_cmd_io(bus);
844 	return -EIO;
845 }
846 
847 /*
848  * Use the single immediate command instead of CORB/RIRB for simplicity
849  *
850  * Note: according to Intel, this is not the preferred use.  The command was
851  *       intended for the BIOS only, and may get confused with unsolicited
852  *       responses.  So, we shouldn't use it for normal operation from the
853  *       driver.
854  *       The code is left in, however, for debugging/testing purposes.
855  */
856 
857 /* receive a response */
858 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
859 {
860 	int timeout = 50;
861 
862 	while (timeout--) {
863 		/* check IRV busy bit */
864 		/* check the IRV (immediate result valid) bit */
865 			/* reuse rirb.res as the response return value */
866 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
867 			return 0;
868 		}
869 		udelay(1);
870 	}
871 	if (printk_ratelimit())
872 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
873 			azx_readw(chip, IRS));
874 	azx_bus(chip)->rirb.res[addr] = -1;
875 	return -EIO;
876 }
877 
878 /* send a command */
879 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
880 {
881 	struct azx *chip = bus_to_azx(bus);
882 	unsigned int addr = azx_command_addr(val);
883 	int timeout = 50;
884 
885 	bus->last_cmd[azx_command_addr(val)] = val;
886 	while (timeout--) {
887 		/* check ICB busy bit */
888 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
889 			/* Clear IRV valid bit */
890 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
891 				   AZX_IRS_VALID);
892 			azx_writel(chip, IC, val);
893 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
894 				   AZX_IRS_BUSY);
895 			return azx_single_wait_for_response(chip, addr);
896 		}
897 		udelay(1);
898 	}
899 	if (printk_ratelimit())
900 		dev_dbg(chip->card->dev,
901 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
902 			azx_readw(chip, IRS), val);
903 	return -EIO;
904 }
905 
906 /* receive a response */
907 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
908 				   unsigned int *res)
909 {
910 	if (res)
911 		*res = bus->rirb.res[addr];
912 	return 0;
913 }
914 
915 /*
916  * Below are the main callbacks from hda_codec.
917  *
918  * They are just skeletons that call the sub-callbacks according to the
919  * current setting of chip->single_cmd.
920  */
921 
922 /* send a command */
923 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
924 {
925 	struct azx *chip = bus_to_azx(bus);
926 
927 	if (chip->disabled)
928 		return 0;
929 	if (chip->single_cmd)
930 		return azx_single_send_cmd(bus, val);
931 	else
932 		return snd_hdac_bus_send_cmd(bus, val);
933 }
934 
935 /* get a response */
936 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
937 			    unsigned int *res)
938 {
939 	struct azx *chip = bus_to_azx(bus);
940 
941 	if (chip->disabled)
942 		return 0;
943 	if (chip->single_cmd)
944 		return azx_single_get_response(bus, addr, res);
945 	else
946 		return azx_rirb_get_response(bus, addr, res);
947 }
948 
949 static const struct hdac_bus_ops bus_core_ops = {
950 	.command = azx_send_cmd,
951 	.get_response = azx_get_response,
952 };
953 
954 #ifdef CONFIG_SND_HDA_DSP_LOADER
955 /*
956  * DSP loading code (e.g. for CA0132)
957  */
958 
959 /* use the first stream for loading DSP */
960 static struct azx_dev *
961 azx_get_dsp_loader_dev(struct azx *chip)
962 {
963 	struct hdac_bus *bus = azx_bus(chip);
964 	struct hdac_stream *s;
965 
966 	list_for_each_entry(s, &bus->stream_list, list)
967 		if (s->index == chip->playback_index_offset)
968 			return stream_to_azx_dev(s);
969 
970 	return NULL;
971 }
972 
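/*
 * Prepare the DSP loader stream: if the stream is already opened for
 * PCM, save its state first, then set up the stream and DMA buffer for
 * the firmware transfer.
 */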
973 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
974 				   unsigned int byte_size,
975 				   struct snd_dma_buffer *bufp)
976 {
977 	struct hdac_bus *bus = &codec->bus->core;
978 	struct azx *chip = bus_to_azx(bus);
979 	struct azx_dev *azx_dev;
980 	struct hdac_stream *hstr;
981 	bool saved = false;
982 	int err;
983 
984 	azx_dev = azx_get_dsp_loader_dev(chip);
985 	hstr = azx_stream(azx_dev);
986 	spin_lock_irq(&bus->reg_lock);
987 	if (hstr->opened) {
988 		chip->saved_azx_dev = *azx_dev;
989 		saved = true;
990 	}
991 	spin_unlock_irq(&bus->reg_lock);
992 
993 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
994 	if (err < 0) {
995 		spin_lock_irq(&bus->reg_lock);
996 		if (saved)
997 			*azx_dev = chip->saved_azx_dev;
998 		spin_unlock_irq(&bus->reg_lock);
999 		return err;
1000 	}
1001 
1002 	hstr->prepared = 0;
1003 	return err;
1004 }
1005 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
1006 
1007 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1008 {
1009 	struct hdac_bus *bus = &codec->bus->core;
1010 	struct azx *chip = bus_to_azx(bus);
1011 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1012 
1013 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1014 }
1015 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1016 
1017 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1018 				    struct snd_dma_buffer *dmab)
1019 {
1020 	struct hdac_bus *bus = &codec->bus->core;
1021 	struct azx *chip = bus_to_azx(bus);
1022 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1023 	struct hdac_stream *hstr = azx_stream(azx_dev);
1024 
1025 	if (!dmab->area || !hstr->locked)
1026 		return;
1027 
1028 	snd_hdac_dsp_cleanup(hstr, dmab);
1029 	spin_lock_irq(&bus->reg_lock);
1030 	if (hstr->opened)
1031 		*azx_dev = chip->saved_azx_dev;
1032 	hstr->locked = false;
1033 	spin_unlock_irq(&bus->reg_lock);
1034 }
1035 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1036 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1037 
1038 /*
1039  * reset and start the controller registers
1040  */
1041 void azx_init_chip(struct azx *chip, bool full_reset)
1042 {
1043 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1044 		/* correct RINTCNT for CXT */
1045 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1046 			azx_writew(chip, RINTCNT, 0xc0);
1047 	}
1048 }
1049 EXPORT_SYMBOL_GPL(azx_init_chip);
1050 
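/* stop all controller DMA streams */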
1051 void azx_stop_all_streams(struct azx *chip)
1052 {
1053 	struct hdac_bus *bus = azx_bus(chip);
1054 	struct hdac_stream *s;
1055 
1056 	list_for_each_entry(s, &bus->stream_list, list)
1057 		snd_hdac_stream_stop(s);
1058 }
1059 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1060 
1061 void azx_stop_chip(struct azx *chip)
1062 {
1063 	snd_hdac_bus_stop_chip(azx_bus(chip));
1064 }
1065 EXPORT_SYMBOL_GPL(azx_stop_chip);
1066 
1067 /*
1068  * interrupt handler
1069  */
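/* per-stream handler called from snd_hdac_bus_handle_stream_irq() */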
1070 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1071 {
1072 	struct azx *chip = bus_to_azx(bus);
1073 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1074 
1075 	/* check whether this IRQ is really acceptable */
1076 	if (!chip->ops->position_check ||
1077 	    chip->ops->position_check(chip, azx_dev)) {
1078 		spin_unlock(&bus->reg_lock);
1079 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1080 		spin_lock(&bus->reg_lock);
1081 	}
1082 }
1083 
1084 irqreturn_t azx_interrupt(int irq, void *dev_id)
1085 {
1086 	struct azx *chip = dev_id;
1087 	struct hdac_bus *bus = azx_bus(chip);
1088 	u32 status;
1089 	bool active, handled = false;
1090 	int repeat = 0; /* count for avoiding endless loop */
1091 
1092 #ifdef CONFIG_PM
1093 	if (azx_has_pm_runtime(chip))
1094 		if (!pm_runtime_active(chip->card->dev))
1095 			return IRQ_NONE;
1096 #endif
1097 
1098 	spin_lock(&bus->reg_lock);
1099 
1100 	if (chip->disabled)
1101 		goto unlock;
1102 
1103 	do {
1104 		status = azx_readl(chip, INTSTS);
1105 		if (status == 0 || status == 0xffffffff)
1106 			break;
1107 
1108 		handled = true;
1109 		active = false;
1110 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1111 			active = true;
1112 
1113 		status = azx_readb(chip, RIRBSTS);
1114 		if (status & RIRB_INT_MASK) {
1115 			/*
1116 			 * Clearing the interrupt status here ensures that no
1117 			 * interrupt gets masked after the RIRB wp is read in
1118 			 * snd_hdac_bus_update_rirb. This avoids a possible
1119 			 * race condition where codec response in RIRB may
1120 			 * remain unserviced by IRQ, eventually falling back
1121 			 * to polling mode in azx_rirb_get_response.
1122 			 */
1123 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1124 			active = true;
1125 			if (status & RIRB_INT_RESPONSE) {
1126 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1127 					udelay(80);
1128 				snd_hdac_bus_update_rirb(bus);
1129 			}
1130 		}
1131 	} while (active && ++repeat < 10);
1132 
1133  unlock:
1134 	spin_unlock(&bus->reg_lock);
1135 
1136 	return IRQ_RETVAL(handled);
1137 }
1138 EXPORT_SYMBOL_GPL(azx_interrupt);
1139 
1140 /*
1141  * Codec interface
1142  */
1143 
1144 /*
1145  * Probe the given codec address
1146  */
1147 static int probe_codec(struct azx *chip, int addr)
1148 {
1149 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1150 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1151 	struct hdac_bus *bus = azx_bus(chip);
1152 	int err;
1153 	unsigned int res = -1;
1154 
1155 	mutex_lock(&bus->cmd_mutex);
1156 	chip->probing = 1;
1157 	azx_send_cmd(bus, cmd);
1158 	err = azx_get_response(bus, addr, &res);
1159 	chip->probing = 0;
1160 	mutex_unlock(&bus->cmd_mutex);
1161 	if (err < 0 || res == -1)
1162 		return -EIO;
1163 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1164 	return 0;
1165 }
1166 
1167 void snd_hda_bus_reset(struct hda_bus *bus)
1168 {
1169 	struct azx *chip = bus_to_azx(&bus->core);
1170 
1171 	bus->in_reset = 1;
1172 	azx_stop_chip(chip);
1173 	azx_init_chip(chip, true);
1174 	if (bus->core.chip_init)
1175 		snd_hda_bus_reset_codecs(bus);
1176 	bus->in_reset = 0;
1177 }
1178 
1179 /* HD-audio bus initialization */
1180 int azx_bus_init(struct azx *chip, const char *model)
1181 {
1182 	struct hda_bus *bus = &chip->bus;
1183 	int err;
1184 
1185 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1186 	if (err < 0)
1187 		return err;
1188 
1189 	bus->card = chip->card;
1190 	mutex_init(&bus->prepare_mutex);
1191 	bus->pci = chip->pci;
1192 	bus->modelname = model;
1193 	bus->mixer_assigned = -1;
1194 	bus->core.snoop = azx_snoop(chip);
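	/* use the DMA position buffer unless both directions read LPIB directly */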
1195 	if (chip->get_position[0] != azx_get_pos_lpib ||
1196 	    chip->get_position[1] != azx_get_pos_lpib)
1197 		bus->core.use_posbuf = true;
1198 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1199 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1200 		bus->core.corbrp_self_clear = true;
1201 
1202 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1203 		bus->core.align_bdle_4k = true;
1204 
1205 	/* AMD chipsets often cause communication stalls upon certain
1206 	 * sequences such as pin detection.  It seems that forcing synced
1207 	 * access works around the stall.
1208 	 */
1209 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1210 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1211 		bus->core.sync_write = 1;
1212 		bus->allow_bus_reset = 1;
1213 	}
1214 
1215 	return 0;
1216 }
1217 EXPORT_SYMBOL_GPL(azx_bus_init);
1218 
1219 /* Probe codecs */
1220 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1221 {
1222 	struct hdac_bus *bus = azx_bus(chip);
1223 	int c, codecs, err;
1224 
1225 	codecs = 0;
1226 	if (!max_slots)
1227 		max_slots = AZX_DEFAULT_CODECS;
1228 
1229 	/* First try to probe all given codec slots */
1230 	for (c = 0; c < max_slots; c++) {
1231 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1232 			if (probe_codec(chip, c) < 0) {
1233 				/* Some BIOSes report codec addresses
1234 				 * that don't exist
1235 				 */
1236 				dev_warn(chip->card->dev,
1237 					 "Codec #%d probe error; disabling it...\n", c);
1238 				bus->codec_mask &= ~(1 << c);
1239 				/* Worse, accessing a non-existing
1240 				 * codec often screws up the controller chip
1241 				 * and disturbs further communication.
1242 				 * Thus, if an error occurs during probing,
1243 				 * it's better to reset the controller chip
1244 				 * to get back to a sane state.
1245 				 */
1246 				azx_stop_chip(chip);
1247 				azx_init_chip(chip, true);
1248 			}
1249 		}
1250 	}
1251 
1252 	/* Then create codec instances */
1253 	for (c = 0; c < max_slots; c++) {
1254 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1255 			struct hda_codec *codec;
1256 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1257 			if (err < 0)
1258 				continue;
1259 			codec->jackpoll_interval = chip->jackpoll_interval;
1260 			codec->beep_mode = chip->beep_mode;
1261 			codecs++;
1262 		}
1263 	}
1264 	if (!codecs) {
1265 		dev_err(chip->card->dev, "no codecs initialized\n");
1266 		return -ENXIO;
1267 	}
1268 	return 0;
1269 }
1270 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1271 
1272 /* configure each codec instance */
1273 int azx_codec_configure(struct azx *chip)
1274 {
1275 	struct hda_codec *codec, *next;
1276 
1277 	/* use _safe version here since snd_hda_codec_configure() deregisters
1278 	 * the device upon error and deletes itself from the bus list.
1279 	 */
1280 	list_for_each_codec_safe(codec, next, &chip->bus) {
1281 		snd_hda_codec_configure(codec);
1282 	}
1283 
1284 	if (!azx_bus(chip)->num_codecs)
1285 		return -ENODEV;
1286 	return 0;
1287 }
1288 EXPORT_SYMBOL_GPL(azx_codec_configure);
1289 
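/* return the PCM direction (playback/capture) for the given stream index */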
1290 static int stream_direction(struct azx *chip, unsigned char index)
1291 {
1292 	if (index >= chip->capture_index_offset &&
1293 	    index < chip->capture_index_offset + chip->capture_streams)
1294 		return SNDRV_PCM_STREAM_CAPTURE;
1295 	return SNDRV_PCM_STREAM_PLAYBACK;
1296 }
1297 
1298 /* initialize SD streams */
1299 int azx_init_streams(struct azx *chip)
1300 {
1301 	int i;
1302 	int stream_tags[2] = { 0, 0 };
1303 
1304 	/* Initialize each stream (aka device):
1305 	 * assign the starting BDL address to each stream (device)
1306 	 * and initialize it.
1307 	 */
1308 	for (i = 0; i < chip->num_streams; i++) {
1309 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1310 		int dir, tag;
1311 
1312 		if (!azx_dev)
1313 			return -ENOMEM;
1314 
1315 		dir = stream_direction(chip, i);
1316 		/* The stream tag must be unique throughout the
1317 		 * stream direction group; valid values are 1...15.
1318 		 * Use separate per-direction stream tags if the
1319 		 * AZX_DCAPS_SEPARATE_STREAM_TAG flag is set.
1321 		 */
1322 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1323 			tag = ++stream_tags[dir];
1324 		else
1325 			tag = i + 1;
1326 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1327 				     i, dir, tag);
1328 	}
1329 
1330 	return 0;
1331 }
1332 EXPORT_SYMBOL_GPL(azx_init_streams);
1333 
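/* free all streams allocated by azx_init_streams() */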
1334 void azx_free_streams(struct azx *chip)
1335 {
1336 	struct hdac_bus *bus = azx_bus(chip);
1337 	struct hdac_stream *s;
1338 
1339 	while (!list_empty(&bus->stream_list)) {
1340 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1341 		list_del(&s->list);
1342 		kfree(stream_to_azx_dev(s));
1343 	}
1344 }
1345 EXPORT_SYMBOL_GPL(azx_free_streams);
1346