xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision 99a15348)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Implementation of the primary ALSA driver code base for Intel HD Audio.
5  *
6  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
7  *
8  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9  *                     PeiSen Hou <pshou@realtek.com.tw>
10  */
11 
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 
20 #ifdef CONFIG_X86
21 /* for ART-TSC conversion */
22 #include <asm/tsc.h>
23 #endif
24 
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28 #include "hda_local.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include "hda_controller_trace.h"
32 
33 /* DSP lock helpers */
34 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
35 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
36 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
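
/*
 * The DSP loader (see the CONFIG_SND_HDA_DSP_LOADER section below) can
 * temporarily claim a stream for firmware download.  While a stream is
 * DSP-locked, the normal PCM callbacks back off (-EBUSY from hw_params and
 * prepare, -EPIPE from trigger).  Without the loader support built in, the
 * underlying snd_hdac_* lock helpers are effectively no-ops.
 */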
37 
38 /* assign a stream for the PCM */
39 static inline struct azx_dev *
40 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
41 {
42 	struct hdac_stream *s;
43 
44 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
45 	if (!s)
46 		return NULL;
47 	return stream_to_azx_dev(s);
48 }
49 
50 /* release the assigned stream */
51 static inline void azx_release_device(struct azx_dev *azx_dev)
52 {
53 	snd_hdac_stream_release(azx_stream(azx_dev));
54 }
55 
56 static inline struct hda_pcm_stream *
57 to_hda_pcm_stream(struct snd_pcm_substream *substream)
58 {
59 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
60 	return &apcm->info->stream[substream->stream];
61 }
62 
63 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
64 				u64 nsec)
65 {
66 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
67 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
68 	u64 codec_frames, codec_nsecs;
69 
70 	if (!hinfo->ops.get_delay)
71 		return nsec;
72 
73 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
74 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
75 			      substream->runtime->rate);
76 
77 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
78 		return nsec + codec_nsecs;
79 
80 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
81 }
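
/*
 * Illustrative example (assumed numbers): with a 48 kHz stream and a codec
 * reporting a 64-frame delay through ops.get_delay, codec_nsecs above is
 * 64 * 10^9 / 48000 ~= 1,333,333 ns.  For capture the codec delay is added
 * to the link timestamp; for playback it is subtracted (clamped at zero).
 */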
82 
83 /*
84  * PCM ops
85  */
86 
87 static int azx_pcm_close(struct snd_pcm_substream *substream)
88 {
89 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
90 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
91 	struct azx *chip = apcm->chip;
92 	struct azx_dev *azx_dev = get_azx_dev(substream);
93 
94 	trace_azx_pcm_close(chip, azx_dev);
95 	mutex_lock(&chip->open_mutex);
96 	azx_release_device(azx_dev);
97 	if (hinfo->ops.close)
98 		hinfo->ops.close(hinfo, apcm->codec, substream);
99 	snd_hda_power_down(apcm->codec);
100 	mutex_unlock(&chip->open_mutex);
101 	snd_hda_codec_pcm_put(apcm->info);
102 	return 0;
103 }
104 
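/*
 * hw_params/hw_free: the DMA buffer itself is managed by the ALSA core
 * (see snd_pcm_set_managed_buffer_all() in snd_hda_attach_pcm_stream()
 * below), so hw_params only clears the cached stream parameters, and it
 * refuses with -EBUSY while the DSP loader owns the stream.
 */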
105 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
106 			     struct snd_pcm_hw_params *hw_params)
107 {
108 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
109 	struct azx *chip = apcm->chip;
110 	struct azx_dev *azx_dev = get_azx_dev(substream);
111 	int ret = 0;
112 
113 	trace_azx_pcm_hw_params(chip, azx_dev);
114 	dsp_lock(azx_dev);
115 	if (dsp_is_locked(azx_dev)) {
116 		ret = -EBUSY;
117 		goto unlock;
118 	}
119 
120 	azx_dev->core.bufsize = 0;
121 	azx_dev->core.period_bytes = 0;
122 	azx_dev->core.format_val = 0;
123 
124 unlock:
125 	dsp_unlock(azx_dev);
126 	return ret;
127 }
128 
129 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
130 {
131 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
132 	struct azx_dev *azx_dev = get_azx_dev(substream);
133 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
134 
135 	/* reset BDL address */
136 	dsp_lock(azx_dev);
137 	if (!dsp_is_locked(azx_dev))
138 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
139 
140 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
141 
142 	azx_stream(azx_dev)->prepared = 0;
143 	dsp_unlock(azx_dev);
144 	return 0;
145 }
146 
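/*
 * prepare: reset the stream, compute the HDA stream format descriptor from
 * the runtime parameters, and program both the controller stream registers
 * and the codec.  As an illustrative example, 48 kHz / 2 ch / S16_LE maps
 * to the format value 0x0011 per the HDA spec (48 kHz base rate, 16-bit
 * samples, channels - 1 = 1).
 */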
147 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
148 {
149 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
150 	struct azx *chip = apcm->chip;
151 	struct azx_dev *azx_dev = get_azx_dev(substream);
152 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
153 	struct snd_pcm_runtime *runtime = substream->runtime;
154 	unsigned int format_val, stream_tag;
155 	int err;
156 	struct hda_spdif_out *spdif =
157 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
158 	unsigned short ctls = spdif ? spdif->ctls : 0;
159 
160 	trace_azx_pcm_prepare(chip, azx_dev);
161 	dsp_lock(azx_dev);
162 	if (dsp_is_locked(azx_dev)) {
163 		err = -EBUSY;
164 		goto unlock;
165 	}
166 
167 	snd_hdac_stream_reset(azx_stream(azx_dev));
168 	format_val = snd_hdac_calc_stream_format(runtime->rate,
169 						runtime->channels,
170 						runtime->format,
171 						hinfo->maxbps,
172 						ctls);
173 	if (!format_val) {
174 		dev_err(chip->card->dev,
175 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
176 			runtime->rate, runtime->channels, runtime->format);
177 		err = -EINVAL;
178 		goto unlock;
179 	}
180 
181 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
182 	if (err < 0)
183 		goto unlock;
184 
185 	snd_hdac_stream_setup(azx_stream(azx_dev));
186 
187 	stream_tag = azx_dev->core.stream_tag;
188 	/* CA-IBG chips need the playback stream starting from 1 */
189 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
190 	    stream_tag > chip->capture_streams)
191 		stream_tag -= chip->capture_streams;
192 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
193 				     azx_dev->core.format_val, substream);
194 
195  unlock:
196 	if (!err)
197 		azx_stream(azx_dev)->prepared = 1;
198 	dsp_unlock(azx_dev);
199 	return err;
200 }
201 
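/*
 * trigger: substreams linked via snd_pcm_set_sync() are started and
 * stopped together.  The sequence below sets the SSYNC bits for all linked
 * streams, toggles their DMA run bits under bus->reg_lock, waits for the
 * streams to (de)synchronize, then clears the SSYNC bits and, on start,
 * initializes the timecounters used for link timestamps.
 */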
202 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
203 {
204 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
205 	struct azx *chip = apcm->chip;
206 	struct hdac_bus *bus = azx_bus(chip);
207 	struct azx_dev *azx_dev;
208 	struct snd_pcm_substream *s;
209 	struct hdac_stream *hstr;
210 	bool start;
211 	int sbits = 0;
212 	int sync_reg;
213 
214 	azx_dev = get_azx_dev(substream);
215 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
216 
217 	hstr = azx_stream(azx_dev);
218 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
219 		sync_reg = AZX_REG_OLD_SSYNC;
220 	else
221 		sync_reg = AZX_REG_SSYNC;
222 
223 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
224 		return -EPIPE;
225 
226 	switch (cmd) {
227 	case SNDRV_PCM_TRIGGER_START:
228 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
229 	case SNDRV_PCM_TRIGGER_RESUME:
230 		start = true;
231 		break;
232 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
233 	case SNDRV_PCM_TRIGGER_SUSPEND:
234 	case SNDRV_PCM_TRIGGER_STOP:
235 		start = false;
236 		break;
237 	default:
238 		return -EINVAL;
239 	}
240 
241 	snd_pcm_group_for_each_entry(s, substream) {
242 		if (s->pcm->card != substream->pcm->card)
243 			continue;
244 		azx_dev = get_azx_dev(s);
245 		sbits |= 1 << azx_dev->core.index;
246 		snd_pcm_trigger_done(s, substream);
247 	}
248 
249 	spin_lock(&bus->reg_lock);
250 
251 	/* first, set SYNC bits of corresponding streams */
252 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
253 
254 	snd_pcm_group_for_each_entry(s, substream) {
255 		if (s->pcm->card != substream->pcm->card)
256 			continue;
257 		azx_dev = get_azx_dev(s);
258 		if (start) {
259 			azx_dev->insufficient = 1;
260 			snd_hdac_stream_start(azx_stream(azx_dev), true);
261 		} else {
262 			snd_hdac_stream_stop(azx_stream(azx_dev));
263 		}
264 	}
265 	spin_unlock(&bus->reg_lock);
266 
267 	snd_hdac_stream_sync(hstr, start, sbits);
268 
269 	spin_lock(&bus->reg_lock);
270 	/* reset SYNC bits */
271 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
272 	if (start)
273 		snd_hdac_stream_timecounter_init(hstr, sbits);
274 	spin_unlock(&bus->reg_lock);
275 	return 0;
276 }
277 
278 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
279 {
280 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
281 }
282 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
283 
284 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
285 {
286 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
287 }
288 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
289 
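/*
 * Position reporting: a per-direction chip->get_position[] hook may select
 * either LPIB or the DMA position buffer (the default).  Illustrative
 * example: for S16_LE stereo one frame is 4 bytes, so a byte position of
 * 19200 corresponds to 4800 frames, i.e. 100 ms into the buffer at 48 kHz;
 * controller and codec delays are reported separately via runtime->delay
 * (in frames).
 */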
290 unsigned int azx_get_position(struct azx *chip,
291 			      struct azx_dev *azx_dev)
292 {
293 	struct snd_pcm_substream *substream = azx_dev->core.substream;
294 	unsigned int pos;
295 	int stream = substream->stream;
296 	int delay = 0;
297 
298 	if (chip->get_position[stream])
299 		pos = chip->get_position[stream](chip, azx_dev);
300 	else /* use the position buffer as default */
301 		pos = azx_get_pos_posbuf(chip, azx_dev);
302 
303 	if (pos >= azx_dev->core.bufsize)
304 		pos = 0;
305 
306 	if (substream->runtime) {
307 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
308 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
309 
310 		if (chip->get_delay[stream])
311 			delay += chip->get_delay[stream](chip, azx_dev, pos);
312 		if (hinfo->ops.get_delay)
313 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
314 						      substream);
315 		substream->runtime->delay = delay;
316 	}
317 
318 	trace_azx_get_position(chip, azx_dev, pos, delay);
319 	return pos;
320 }
321 EXPORT_SYMBOL_GPL(azx_get_position);
322 
323 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
324 {
325 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
326 	struct azx *chip = apcm->chip;
327 	struct azx_dev *azx_dev = get_azx_dev(substream);
328 	return bytes_to_frames(substream->runtime,
329 			       azx_get_position(chip, azx_dev));
330 }
331 
332 /*
333  * azx_scale64: Scale base by num/den while keeping overflow under control
334  *
335  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
336  *
337  * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48000
338  * seconds, which is about 384307 s, i.e. ~4.5 days.
339  *
340  * This scales the calculation so that overflow does not happen until
341  * after 2^64 / 48000 seconds, which is pretty large!
342  *
343  * In the calculation below:
344  *	base may overflow, but since there isn't any additional division
345  *	performed on base it's OK
346  *	rem can't overflow because both rem and num fit in 32 bits
347  */
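
/*
 * Worked example with illustrative numbers: azx_scale64(480123,
 * NSEC_PER_SEC, 48000) splits the operand as 480123 = 10 * 48000 + 123 and
 * returns 10 * 10^9 + (123 * 10^9) / 48000 = 10,002,562,500 ns, the same
 * result as 480123 * 10^9 / 48000 but without needing the 128-bit
 * intermediate product that a large link counter would otherwise require.
 */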
348 
349 #ifdef CONFIG_X86
350 static u64 azx_scale64(u64 base, u32 num, u32 den)
351 {
352 	u64 rem;
353 
354 	rem = do_div(base, den);
355 
356 	base *= num;
357 	rem *= num;
358 
359 	do_div(rem, den);
360 
361 	return base + rem;
362 }
363 
364 static int azx_get_sync_time(ktime_t *device,
365 		struct system_counterval_t *system, void *ctx)
366 {
367 	struct snd_pcm_substream *substream = ctx;
368 	struct azx_dev *azx_dev = get_azx_dev(substream);
369 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
370 	struct azx *chip = apcm->chip;
371 	struct snd_pcm_runtime *runtime;
372 	u64 ll_counter, ll_counter_l, ll_counter_h;
373 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
374 	u32 wallclk_ctr, wallclk_cycles;
375 	bool direction;
376 	u32 dma_select;
377 	u32 timeout;
378 	u32 retry_count = 0;
379 
380 	runtime = substream->runtime;
381 
382 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
383 		direction = 1;
384 	else
385 		direction = 0;
386 
387 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
388 	do {
389 		timeout = 100;
390 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
391 					(azx_dev->core.stream_tag - 1);
392 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
393 
394 		/* Enable the capture */
395 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
396 
397 		while (timeout) {
398 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
399 						GTSCC_TSCCD_MASK)
400 				break;
401 
402 			timeout--;
403 		}
404 
405 		if (!timeout) {
406 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
407 			return -EIO;
408 		}
409 
410 		/* Read wall clock counter */
411 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
412 
413 		/* Read TSC counter */
414 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
415 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
416 
417 		/* Read Link counter */
418 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
419 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
420 
421 		/* Ack: registers read done */
422 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
423 
424 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
425 						tsc_counter_l;
426 
427 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
428 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
429 
430 		/*
431 		 * An error can occur near a frame "rollover". The number
432 		 * of cycles within the frame indicates whether this error
433 		 * may have occurred. Here we use a margin of 10, i.e.
434 		 * HDA_MAX_CYCLE_OFFSET.
435 		 */
436 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
437 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
438 			break;
439 
440 		/*
441 		 * Sleep before reading again, else we may again get a value
442 		 * near MAX_CYCLE. Sleep for a different amount of time on
443 		 * each retry so we don't hit the same number again.
444 		 */
445 		udelay(retry_count++);
446 
447 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
448 
449 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
450 		dev_err_ratelimited(chip->card->dev,
451 			"Error in WALFCC cycle count\n");
452 		return -EIO;
453 	}
454 
455 	*device = ns_to_ktime(azx_scale64(ll_counter,
456 				NSEC_PER_SEC, runtime->rate));
457 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
458 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
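
	/*
	 * Illustrative check of the fractional term above: at 48 kHz and
	 * assuming HDA_MAX_CYCLE_VALUE + 1 == 500 wall-clock cycles per
	 * frame, each cycle adds 10^9 / (500 * 48000) ~= 41.67 ns, which is
	 * the period of the 24 MHz HDA wall clock.
	 */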
459 
460 	*system = convert_art_to_tsc(tsc_counter);
461 
462 	return 0;
463 }
464 
465 #else
466 static int azx_get_sync_time(ktime_t *device,
467 		struct system_counterval_t *system, void *ctx)
468 {
469 	return -ENXIO;
470 }
471 #endif
472 
473 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
474 			      struct system_device_crosststamp *xtstamp)
475 {
476 	return get_device_system_crosststamp(azx_get_sync_time,
477 					substream, NULL, xtstamp);
478 }
479 
480 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
481 				struct snd_pcm_audio_tstamp_config *ts)
482 {
483 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
484 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
485 			return true;
486 
487 	return false;
488 }
489 
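/*
 * get_time_info: three timestamp flavours are handled.  LINK uses the
 * per-stream timecounter driven by the HDA wall clock; LINK_SYNCHRONIZED
 * (advertised only when chip->gts_present) cross-timestamps the link
 * counter against ART/TSC via get_device_system_crosststamp(); everything
 * else falls back to the default system timestamp.
 */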
490 static int azx_get_time_info(struct snd_pcm_substream *substream,
491 			struct timespec64 *system_ts, struct timespec64 *audio_ts,
492 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
493 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
494 {
495 	struct azx_dev *azx_dev = get_azx_dev(substream);
496 	struct snd_pcm_runtime *runtime = substream->runtime;
497 	struct system_device_crosststamp xtstamp;
498 	int ret;
499 	u64 nsec;
500 
501 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
502 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
503 
504 		snd_pcm_gettime(substream->runtime, system_ts);
505 
506 		nsec = timecounter_read(&azx_dev->core.tc);
507 		if (audio_tstamp_config->report_delay)
508 			nsec = azx_adjust_codec_delay(substream, nsec);
509 
510 		*audio_ts = ns_to_timespec64(nsec);
511 
512 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
513 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
514 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
515 
516 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
517 
518 		ret = azx_get_crosststamp(substream, &xtstamp);
519 		if (ret)
520 			return ret;
521 
522 		switch (runtime->tstamp_type) {
523 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
524 			return -EINVAL;
525 
526 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
527 			*system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
528 			break;
529 
530 		default:
531 			*system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
532 			break;
533 
534 		}
535 
536 		*audio_ts = ktime_to_timespec64(xtstamp.device);
537 
538 		audio_tstamp_report->actual_type =
539 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
540 		audio_tstamp_report->accuracy_report = 1;
541 		/* 24 MHz WallClock == 42ns resolution */
542 		audio_tstamp_report->accuracy = 42;
543 
544 	} else {
545 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
546 	}
547 
548 	return 0;
549 }
550 
551 static const struct snd_pcm_hardware azx_pcm_hw = {
552 	.info =			(SNDRV_PCM_INFO_MMAP |
553 				 SNDRV_PCM_INFO_INTERLEAVED |
554 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
555 				 SNDRV_PCM_INFO_MMAP_VALID |
556 				 /* No full-resume yet implemented */
557 				 /* SNDRV_PCM_INFO_RESUME |*/
558 				 SNDRV_PCM_INFO_PAUSE |
559 				 SNDRV_PCM_INFO_SYNC_START |
560 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
561 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
562 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
563 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
564 	.rates =		SNDRV_PCM_RATE_48000,
565 	.rate_min =		48000,
566 	.rate_max =		48000,
567 	.channels_min =		2,
568 	.channels_max =		2,
569 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
570 	.period_bytes_min =	128,
571 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
572 	.periods_min =		2,
573 	.periods_max =		AZX_MAX_FRAG,
574 	.fifo_size =		0,
575 };
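
/*
 * The descriptor above only provides conservative defaults; the real
 * channel, format and rate ranges are filled in from the codec's
 * hda_pcm_stream in azx_pcm_open() below.
 */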
576 
577 static int azx_pcm_open(struct snd_pcm_substream *substream)
578 {
579 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
580 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
581 	struct azx *chip = apcm->chip;
582 	struct azx_dev *azx_dev;
583 	struct snd_pcm_runtime *runtime = substream->runtime;
584 	int err;
585 	int buff_step;
586 
587 	snd_hda_codec_pcm_get(apcm->info);
588 	mutex_lock(&chip->open_mutex);
589 	azx_dev = azx_assign_device(chip, substream);
590 	trace_azx_pcm_open(chip, azx_dev);
591 	if (azx_dev == NULL) {
592 		err = -EBUSY;
593 		goto unlock;
594 	}
595 	runtime->private_data = azx_dev;
596 
597 	runtime->hw = azx_pcm_hw;
598 	if (chip->gts_present)
599 		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
600 	runtime->hw.channels_min = hinfo->channels_min;
601 	runtime->hw.channels_max = hinfo->channels_max;
602 	runtime->hw.formats = hinfo->formats;
603 	runtime->hw.rates = hinfo->rates;
604 	snd_pcm_limit_hw_rates(runtime);
605 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
606 
607 	/* avoid wrap-around with wall-clock */
608 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
609 				     20,
610 				     178000000);
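
	/*
	 * (The ~178 s upper bound roughly matches the wrap period of the
	 * 32-bit 24 MHz wall clock: 2^32 / 24,000,000 ~= 178.9 s.)
	 */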
611 
612 	if (chip->align_buffer_size)
613 		/* constrain buffer sizes to be a multiple of 128
614 		   bytes. This is more efficient in terms of memory
615 		   access, but isn't required by the HDA spec and
616 		   prevents users from specifying exact period/buffer
617 		   sizes. For example, at 44.1 kHz a period size set
618 		   to 20 ms will be rounded to 19.59 ms. */
619 		buff_step = 128;
620 	else
621 		/* Don't enforce steps on buffer sizes; they still need
622 		   to be a multiple of 4 bytes (HDA spec). Tested on
623 		   Intel HDA controllers; it may not work on all devices
624 		   where this option is disabled. */
625 		buff_step = 4;
626 
627 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
628 				   buff_step);
629 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
630 				   buff_step);
631 	snd_hda_power_up(apcm->codec);
632 	if (hinfo->ops.open)
633 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
634 	else
635 		err = -ENODEV;
636 	if (err < 0) {
637 		azx_release_device(azx_dev);
638 		goto powerdown;
639 	}
640 	snd_pcm_limit_hw_rates(runtime);
641 	/* sanity check */
642 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
643 	    snd_BUG_ON(!runtime->hw.channels_max) ||
644 	    snd_BUG_ON(!runtime->hw.formats) ||
645 	    snd_BUG_ON(!runtime->hw.rates)) {
646 		azx_release_device(azx_dev);
647 		if (hinfo->ops.close)
648 			hinfo->ops.close(hinfo, apcm->codec, substream);
649 		err = -EINVAL;
650 		goto powerdown;
651 	}
652 
653 	/* disable LINK_ATIME timestamps for capture streams
654 	   until we figure out how to handle digital inputs */
655 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
656 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
657 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
658 	}
659 
660 	snd_pcm_set_sync(substream);
661 	mutex_unlock(&chip->open_mutex);
662 	return 0;
663 
664  powerdown:
665 	snd_hda_power_down(apcm->codec);
666  unlock:
667 	mutex_unlock(&chip->open_mutex);
668 	snd_hda_codec_pcm_put(apcm->info);
669 	return err;
670 }
671 
672 static const struct snd_pcm_ops azx_pcm_ops = {
673 	.open = azx_pcm_open,
674 	.close = azx_pcm_close,
675 	.hw_params = azx_pcm_hw_params,
676 	.hw_free = azx_pcm_hw_free,
677 	.prepare = azx_pcm_prepare,
678 	.trigger = azx_pcm_trigger,
679 	.pointer = azx_pcm_pointer,
680 	.get_time_info =  azx_get_time_info,
681 };
682 
683 static void azx_pcm_free(struct snd_pcm *pcm)
684 {
685 	struct azx_pcm *apcm = pcm->private_data;
686 	if (apcm) {
687 		list_del(&apcm->list);
688 		apcm->info->pcm = NULL;
689 		kfree(apcm);
690 	}
691 }
692 
693 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
694 
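/*
 * Create the ALSA PCM device for a codec PCM: allocate the azx_pcm
 * wrapper, attach azx_pcm_ops to each direction that has substreams, and
 * let the ALSA core pre-allocate SG DMA buffers (write-combined when
 * chip->uc_buffer is set), with the pre-allocation capped at
 * MAX_PREALLOC_SIZE.
 */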
695 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
696 			      struct hda_pcm *cpcm)
697 {
698 	struct hdac_bus *bus = &_bus->core;
699 	struct azx *chip = bus_to_azx(bus);
700 	struct snd_pcm *pcm;
701 	struct azx_pcm *apcm;
702 	int pcm_dev = cpcm->device;
703 	unsigned int size;
704 	int s, err;
705 	int type = SNDRV_DMA_TYPE_DEV_SG;
706 
707 	list_for_each_entry(apcm, &chip->pcm_list, list) {
708 		if (apcm->pcm->device == pcm_dev) {
709 			dev_err(chip->card->dev, "PCM %d already exists\n",
710 				pcm_dev);
711 			return -EBUSY;
712 		}
713 	}
714 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
715 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
716 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
717 			  &pcm);
718 	if (err < 0)
719 		return err;
720 	strscpy(pcm->name, cpcm->name, sizeof(pcm->name));
721 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
722 	if (apcm == NULL) {
723 		snd_device_free(chip->card, pcm);
724 		return -ENOMEM;
725 	}
726 	apcm->chip = chip;
727 	apcm->pcm = pcm;
728 	apcm->codec = codec;
729 	apcm->info = cpcm;
730 	pcm->private_data = apcm;
731 	pcm->private_free = azx_pcm_free;
732 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
733 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
734 	list_add_tail(&apcm->list, &chip->pcm_list);
735 	cpcm->pcm = pcm;
736 	for (s = 0; s < 2; s++) {
737 		if (cpcm->stream[s].substreams)
738 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
739 	}
740 	/* buffer pre-allocation */
741 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
742 	if (size > MAX_PREALLOC_SIZE)
743 		size = MAX_PREALLOC_SIZE;
744 	if (chip->uc_buffer)
745 		type = SNDRV_DMA_TYPE_DEV_WC_SG;
746 	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
747 				       size, MAX_PREALLOC_SIZE);
748 	return 0;
749 }
750 
751 static unsigned int azx_command_addr(u32 cmd)
752 {
753 	unsigned int addr = cmd >> 28;
754 
755 	if (addr >= AZX_MAX_CODECS) {
756 		snd_BUG();
757 		addr = 0;
758 	}
759 
760 	return addr;
761 }
762 
763 /* receive a response */
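/*
 * On a CORB/RIRB timeout this escalates through several fallbacks: switch
 * the bus to polling mode, disable MSI (if in use), request a bus reset
 * when allowed, and finally drop to single_cmd mode as the last resort.
 */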
764 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
765 				 unsigned int *res)
766 {
767 	struct azx *chip = bus_to_azx(bus);
768 	struct hda_bus *hbus = &chip->bus;
769 	int err;
770 
771  again:
772 	err = snd_hdac_bus_get_response(bus, addr, res);
773 	if (!err)
774 		return 0;
775 
776 	if (hbus->no_response_fallback)
777 		return -EIO;
778 
779 	if (!bus->polling_mode) {
780 		dev_warn(chip->card->dev,
781 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
782 			 bus->last_cmd[addr]);
783 		bus->polling_mode = 1;
784 		goto again;
785 	}
786 
787 	if (chip->msi) {
788 		dev_warn(chip->card->dev,
789 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
790 			 bus->last_cmd[addr]);
791 		if (chip->ops->disable_msi_reset_irq &&
792 		    chip->ops->disable_msi_reset_irq(chip) < 0)
793 			return -EIO;
794 		goto again;
795 	}
796 
797 	if (chip->probing) {
798 		/* If this critical timeout happens during the codec probing
799 		 * phase, this is likely an access to a non-existing codec
800 		 * slot.  Better to return an error and reset the system.
801 		 */
802 		return -EIO;
803 	}
804 
805 	/* no fallback mechanism? */
806 	if (!chip->fallback_to_single_cmd)
807 		return -EIO;
808 
809 	/* a fatal communication error; need either to reset or to fallback
810 	 * to the single_cmd mode
811 	 */
812 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
813 		hbus->response_reset = 1;
814 		dev_err(chip->card->dev,
815 			"No response from codec, resetting bus: last cmd=0x%08x\n",
816 			bus->last_cmd[addr]);
817 		return -EAGAIN; /* give a chance to retry */
818 	}
819 
820 	dev_err(chip->card->dev,
821 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
822 		bus->last_cmd[addr]);
823 	chip->single_cmd = 1;
824 	hbus->response_reset = 0;
825 	snd_hdac_bus_stop_cmd_io(bus);
826 	return -EIO;
827 }
828 
829 /*
830  * Use the single immediate command instead of CORB/RIRB for simplicity
831  *
832  * Note: according to Intel, this is not the preferred use.  The command was
833  *       intended for the BIOS only, and may get confused with unsolicited
834  *       responses.  So, we shouldn't use it for normal operation from the
835  *       driver.
836  *       The code is kept here, however, for debugging/testing purposes.
837  */
838 
839 /* receive a response */
840 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
841 {
842 	int timeout = 50;
843 
844 	while (timeout--) {
845 		/* check the IRV (immediate result valid) bit */
846 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
847 			/* reuse rirb.res as the response return value */
848 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
849 			return 0;
850 		}
851 		udelay(1);
852 	}
853 	if (printk_ratelimit())
854 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
855 			azx_readw(chip, IRS));
856 	azx_bus(chip)->rirb.res[addr] = -1;
857 	return -EIO;
858 }
859 
860 /* send a command */
861 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
862 {
863 	struct azx *chip = bus_to_azx(bus);
864 	unsigned int addr = azx_command_addr(val);
865 	int timeout = 50;
866 
867 	bus->last_cmd[azx_command_addr(val)] = val;
868 	while (timeout--) {
869 		/* check ICB busy bit */
870 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
871 			/* Clear IRV valid bit */
872 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
873 				   AZX_IRS_VALID);
874 			azx_writel(chip, IC, val);
875 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
876 				   AZX_IRS_BUSY);
877 			return azx_single_wait_for_response(chip, addr);
878 		}
879 		udelay(1);
880 	}
881 	if (printk_ratelimit())
882 		dev_dbg(chip->card->dev,
883 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
884 			azx_readw(chip, IRS), val);
885 	return -EIO;
886 }
887 
888 /* receive a response */
889 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
890 				   unsigned int *res)
891 {
892 	if (res)
893 		*res = bus->rirb.res[addr];
894 	return 0;
895 }
896 
897 /*
898  * Below are the main callbacks from hda_codec.
899  *
900  * They are just thin wrappers that dispatch to sub-callbacks according
901  * to the current setting of chip->single_cmd.
902  */
903 
904 /* send a command */
905 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
906 {
907 	struct azx *chip = bus_to_azx(bus);
908 
909 	if (chip->disabled)
910 		return 0;
911 	if (chip->single_cmd)
912 		return azx_single_send_cmd(bus, val);
913 	else
914 		return snd_hdac_bus_send_cmd(bus, val);
915 }
916 
917 /* get a response */
918 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
919 			    unsigned int *res)
920 {
921 	struct azx *chip = bus_to_azx(bus);
922 
923 	if (chip->disabled)
924 		return 0;
925 	if (chip->single_cmd)
926 		return azx_single_get_response(bus, addr, res);
927 	else
928 		return azx_rirb_get_response(bus, addr, res);
929 }
930 
931 static const struct hdac_bus_ops bus_core_ops = {
932 	.command = azx_send_cmd,
933 	.get_response = azx_get_response,
934 };
935 
936 #ifdef CONFIG_SND_HDA_DSP_LOADER
937 /*
938  * DSP loading code (e.g. for CA0132)
939  */
940 
941 /* use the first playback stream for loading DSP */
942 static struct azx_dev *
943 azx_get_dsp_loader_dev(struct azx *chip)
944 {
945 	struct hdac_bus *bus = azx_bus(chip);
946 	struct hdac_stream *s;
947 
948 	list_for_each_entry(s, &bus->stream_list, list)
949 		if (s->index == chip->playback_index_offset)
950 			return stream_to_azx_dev(s);
951 
952 	return NULL;
953 }
954 
955 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
956 				   unsigned int byte_size,
957 				   struct snd_dma_buffer *bufp)
958 {
959 	struct hdac_bus *bus = &codec->bus->core;
960 	struct azx *chip = bus_to_azx(bus);
961 	struct azx_dev *azx_dev;
962 	struct hdac_stream *hstr;
963 	bool saved = false;
964 	int err;
965 
966 	azx_dev = azx_get_dsp_loader_dev(chip);
967 	hstr = azx_stream(azx_dev);
968 	spin_lock_irq(&bus->reg_lock);
969 	if (hstr->opened) {
970 		chip->saved_azx_dev = *azx_dev;
971 		saved = true;
972 	}
973 	spin_unlock_irq(&bus->reg_lock);
974 
975 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
976 	if (err < 0) {
977 		spin_lock_irq(&bus->reg_lock);
978 		if (saved)
979 			*azx_dev = chip->saved_azx_dev;
980 		spin_unlock_irq(&bus->reg_lock);
981 		return err;
982 	}
983 
984 	hstr->prepared = 0;
985 	return err;
986 }
987 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
988 
989 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
990 {
991 	struct hdac_bus *bus = &codec->bus->core;
992 	struct azx *chip = bus_to_azx(bus);
993 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
994 
995 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
996 }
997 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
998 
999 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1000 				    struct snd_dma_buffer *dmab)
1001 {
1002 	struct hdac_bus *bus = &codec->bus->core;
1003 	struct azx *chip = bus_to_azx(bus);
1004 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1005 	struct hdac_stream *hstr = azx_stream(azx_dev);
1006 
1007 	if (!dmab->area || !hstr->locked)
1008 		return;
1009 
1010 	snd_hdac_dsp_cleanup(hstr, dmab);
1011 	spin_lock_irq(&bus->reg_lock);
1012 	if (hstr->opened)
1013 		*azx_dev = chip->saved_azx_dev;
1014 	hstr->locked = false;
1015 	spin_unlock_irq(&bus->reg_lock);
1016 }
1017 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1018 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1019 
1020 /*
1021  * reset and start the controller registers
1022  */
1023 void azx_init_chip(struct azx *chip, bool full_reset)
1024 {
1025 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1026 		/* correct RINTCNT for CXT */
1027 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1028 			azx_writew(chip, RINTCNT, 0xc0);
1029 	}
1030 }
1031 EXPORT_SYMBOL_GPL(azx_init_chip);
1032 
1033 void azx_stop_all_streams(struct azx *chip)
1034 {
1035 	struct hdac_bus *bus = azx_bus(chip);
1036 	struct hdac_stream *s;
1037 
1038 	list_for_each_entry(s, &bus->stream_list, list)
1039 		snd_hdac_stream_stop(s);
1040 }
1041 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1042 
1043 void azx_stop_chip(struct azx *chip)
1044 {
1045 	snd_hdac_bus_stop_chip(azx_bus(chip));
1046 }
1047 EXPORT_SYMBOL_GPL(azx_stop_chip);
1048 
1049 /*
1050  * interrupt handler
1051  */
1052 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1053 {
1054 	struct azx *chip = bus_to_azx(bus);
1055 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1056 
1057 	/* check whether this IRQ is really acceptable */
1058 	if (!chip->ops->position_check ||
1059 	    chip->ops->position_check(chip, azx_dev)) {
1060 		spin_unlock(&bus->reg_lock);
1061 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1062 		spin_lock(&bus->reg_lock);
1063 	}
1064 }
1065 
1066 irqreturn_t azx_interrupt(int irq, void *dev_id)
1067 {
1068 	struct azx *chip = dev_id;
1069 	struct hdac_bus *bus = azx_bus(chip);
1070 	u32 status;
1071 	bool active, handled = false;
1072 	int repeat = 0; /* count for avoiding endless loop */
1073 
1074 #ifdef CONFIG_PM
1075 	if (azx_has_pm_runtime(chip))
1076 		if (!pm_runtime_active(chip->card->dev))
1077 			return IRQ_NONE;
1078 #endif
1079 
1080 	spin_lock(&bus->reg_lock);
1081 
1082 	if (chip->disabled)
1083 		goto unlock;
1084 
1085 	do {
1086 		status = azx_readl(chip, INTSTS);
1087 		if (status == 0 || status == 0xffffffff)
1088 			break;
1089 
1090 		handled = true;
1091 		active = false;
1092 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1093 			active = true;
1094 
1095 		status = azx_readb(chip, RIRBSTS);
1096 		if (status & RIRB_INT_MASK) {
1097 			/*
1098 			 * Clearing the interrupt status here ensures that no
1099 			 * interrupt gets masked after the RIRB wp is read in
1100 			 * snd_hdac_bus_update_rirb. This avoids a possible
1101 			 * race condition where codec response in RIRB may
1102 			 * remain unserviced by IRQ, eventually falling back
1103 			 * to polling mode in azx_rirb_get_response.
1104 			 */
1105 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1106 			active = true;
1107 			if (status & RIRB_INT_RESPONSE) {
1108 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1109 					udelay(80);
1110 				snd_hdac_bus_update_rirb(bus);
1111 			}
1112 		}
1113 	} while (active && ++repeat < 10);
1114 
1115  unlock:
1116 	spin_unlock(&bus->reg_lock);
1117 
1118 	return IRQ_RETVAL(handled);
1119 }
1120 EXPORT_SYMBOL_GPL(azx_interrupt);
1121 
1122 /*
1123  * Codec interface
1124  */
1125 
1126 /*
1127  * Probe the given codec address
1128  */
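/*
 * The probe verb below follows the standard HDA encoding: bits 31:28 carry
 * the codec address, 27:20 the node ID (root = 0), 19:8 the verb
 * (AC_VERB_PARAMETERS) and 7:0 the parameter index (AC_PAR_VENDOR_ID);
 * probing address 0 therefore sends 0x000f0000.
 */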
1129 static int probe_codec(struct azx *chip, int addr)
1130 {
1131 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1132 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1133 	struct hdac_bus *bus = azx_bus(chip);
1134 	int err;
1135 	unsigned int res = -1;
1136 
1137 	mutex_lock(&bus->cmd_mutex);
1138 	chip->probing = 1;
1139 	azx_send_cmd(bus, cmd);
1140 	err = azx_get_response(bus, addr, &res);
1141 	chip->probing = 0;
1142 	mutex_unlock(&bus->cmd_mutex);
1143 	if (err < 0 || res == -1)
1144 		return -EIO;
1145 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1146 	return 0;
1147 }
1148 
1149 void snd_hda_bus_reset(struct hda_bus *bus)
1150 {
1151 	struct azx *chip = bus_to_azx(&bus->core);
1152 
1153 	bus->in_reset = 1;
1154 	azx_stop_chip(chip);
1155 	azx_init_chip(chip, true);
1156 	if (bus->core.chip_init)
1157 		snd_hda_bus_reset_codecs(bus);
1158 	bus->in_reset = 0;
1159 }
1160 
1161 /* HD-audio bus initialization */
1162 int azx_bus_init(struct azx *chip, const char *model)
1163 {
1164 	struct hda_bus *bus = &chip->bus;
1165 	int err;
1166 
1167 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1168 	if (err < 0)
1169 		return err;
1170 
1171 	bus->card = chip->card;
1172 	mutex_init(&bus->prepare_mutex);
1173 	bus->pci = chip->pci;
1174 	bus->modelname = model;
1175 	bus->mixer_assigned = -1;
1176 	bus->core.snoop = azx_snoop(chip);
1177 	if (chip->get_position[0] != azx_get_pos_lpib ||
1178 	    chip->get_position[1] != azx_get_pos_lpib)
1179 		bus->core.use_posbuf = true;
1180 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1181 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1182 		bus->core.corbrp_self_clear = true;
1183 
1184 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1185 		bus->core.align_bdle_4k = true;
1186 
1187 	/* enable the sync_write flag by default for stable communication */
1188 	bus->core.sync_write = 1;
1189 
1190 	return 0;
1191 }
1192 EXPORT_SYMBOL_GPL(azx_bus_init);
1193 
1194 /* Probe codecs */
1195 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1196 {
1197 	struct hdac_bus *bus = azx_bus(chip);
1198 	int c, codecs, err;
1199 
1200 	codecs = 0;
1201 	if (!max_slots)
1202 		max_slots = AZX_DEFAULT_CODECS;
1203 
1204 	/* First try to probe all given codec slots */
1205 	for (c = 0; c < max_slots; c++) {
1206 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1207 			if (probe_codec(chip, c) < 0) {
1208 				/* Some BIOSes report codec addresses that
1209 				 * don't actually exist
1210 				 */
1211 				dev_warn(chip->card->dev,
1212 					 "Codec #%d probe error; disabling it...\n", c);
1213 				bus->codec_mask &= ~(1 << c);
1214 				/* Worse, accessing a non-existing codec
1215 				 * often screws up the controller chip and
1216 				 * disturbs further communication. Thus, if
1217 				 * an error occurs during probing, it is
1218 				 * better to reset the controller chip to
1219 				 * get back to a sane state.
1220 				 */
1221 				azx_stop_chip(chip);
1222 				azx_init_chip(chip, true);
1223 			}
1224 		}
1225 	}
1226 
1227 	/* Then create codec instances */
1228 	for (c = 0; c < max_slots; c++) {
1229 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1230 			struct hda_codec *codec;
1231 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1232 			if (err < 0)
1233 				continue;
1234 			codec->jackpoll_interval = chip->jackpoll_interval;
1235 			codec->beep_mode = chip->beep_mode;
1236 			codecs++;
1237 		}
1238 	}
1239 	if (!codecs) {
1240 		dev_err(chip->card->dev, "no codecs initialized\n");
1241 		return -ENXIO;
1242 	}
1243 	return 0;
1244 }
1245 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1246 
1247 /* configure each codec instance */
1248 int azx_codec_configure(struct azx *chip)
1249 {
1250 	struct hda_codec *codec, *next;
1251 	int success = 0;
1252 
1253 	list_for_each_codec(codec, &chip->bus) {
1254 		if (!snd_hda_codec_configure(codec))
1255 			success++;
1256 	}
1257 
1258 	if (success) {
1259 		/* unregister failed codecs if any codec has been probed */
1260 		list_for_each_codec_safe(codec, next, &chip->bus) {
1261 			if (!codec->configured) {
1262 				codec_err(codec, "Unable to configure, disabling\n");
1263 				snd_hdac_device_unregister(&codec->core);
1264 			}
1265 		}
1266 	}
1267 
1268 	return success ? 0 : -ENODEV;
1269 }
1270 EXPORT_SYMBOL_GPL(azx_codec_configure);
1271 
1272 static int stream_direction(struct azx *chip, unsigned char index)
1273 {
1274 	if (index >= chip->capture_index_offset &&
1275 	    index < chip->capture_index_offset + chip->capture_streams)
1276 		return SNDRV_PCM_STREAM_CAPTURE;
1277 	return SNDRV_PCM_STREAM_PLAYBACK;
1278 }
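
/*
 * Illustrative layout (the actual numbers come from the GCAP register at
 * probe time): with capture_index_offset = 0 and capture_streams = 4,
 * stream indices 0..3 are capture and the remaining indices are playback,
 * matching the usual Intel ordering of input streams before output
 * streams.
 */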
1279 
1280 /* initialize SD streams */
1281 int azx_init_streams(struct azx *chip)
1282 {
1283 	int i;
1284 	int stream_tags[2] = { 0, 0 };
1285 
1286 	/* Initialize each stream (aka device):
1287 	 * assign the starting BDL address to each stream (device)
1288 	 * and initialize it.
1289 	 */
1290 	for (i = 0; i < chip->num_streams; i++) {
1291 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1292 		int dir, tag;
1293 
1294 		if (!azx_dev)
1295 			return -ENOMEM;
1296 
1297 		dir = stream_direction(chip, i);
1298 		/* The stream tag must be unique within each
1299 		 * stream direction group; valid values are
1300 		 * 1...15. Use a separate per-direction tag
1301 		 * sequence if the AZX_DCAPS_SEPARATE_STREAM_TAG
1302 		 * flag is set.
1303 		 */
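		/*
		 * Example: on a controller with 4 capture and 4 playback
		 * streams, the legacy scheme tags them 1..8 across both
		 * directions, while AZX_DCAPS_SEPARATE_STREAM_TAG restarts
		 * the numbering per direction (capture 1..4, playback
		 * 1..4).
		 */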
1304 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1305 			tag = ++stream_tags[dir];
1306 		else
1307 			tag = i + 1;
1308 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1309 				     i, dir, tag);
1310 	}
1311 
1312 	return 0;
1313 }
1314 EXPORT_SYMBOL_GPL(azx_init_streams);
1315 
1316 void azx_free_streams(struct azx *chip)
1317 {
1318 	struct hdac_bus *bus = azx_bus(chip);
1319 	struct hdac_stream *s;
1320 
1321 	while (!list_empty(&bus->stream_list)) {
1322 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1323 		list_del(&s->list);
1324 		kfree(stream_to_azx_dev(s));
1325 	}
1326 }
1327 EXPORT_SYMBOL_GPL(azx_free_streams);
1328