xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision 67559900)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Implementation of the primary ALSA driver code base for Intel HD Audio.
5  *
6  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
7  *
8  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9  *                     PeiSen Hou <pshou@realtek.com.tw>
10  */
11 
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 
20 #ifdef CONFIG_X86
21 /* for art-tsc conversion */
22 #include <asm/tsc.h>
23 #endif
24 
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28 #include "hda_local.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include "hda_controller_trace.h"
32 
33 /* DSP lock helpers */
34 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
35 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
36 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
37 
38 /* assign a stream for the PCM */
39 static inline struct azx_dev *
40 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
41 {
42 	struct hdac_stream *s;
43 
44 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
45 	if (!s)
46 		return NULL;
47 	return stream_to_azx_dev(s);
48 }
49 
50 /* release the assigned stream */
51 static inline void azx_release_device(struct azx_dev *azx_dev)
52 {
53 	snd_hdac_stream_release(azx_stream(azx_dev));
54 }
55 
56 static inline struct hda_pcm_stream *
57 to_hda_pcm_stream(struct snd_pcm_substream *substream)
58 {
59 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
60 	return &apcm->info->stream[substream->stream];
61 }
62 
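/*
 * Added note: azx_adjust_codec_delay() below converts the codec-side delay,
 * reported in frames by the get_delay op, into nanoseconds and folds it into
 * the link timestamp.  As a rough illustration (not from the original
 * source): at 48 kHz a codec delay of 48 frames is 48 * 10^9 / 48000 =
 * 1,000,000 ns, i.e. 1 ms; it is added for capture and subtracted (clamped
 * at 0) for playback.
 */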
63 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
64 				u64 nsec)
65 {
66 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
67 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
68 	u64 codec_frames, codec_nsecs;
69 
70 	if (!hinfo->ops.get_delay)
71 		return nsec;
72 
73 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
74 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
75 			      substream->runtime->rate);
76 
77 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
78 		return nsec + codec_nsecs;
79 
80 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
81 }
82 
83 /*
84  * PCM ops
85  */
86 
87 static int azx_pcm_close(struct snd_pcm_substream *substream)
88 {
89 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
90 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
91 	struct azx *chip = apcm->chip;
92 	struct azx_dev *azx_dev = get_azx_dev(substream);
93 
94 	trace_azx_pcm_close(chip, azx_dev);
95 	mutex_lock(&chip->open_mutex);
96 	azx_release_device(azx_dev);
97 	if (hinfo->ops.close)
98 		hinfo->ops.close(hinfo, apcm->codec, substream);
99 	snd_hda_power_down(apcm->codec);
100 	mutex_unlock(&chip->open_mutex);
101 	snd_hda_codec_pcm_put(apcm->info);
102 	return 0;
103 }
104 
105 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
106 			     struct snd_pcm_hw_params *hw_params)
107 {
108 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
109 	struct azx *chip = apcm->chip;
110 	struct azx_dev *azx_dev = get_azx_dev(substream);
111 	int ret = 0;
112 
113 	trace_azx_pcm_hw_params(chip, azx_dev);
114 	dsp_lock(azx_dev);
115 	if (dsp_is_locked(azx_dev)) {
116 		ret = -EBUSY;
117 		goto unlock;
118 	}
119 
120 	azx_dev->core.bufsize = 0;
121 	azx_dev->core.period_bytes = 0;
122 	azx_dev->core.format_val = 0;
123 
124 unlock:
125 	dsp_unlock(azx_dev);
126 	return ret;
127 }
128 
129 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
130 {
131 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
132 	struct azx_dev *azx_dev = get_azx_dev(substream);
133 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
134 
135 	/* reset BDL address */
136 	dsp_lock(azx_dev);
137 	if (!dsp_is_locked(azx_dev))
138 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
139 
140 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
141 
142 	azx_stream(azx_dev)->prepared = 0;
143 	dsp_unlock(azx_dev);
144 	return 0;
145 }
146 
147 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
148 {
149 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
150 	struct azx *chip = apcm->chip;
151 	struct azx_dev *azx_dev = get_azx_dev(substream);
152 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
153 	struct snd_pcm_runtime *runtime = substream->runtime;
154 	unsigned int format_val, stream_tag;
155 	int err;
156 	struct hda_spdif_out *spdif =
157 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
158 	unsigned short ctls = spdif ? spdif->ctls : 0;
159 
160 	trace_azx_pcm_prepare(chip, azx_dev);
161 	dsp_lock(azx_dev);
162 	if (dsp_is_locked(azx_dev)) {
163 		err = -EBUSY;
164 		goto unlock;
165 	}
166 
167 	snd_hdac_stream_reset(azx_stream(azx_dev));
168 	format_val = snd_hdac_calc_stream_format(runtime->rate,
169 						runtime->channels,
170 						runtime->format,
171 						hinfo->maxbps,
172 						ctls);
173 	if (!format_val) {
174 		dev_err(chip->card->dev,
175 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
176 			runtime->rate, runtime->channels, runtime->format);
177 		err = -EINVAL;
178 		goto unlock;
179 	}
180 
181 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
182 	if (err < 0)
183 		goto unlock;
184 
185 	snd_hdac_stream_setup(azx_stream(azx_dev));
186 
187 	stream_tag = azx_dev->core.stream_tag;
188 	/* CA-IBG chips need the playback stream starting from 1 */
189 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
190 	    stream_tag > chip->capture_streams)
191 		stream_tag -= chip->capture_streams;
192 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
193 				     azx_dev->core.format_val, substream);
194 
195  unlock:
196 	if (!err)
197 		azx_stream(azx_dev)->prepared = 1;
198 	dsp_unlock(azx_dev);
199 	return err;
200 }
201 
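/*
 * Added note: for linked substreams the trigger below first sets the SSYNC
 * bits of all grouped streams, starts or stops their DMA engines under the
 * register lock, waits until the hardware reports them in sync, and then
 * clears the SSYNC bits again; on start the shared timecounter is
 * (re)initialized as well.
 */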
202 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
203 {
204 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
205 	struct azx *chip = apcm->chip;
206 	struct hdac_bus *bus = azx_bus(chip);
207 	struct azx_dev *azx_dev;
208 	struct snd_pcm_substream *s;
209 	struct hdac_stream *hstr;
210 	bool start;
211 	int sbits = 0;
212 	int sync_reg;
213 
214 	azx_dev = get_azx_dev(substream);
215 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
216 
217 	hstr = azx_stream(azx_dev);
218 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
219 		sync_reg = AZX_REG_OLD_SSYNC;
220 	else
221 		sync_reg = AZX_REG_SSYNC;
222 
223 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
224 		return -EPIPE;
225 
226 	switch (cmd) {
227 	case SNDRV_PCM_TRIGGER_START:
228 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
229 	case SNDRV_PCM_TRIGGER_RESUME:
230 		start = true;
231 		break;
232 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
233 	case SNDRV_PCM_TRIGGER_SUSPEND:
234 	case SNDRV_PCM_TRIGGER_STOP:
235 		start = false;
236 		break;
237 	default:
238 		return -EINVAL;
239 	}
240 
241 	snd_pcm_group_for_each_entry(s, substream) {
242 		if (s->pcm->card != substream->pcm->card)
243 			continue;
244 		azx_dev = get_azx_dev(s);
245 		sbits |= 1 << azx_dev->core.index;
246 		snd_pcm_trigger_done(s, substream);
247 	}
248 
249 	spin_lock(&bus->reg_lock);
250 
251 	/* first, set SYNC bits of corresponding streams */
252 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
253 
254 	snd_pcm_group_for_each_entry(s, substream) {
255 		if (s->pcm->card != substream->pcm->card)
256 			continue;
257 		azx_dev = get_azx_dev(s);
258 		if (start) {
259 			azx_dev->insufficient = 1;
260 			snd_hdac_stream_start(azx_stream(azx_dev), true);
261 		} else {
262 			snd_hdac_stream_stop(azx_stream(azx_dev));
263 		}
264 	}
265 	spin_unlock(&bus->reg_lock);
266 
267 	snd_hdac_stream_sync(hstr, start, sbits);
268 
269 	spin_lock(&bus->reg_lock);
270 	/* reset SYNC bits */
271 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
272 	if (start)
273 		snd_hdac_stream_timecounter_init(hstr, sbits);
274 	spin_unlock(&bus->reg_lock);
275 	return 0;
276 }
277 
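/*
 * Added note: two ways to read the current DMA position.  azx_get_pos_lpib()
 * reads the stream's LPIB ("Link Position In Buffer") register directly,
 * while azx_get_pos_posbuf() reads the DMA position buffer that the
 * controller updates in host memory.  Which one is more reliable is
 * chip-dependent, hence the per-chip chip->get_position[] hooks used in
 * azx_get_position().
 */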
278 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
279 {
280 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
281 }
282 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
283 
284 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
285 {
286 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
287 }
288 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
289 
290 unsigned int azx_get_position(struct azx *chip,
291 			      struct azx_dev *azx_dev)
292 {
293 	struct snd_pcm_substream *substream = azx_dev->core.substream;
294 	unsigned int pos;
295 	int stream = substream->stream;
296 	int delay = 0;
297 
298 	if (chip->get_position[stream])
299 		pos = chip->get_position[stream](chip, azx_dev);
300 	else /* use the position buffer as default */
301 		pos = azx_get_pos_posbuf(chip, azx_dev);
302 
303 	if (pos >= azx_dev->core.bufsize)
304 		pos = 0;
305 
306 	if (substream->runtime) {
307 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
308 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
309 
310 		if (chip->get_delay[stream])
311 			delay += chip->get_delay[stream](chip, azx_dev, pos);
312 		if (hinfo->ops.get_delay)
313 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
314 						      substream);
315 		substream->runtime->delay = delay;
316 	}
317 
318 	trace_azx_get_position(chip, azx_dev, pos, delay);
319 	return pos;
320 }
321 EXPORT_SYMBOL_GPL(azx_get_position);
322 
323 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
324 {
325 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
326 	struct azx *chip = apcm->chip;
327 	struct azx_dev *azx_dev = get_azx_dev(substream);
328 	return bytes_to_frames(substream->runtime,
329 			       azx_get_position(chip, azx_dev));
330 }
331 
332 /*
333  * azx_scale64: Scale base by num/den without overflowing
334  *
335  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
336  *
337  * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48000
338  * seconds, which is about 384307 seconds, i.e. ~4.5 days.
339  *
340  * This scales the calculation so that overflow will still happen, but only
341  * after 2^64 / 48000 secs, which is pretty large!
342  *
343  * In the calculation below:
344  *	base may overflow, but since there isn’t any additional division
345  *	performed on base it’s OK
346  *	rem can’t overflow because rem < den and both are 32-bit values
347  */
348 
349 #ifdef CONFIG_X86
350 static u64 azx_scale64(u64 base, u32 num, u32 den)
351 {
352 	u64 rem;
353 
354 	rem = do_div(base, den);
355 
356 	base *= num;
357 	rem *= num;
358 
359 	do_div(rem, den);
360 
361 	return base + rem;
362 }
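/*
 * Illustrative example (not from the original source): with base = 2^32 link
 * frames, num = NSEC_PER_SEC and den = 48000, base/den = 89478 with
 * rem = 23296, so the result is 89478 * 10^9 + 23296 * 10^9 / 48000, about
 * 89478485333333 ns (~24.9 h) -- the same value as base * num / den, but
 * computed so that overflow is pushed out to much larger base values
 * (cf. the comment above).
 */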
363 
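/*
 * Added summary of the capture sequence below: program GTSCC with the DMA
 * channel and direction, enable the timestamp capture, poll until the TSCCD
 * "done" bit is set, then read the latched wall-clock frame/cycle counters,
 * the ART counter (later converted to TSC) and the link position counter.
 * The read is retried if it landed too close to a wall-clock frame rollover.
 */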
364 static int azx_get_sync_time(ktime_t *device,
365 		struct system_counterval_t *system, void *ctx)
366 {
367 	struct snd_pcm_substream *substream = ctx;
368 	struct azx_dev *azx_dev = get_azx_dev(substream);
369 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
370 	struct azx *chip = apcm->chip;
371 	struct snd_pcm_runtime *runtime;
372 	u64 ll_counter, ll_counter_l, ll_counter_h;
373 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
374 	u32 wallclk_ctr, wallclk_cycles;
375 	bool direction;
376 	u32 dma_select;
377 	u32 timeout;
378 	u32 retry_count = 0;
379 
380 	runtime = substream->runtime;
381 
382 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
383 		direction = 1;
384 	else
385 		direction = 0;
386 
387 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
388 	do {
389 		timeout = 100;
390 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
391 					(azx_dev->core.stream_tag - 1);
392 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
393 
394 		/* Enable the capture */
395 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
396 
397 		while (timeout) {
398 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
399 						GTSCC_TSCCD_MASK)
400 				break;
401 
402 			timeout--;
403 		}
404 
405 		if (!timeout) {
406 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
407 			return -EIO;
408 		}
409 
410 		/* Read wall clock counter */
411 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
412 
413 		/* Read TSC counter */
414 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
415 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
416 
417 		/* Read Link counter */
418 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
419 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
420 
421 		/* Ack: registers read done */
422 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
423 
424 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
425 						tsc_counter_l;
426 
427 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
428 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
429 
430 		/*
431 		 * An error can occur near frame "rollover".  The number of
432 		 * clocks in the current frame indicates whether this error
433 		 * may have occurred.  Here we use a margin of 10 cycles,
434 		 * i.e. HDA_MAX_CYCLE_OFFSET.
435 		 */
436 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
437 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
438 			break;
439 
440 		/*
441 		 * Sleep before we read again, else we may again get a
442 		 * value near MAX_CYCLE.  Try to sleep for a different
443 		 * amount of time each retry so we don't hit the same number again.
444 		 */
445 		udelay(retry_count++);
446 
447 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
448 
449 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
450 		dev_err_ratelimited(chip->card->dev,
451 			"Error in WALFCC cycle count\n");
452 		return -EIO;
453 	}
454 
455 	*device = ns_to_ktime(azx_scale64(ll_counter,
456 				NSEC_PER_SEC, runtime->rate));
457 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
458 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
459 
460 	*system = convert_art_to_tsc(tsc_counter);
461 
462 	return 0;
463 }
464 
465 #else
466 static int azx_get_sync_time(ktime_t *device,
467 		struct system_counterval_t *system, void *ctx)
468 {
469 	return -ENXIO;
470 }
471 #endif
472 
473 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
474 			      struct system_device_crosststamp *xtstamp)
475 {
476 	return get_device_system_crosststamp(azx_get_sync_time,
477 					substream, NULL, xtstamp);
478 }
479 
480 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
481 				struct snd_pcm_audio_tstamp_config *ts)
482 {
483 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
484 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
485 			return true;
486 
487 	return false;
488 }
489 
490 static int azx_get_time_info(struct snd_pcm_substream *substream,
491 			struct timespec64 *system_ts, struct timespec64 *audio_ts,
492 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
493 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
494 {
495 	struct azx_dev *azx_dev = get_azx_dev(substream);
496 	struct snd_pcm_runtime *runtime = substream->runtime;
497 	struct system_device_crosststamp xtstamp;
498 	int ret;
499 	u64 nsec;
500 
501 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
502 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
503 
504 		snd_pcm_gettime(substream->runtime, system_ts);
505 
506 		nsec = timecounter_read(&azx_dev->core.tc);
507 		nsec = div_u64(nsec, 3); /* can be optimized */
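		/*
		 * Added note (assumption): the timecounter is set up so that
		 * one 24 MHz wall-clock cycle advances it by 125 units
		 * (125/3 ns per cycle), hence the division by 3 above to get
		 * nanoseconds.
		 */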
508 		if (audio_tstamp_config->report_delay)
509 			nsec = azx_adjust_codec_delay(substream, nsec);
510 
511 		*audio_ts = ns_to_timespec64(nsec);
512 
513 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
514 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
515 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
516 
517 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
518 
519 		ret = azx_get_crosststamp(substream, &xtstamp);
520 		if (ret)
521 			return ret;
522 
523 		switch (runtime->tstamp_type) {
524 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
525 			return -EINVAL;
526 
527 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
528 			*system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
529 			break;
530 
531 		default:
532 			*system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
533 			break;
534 
535 		}
536 
537 		*audio_ts = ktime_to_timespec64(xtstamp.device);
538 
539 		audio_tstamp_report->actual_type =
540 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
541 		audio_tstamp_report->accuracy_report = 1;
542 		/* 24 MHz WallClock == 42ns resolution */
543 		audio_tstamp_report->accuracy = 42;
544 
545 	} else {
546 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
547 	}
548 
549 	return 0;
550 }
551 
552 static const struct snd_pcm_hardware azx_pcm_hw = {
553 	.info =			(SNDRV_PCM_INFO_MMAP |
554 				 SNDRV_PCM_INFO_INTERLEAVED |
555 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
556 				 SNDRV_PCM_INFO_MMAP_VALID |
557 				 /* No full-resume yet implemented */
558 				 /* SNDRV_PCM_INFO_RESUME |*/
559 				 SNDRV_PCM_INFO_PAUSE |
560 				 SNDRV_PCM_INFO_SYNC_START |
561 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
562 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
563 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
564 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
565 	.rates =		SNDRV_PCM_RATE_48000,
566 	.rate_min =		48000,
567 	.rate_max =		48000,
568 	.channels_min =		2,
569 	.channels_max =		2,
570 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
571 	.period_bytes_min =	128,
572 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
573 	.periods_min =		2,
574 	.periods_max =		AZX_MAX_FRAG,
575 	.fifo_size =		0,
576 };
577 
578 static int azx_pcm_open(struct snd_pcm_substream *substream)
579 {
580 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
581 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
582 	struct azx *chip = apcm->chip;
583 	struct azx_dev *azx_dev;
584 	struct snd_pcm_runtime *runtime = substream->runtime;
585 	int err;
586 	int buff_step;
587 
588 	snd_hda_codec_pcm_get(apcm->info);
589 	mutex_lock(&chip->open_mutex);
590 	azx_dev = azx_assign_device(chip, substream);
591 	trace_azx_pcm_open(chip, azx_dev);
592 	if (azx_dev == NULL) {
593 		err = -EBUSY;
594 		goto unlock;
595 	}
596 	runtime->private_data = azx_dev;
597 
598 	runtime->hw = azx_pcm_hw;
599 	if (chip->gts_present)
600 		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
601 	runtime->hw.channels_min = hinfo->channels_min;
602 	runtime->hw.channels_max = hinfo->channels_max;
603 	runtime->hw.formats = hinfo->formats;
604 	runtime->hw.rates = hinfo->rates;
605 	snd_pcm_limit_hw_rates(runtime);
606 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
607 
608 	/* avoid wrap-around with wall-clock */
609 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
610 				     20,
611 				     178000000);
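	/*
	 * Added note: BUFFER_TIME is in microseconds here; the 32-bit 24 MHz
	 * wall clock wraps after 2^32 / 24e6 ~= 179 s, hence the 178 s upper
	 * bound above.
	 */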
612 
613 	if (chip->align_buffer_size)
614 		/* constrain buffer sizes to be a multiple of 128
615 		   bytes. This is more efficient in terms of memory
616 		   access but isn't required by the HDA spec, and it
617 		   prevents users from specifying exact period/buffer
618 		   sizes. For example, at 44.1 kHz a period size set
619 		   to 20 ms will be rounded to 19.59 ms. */
620 		buff_step = 128;
621 	else
622 		/* Don't enforce steps on buffer sizes; they still need
623 		   to be a multiple of 4 bytes (HDA spec). Tested on
624 		   Intel HDA controllers; may not work on all devices,
625 		   in which case this option needs to be disabled. */
626 		buff_step = 4;
627 
628 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
629 				   buff_step);
630 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
631 				   buff_step);
632 	snd_hda_power_up(apcm->codec);
633 	if (hinfo->ops.open)
634 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
635 	else
636 		err = -ENODEV;
637 	if (err < 0) {
638 		azx_release_device(azx_dev);
639 		goto powerdown;
640 	}
641 	snd_pcm_limit_hw_rates(runtime);
642 	/* sanity check */
643 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
644 	    snd_BUG_ON(!runtime->hw.channels_max) ||
645 	    snd_BUG_ON(!runtime->hw.formats) ||
646 	    snd_BUG_ON(!runtime->hw.rates)) {
647 		azx_release_device(azx_dev);
648 		if (hinfo->ops.close)
649 			hinfo->ops.close(hinfo, apcm->codec, substream);
650 		err = -EINVAL;
651 		goto powerdown;
652 	}
653 
654 	/* disable LINK_ATIME timestamps for capture streams
655 	   until we figure out how to handle digital inputs */
656 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
657 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
658 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
659 	}
660 
661 	snd_pcm_set_sync(substream);
662 	mutex_unlock(&chip->open_mutex);
663 	return 0;
664 
665  powerdown:
666 	snd_hda_power_down(apcm->codec);
667  unlock:
668 	mutex_unlock(&chip->open_mutex);
669 	snd_hda_codec_pcm_put(apcm->info);
670 	return err;
671 }
672 
673 static const struct snd_pcm_ops azx_pcm_ops = {
674 	.open = azx_pcm_open,
675 	.close = azx_pcm_close,
676 	.hw_params = azx_pcm_hw_params,
677 	.hw_free = azx_pcm_hw_free,
678 	.prepare = azx_pcm_prepare,
679 	.trigger = azx_pcm_trigger,
680 	.pointer = azx_pcm_pointer,
681 	.get_time_info =  azx_get_time_info,
682 };
683 
684 static void azx_pcm_free(struct snd_pcm *pcm)
685 {
686 	struct azx_pcm *apcm = pcm->private_data;
687 	if (apcm) {
688 		list_del(&apcm->list);
689 		apcm->info->pcm = NULL;
690 		kfree(apcm);
691 	}
692 }
693 
694 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
695 
696 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
697 			      struct hda_pcm *cpcm)
698 {
699 	struct hdac_bus *bus = &_bus->core;
700 	struct azx *chip = bus_to_azx(bus);
701 	struct snd_pcm *pcm;
702 	struct azx_pcm *apcm;
703 	int pcm_dev = cpcm->device;
704 	unsigned int size;
705 	int s, err;
706 	int type = SNDRV_DMA_TYPE_DEV_SG;
707 
708 	list_for_each_entry(apcm, &chip->pcm_list, list) {
709 		if (apcm->pcm->device == pcm_dev) {
710 			dev_err(chip->card->dev, "PCM %d already exists\n",
711 				pcm_dev);
712 			return -EBUSY;
713 		}
714 	}
715 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
716 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
717 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
718 			  &pcm);
719 	if (err < 0)
720 		return err;
721 	strscpy(pcm->name, cpcm->name, sizeof(pcm->name));
722 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
723 	if (apcm == NULL) {
724 		snd_device_free(chip->card, pcm);
725 		return -ENOMEM;
726 	}
727 	apcm->chip = chip;
728 	apcm->pcm = pcm;
729 	apcm->codec = codec;
730 	apcm->info = cpcm;
731 	pcm->private_data = apcm;
732 	pcm->private_free = azx_pcm_free;
733 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
734 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
735 	list_add_tail(&apcm->list, &chip->pcm_list);
736 	cpcm->pcm = pcm;
737 	for (s = 0; s < 2; s++) {
738 		if (cpcm->stream[s].substreams)
739 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
740 	}
741 	/* buffer pre-allocation */
742 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
743 	if (size > MAX_PREALLOC_SIZE)
744 		size = MAX_PREALLOC_SIZE;
745 	if (chip->uc_buffer)
746 		type = SNDRV_DMA_TYPE_DEV_WC_SG;
747 	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
748 				       size, MAX_PREALLOC_SIZE);
749 	return 0;
750 }
751 
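/*
 * Added note: the upper four bits (31:28) of an HD-audio verb carry the
 * codec address, so e.g. a verb of 0x300f0000 (illustrative value) targets
 * codec #3.
 */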
752 static unsigned int azx_command_addr(u32 cmd)
753 {
754 	unsigned int addr = cmd >> 28;
755 
756 	if (addr >= AZX_MAX_CODECS) {
757 		snd_BUG();
758 		addr = 0;
759 	}
760 
761 	return addr;
762 }
763 
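/*
 * Added summary of the recovery cascade in azx_rirb_get_response(): on a
 * missing response, first switch the bus to polling mode and retry, then
 * disable MSI and retry, and finally either trigger a bus reset (if allowed)
 * or fall back to the single-command mode, depending on the chip flags.
 */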
764 /* receive a response */
765 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
766 				 unsigned int *res)
767 {
768 	struct azx *chip = bus_to_azx(bus);
769 	struct hda_bus *hbus = &chip->bus;
770 	int err;
771 
772  again:
773 	err = snd_hdac_bus_get_response(bus, addr, res);
774 	if (!err)
775 		return 0;
776 
777 	if (hbus->no_response_fallback)
778 		return -EIO;
779 
780 	if (!bus->polling_mode) {
781 		dev_warn(chip->card->dev,
782 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
783 			 bus->last_cmd[addr]);
784 		bus->polling_mode = 1;
785 		goto again;
786 	}
787 
788 	if (chip->msi) {
789 		dev_warn(chip->card->dev,
790 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
791 			 bus->last_cmd[addr]);
792 		if (chip->ops->disable_msi_reset_irq &&
793 		    chip->ops->disable_msi_reset_irq(chip) < 0)
794 			return -EIO;
795 		goto again;
796 	}
797 
798 	if (chip->probing) {
799 		/* If this critical timeout happens during the codec probing
800 		 * phase, this is likely an access to a non-existent codec
801 		 * slot.  Better to return an error and reset the system.
802 		 */
803 		return -EIO;
804 	}
805 
806 	/* no fallback mechanism? */
807 	if (!chip->fallback_to_single_cmd)
808 		return -EIO;
809 
810 	/* a fatal communication error; we need either to reset the bus or to
811 	 * fall back to the single_cmd mode
812 	 */
813 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
814 		hbus->response_reset = 1;
815 		dev_err(chip->card->dev,
816 			"No response from codec, resetting bus: last cmd=0x%08x\n",
817 			bus->last_cmd[addr]);
818 		return -EAGAIN; /* give a chance to retry */
819 	}
820 
821 	dev_err(chip->card->dev,
822 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
823 		bus->last_cmd[addr]);
824 	chip->single_cmd = 1;
825 	hbus->response_reset = 0;
826 	snd_hdac_bus_stop_cmd_io(bus);
827 	return -EIO;
828 }
829 
830 /*
831  * Use the single immediate command instead of CORB/RIRB for simplicity
832  *
833  * Note: according to Intel, this is not the preferred usage.  The command
834  *       interface was intended for the BIOS only, and may get confused with
835  *       unsolicited responses.  So we shouldn't use it for normal operation
836  *       from the driver.
837  *       The code is left here, however, for debugging/testing purposes.
838  */
839 
840 /* receive a response */
841 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
842 {
843 	int timeout = 50;
844 
845 	while (timeout--) {
846 		/* check IRV busy bit */
847 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
848 			/* reuse rirb.res as the response return value */
849 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
850 			return 0;
851 		}
852 		udelay(1);
853 	}
854 	if (printk_ratelimit())
855 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
856 			azx_readw(chip, IRS));
857 	azx_bus(chip)->rirb.res[addr] = -1;
858 	return -EIO;
859 }
860 
861 /* send a command */
862 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
863 {
864 	struct azx *chip = bus_to_azx(bus);
865 	unsigned int addr = azx_command_addr(val);
866 	int timeout = 50;
867 
868 	bus->last_cmd[azx_command_addr(val)] = val;
869 	while (timeout--) {
870 		/* check ICB busy bit */
871 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
872 			/* Clear IRV valid bit */
873 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
874 				   AZX_IRS_VALID);
875 			azx_writel(chip, IC, val);
876 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
877 				   AZX_IRS_BUSY);
878 			return azx_single_wait_for_response(chip, addr);
879 		}
880 		udelay(1);
881 	}
882 	if (printk_ratelimit())
883 		dev_dbg(chip->card->dev,
884 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
885 			azx_readw(chip, IRS), val);
886 	return -EIO;
887 }
888 
889 /* receive a response */
890 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
891 				   unsigned int *res)
892 {
893 	if (res)
894 		*res = bus->rirb.res[addr];
895 	return 0;
896 }
897 
898 /*
899  * Below are the main callbacks from hda_codec.
900  *
901  * They are just the skeleton to call sub-callbacks according to the
902  * current setting of chip->single_cmd.
903  */
904 
905 /* send a command */
906 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
907 {
908 	struct azx *chip = bus_to_azx(bus);
909 
910 	if (chip->disabled)
911 		return 0;
912 	if (chip->single_cmd)
913 		return azx_single_send_cmd(bus, val);
914 	else
915 		return snd_hdac_bus_send_cmd(bus, val);
916 }
917 
918 /* get a response */
919 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
920 			    unsigned int *res)
921 {
922 	struct azx *chip = bus_to_azx(bus);
923 
924 	if (chip->disabled)
925 		return 0;
926 	if (chip->single_cmd)
927 		return azx_single_get_response(bus, addr, res);
928 	else
929 		return azx_rirb_get_response(bus, addr, res);
930 }
931 
932 static const struct hdac_bus_ops bus_core_ops = {
933 	.command = azx_send_cmd,
934 	.get_response = azx_get_response,
935 };
936 
937 #ifdef CONFIG_SND_HDA_DSP_LOADER
938 /*
939  * DSP loading code (e.g. for CA0132)
940  */
941 
942 /* use the first stream for loading DSP */
943 static struct azx_dev *
944 azx_get_dsp_loader_dev(struct azx *chip)
945 {
946 	struct hdac_bus *bus = azx_bus(chip);
947 	struct hdac_stream *s;
948 
949 	list_for_each_entry(s, &bus->stream_list, list)
950 		if (s->index == chip->playback_index_offset)
951 			return stream_to_azx_dev(s);
952 
953 	return NULL;
954 }
955 
956 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
957 				   unsigned int byte_size,
958 				   struct snd_dma_buffer *bufp)
959 {
960 	struct hdac_bus *bus = &codec->bus->core;
961 	struct azx *chip = bus_to_azx(bus);
962 	struct azx_dev *azx_dev;
963 	struct hdac_stream *hstr;
964 	bool saved = false;
965 	int err;
966 
967 	azx_dev = azx_get_dsp_loader_dev(chip);
968 	hstr = azx_stream(azx_dev);
969 	spin_lock_irq(&bus->reg_lock);
970 	if (hstr->opened) {
971 		chip->saved_azx_dev = *azx_dev;
972 		saved = true;
973 	}
974 	spin_unlock_irq(&bus->reg_lock);
975 
976 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
977 	if (err < 0) {
978 		spin_lock_irq(&bus->reg_lock);
979 		if (saved)
980 			*azx_dev = chip->saved_azx_dev;
981 		spin_unlock_irq(&bus->reg_lock);
982 		return err;
983 	}
984 
985 	hstr->prepared = 0;
986 	return err;
987 }
988 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
989 
990 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
991 {
992 	struct hdac_bus *bus = &codec->bus->core;
993 	struct azx *chip = bus_to_azx(bus);
994 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
995 
996 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
997 }
998 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
999 
1000 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1001 				    struct snd_dma_buffer *dmab)
1002 {
1003 	struct hdac_bus *bus = &codec->bus->core;
1004 	struct azx *chip = bus_to_azx(bus);
1005 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1006 	struct hdac_stream *hstr = azx_stream(azx_dev);
1007 
1008 	if (!dmab->area || !hstr->locked)
1009 		return;
1010 
1011 	snd_hdac_dsp_cleanup(hstr, dmab);
1012 	spin_lock_irq(&bus->reg_lock);
1013 	if (hstr->opened)
1014 		*azx_dev = chip->saved_azx_dev;
1015 	hstr->locked = false;
1016 	spin_unlock_irq(&bus->reg_lock);
1017 }
1018 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1019 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1020 
1021 /*
1022  * reset and start the controller registers
1023  */
1024 void azx_init_chip(struct azx *chip, bool full_reset)
1025 {
1026 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1027 		/* correct RINTCNT for CXT */
1028 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1029 			azx_writew(chip, RINTCNT, 0xc0);
1030 	}
1031 }
1032 EXPORT_SYMBOL_GPL(azx_init_chip);
1033 
1034 void azx_stop_all_streams(struct azx *chip)
1035 {
1036 	struct hdac_bus *bus = azx_bus(chip);
1037 	struct hdac_stream *s;
1038 
1039 	list_for_each_entry(s, &bus->stream_list, list)
1040 		snd_hdac_stream_stop(s);
1041 }
1042 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1043 
1044 void azx_stop_chip(struct azx *chip)
1045 {
1046 	snd_hdac_bus_stop_chip(azx_bus(chip));
1047 }
1048 EXPORT_SYMBOL_GPL(azx_stop_chip);
1049 
1050 /*
1051  * interrupt handler
1052  */
1053 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1054 {
1055 	struct azx *chip = bus_to_azx(bus);
1056 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1057 
1058 	/* check whether this IRQ is really acceptable */
1059 	if (!chip->ops->position_check ||
1060 	    chip->ops->position_check(chip, azx_dev)) {
1061 		spin_unlock(&bus->reg_lock);
1062 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1063 		spin_lock(&bus->reg_lock);
1064 	}
1065 }
1066 
1067 irqreturn_t azx_interrupt(int irq, void *dev_id)
1068 {
1069 	struct azx *chip = dev_id;
1070 	struct hdac_bus *bus = azx_bus(chip);
1071 	u32 status;
1072 	bool active, handled = false;
1073 	int repeat = 0; /* count for avoiding endless loop */
1074 
1075 #ifdef CONFIG_PM
1076 	if (azx_has_pm_runtime(chip))
1077 		if (!pm_runtime_active(chip->card->dev))
1078 			return IRQ_NONE;
1079 #endif
1080 
1081 	spin_lock(&bus->reg_lock);
1082 
1083 	if (chip->disabled)
1084 		goto unlock;
1085 
1086 	do {
1087 		status = azx_readl(chip, INTSTS);
1088 		if (status == 0 || status == 0xffffffff)
1089 			break;
1090 
1091 		handled = true;
1092 		active = false;
1093 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1094 			active = true;
1095 
1096 		status = azx_readb(chip, RIRBSTS);
1097 		if (status & RIRB_INT_MASK) {
1098 			/*
1099 			 * Clearing the interrupt status here ensures that no
1100 			 * interrupt gets masked after the RIRB wp is read in
1101 			 * snd_hdac_bus_update_rirb. This avoids a possible
1102 			 * race condition where codec response in RIRB may
1103 			 * remain unserviced by IRQ, eventually falling back
1104 			 * to polling mode in azx_rirb_get_response.
1105 			 */
1106 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1107 			active = true;
1108 			if (status & RIRB_INT_RESPONSE) {
1109 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1110 					udelay(80);
1111 				snd_hdac_bus_update_rirb(bus);
1112 			}
1113 		}
1114 	} while (active && ++repeat < 10);
1115 
1116  unlock:
1117 	spin_unlock(&bus->reg_lock);
1118 
1119 	return IRQ_RETVAL(handled);
1120 }
1121 EXPORT_SYMBOL_GPL(azx_interrupt);
1122 
1123 /*
1124  * Codec interface
1125  */
1126 
1127 /*
1128  * Probe the given codec address
1129  */
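/*
 * Added note: the probe verb built below reads the AC_PAR_VENDOR_ID
 * parameter of the root node (NID 0); e.g. for codec address 2 the resulting
 * verb is 0x200f0000 (illustrative value).  Any valid response means a codec
 * is present at that address.
 */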
1130 static int probe_codec(struct azx *chip, int addr)
1131 {
1132 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1133 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1134 	struct hdac_bus *bus = azx_bus(chip);
1135 	int err;
1136 	unsigned int res = -1;
1137 
1138 	mutex_lock(&bus->cmd_mutex);
1139 	chip->probing = 1;
1140 	azx_send_cmd(bus, cmd);
1141 	err = azx_get_response(bus, addr, &res);
1142 	chip->probing = 0;
1143 	mutex_unlock(&bus->cmd_mutex);
1144 	if (err < 0 || res == -1)
1145 		return -EIO;
1146 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1147 	return 0;
1148 }
1149 
1150 void snd_hda_bus_reset(struct hda_bus *bus)
1151 {
1152 	struct azx *chip = bus_to_azx(&bus->core);
1153 
1154 	bus->in_reset = 1;
1155 	azx_stop_chip(chip);
1156 	azx_init_chip(chip, true);
1157 	if (bus->core.chip_init)
1158 		snd_hda_bus_reset_codecs(bus);
1159 	bus->in_reset = 0;
1160 }
1161 
1162 /* HD-audio bus initialization */
1163 int azx_bus_init(struct azx *chip, const char *model)
1164 {
1165 	struct hda_bus *bus = &chip->bus;
1166 	int err;
1167 
1168 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1169 	if (err < 0)
1170 		return err;
1171 
1172 	bus->card = chip->card;
1173 	mutex_init(&bus->prepare_mutex);
1174 	bus->pci = chip->pci;
1175 	bus->modelname = model;
1176 	bus->mixer_assigned = -1;
1177 	bus->core.snoop = azx_snoop(chip);
1178 	if (chip->get_position[0] != azx_get_pos_lpib ||
1179 	    chip->get_position[1] != azx_get_pos_lpib)
1180 		bus->core.use_posbuf = true;
1181 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1182 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1183 		bus->core.corbrp_self_clear = true;
1184 
1185 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1186 		bus->core.align_bdle_4k = true;
1187 
1188 	/* enable sync_write flag for stable communication as default */
1189 	bus->core.sync_write = 1;
1190 
1191 	return 0;
1192 }
1193 EXPORT_SYMBOL_GPL(azx_bus_init);
1194 
1195 /* Probe codecs */
1196 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1197 {
1198 	struct hdac_bus *bus = azx_bus(chip);
1199 	int c, codecs, err;
1200 
1201 	codecs = 0;
1202 	if (!max_slots)
1203 		max_slots = AZX_DEFAULT_CODECS;
1204 
1205 	/* First try to probe all given codec slots */
1206 	for (c = 0; c < max_slots; c++) {
1207 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1208 			if (probe_codec(chip, c) < 0) {
1209 				/* Some BIOSen report codec addresses at which
1210 				 * no codec actually exists
1211 				 */
1212 				dev_warn(chip->card->dev,
1213 					 "Codec #%d probe error; disabling it...\n", c);
1214 				bus->codec_mask &= ~(1 << c);
1215 				/* Worse, accessing a non-existent codec
1216 				 * often screws up the controller chip and
1217 				 * disturbs further communication.
1218 				 * Thus if an error occurs during probing,
1219 				 * it is better to reset the controller chip
1220 				 * to get back to a sane state.
1221 				 */
1222 				azx_stop_chip(chip);
1223 				azx_init_chip(chip, true);
1224 			}
1225 		}
1226 	}
1227 
1228 	/* Then create codec instances */
1229 	for (c = 0; c < max_slots; c++) {
1230 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1231 			struct hda_codec *codec;
1232 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1233 			if (err < 0)
1234 				continue;
1235 			codec->jackpoll_interval = chip->jackpoll_interval;
1236 			codec->beep_mode = chip->beep_mode;
1237 			codecs++;
1238 		}
1239 	}
1240 	if (!codecs) {
1241 		dev_err(chip->card->dev, "no codecs initialized\n");
1242 		return -ENXIO;
1243 	}
1244 	return 0;
1245 }
1246 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1247 
1248 /* configure each codec instance */
1249 int azx_codec_configure(struct azx *chip)
1250 {
1251 	struct hda_codec *codec, *next;
1252 	int success = 0;
1253 
1254 	list_for_each_codec(codec, &chip->bus) {
1255 		if (!snd_hda_codec_configure(codec))
1256 			success++;
1257 	}
1258 
1259 	if (success) {
1260 		/* unregister failed codecs if any codec has been probed */
1261 		list_for_each_codec_safe(codec, next, &chip->bus) {
1262 			if (!codec->configured) {
1263 				codec_err(codec, "Unable to configure, disabling\n");
1264 				snd_hdac_device_unregister(&codec->core);
1265 			}
1266 		}
1267 	}
1268 
1269 	return success ? 0 : -ENODEV;
1270 }
1271 EXPORT_SYMBOL_GPL(azx_codec_configure);
1272 
1273 static int stream_direction(struct azx *chip, unsigned char index)
1274 {
1275 	if (index >= chip->capture_index_offset &&
1276 	    index < chip->capture_index_offset + chip->capture_streams)
1277 		return SNDRV_PCM_STREAM_CAPTURE;
1278 	return SNDRV_PCM_STREAM_PLAYBACK;
1279 }
1280 
1281 /* initialize SD streams */
1282 int azx_init_streams(struct azx *chip)
1283 {
1284 	int i;
1285 	int stream_tags[2] = { 0, 0 };
1286 
1287 	/* initialize each stream (aka device):
1288 	 * assign the starting BDL address to each stream (device)
1289 	 * and initialize it
1290 	 */
1291 	for (i = 0; i < chip->num_streams; i++) {
1292 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1293 		int dir, tag;
1294 
1295 		if (!azx_dev)
1296 			return -ENOMEM;
1297 
1298 		dir = stream_direction(chip, i);
1299 		/* the stream tag must be unique within each
1300 		 * stream direction group; valid values are 1...15.
1301 		 * Use a separate per-direction tag space if the
1302 		 * AZX_DCAPS_SEPARATE_STREAM_TAG flag is set
1303 		 * (one space for capture, one for playback).
1304 		 */
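		/*
		 * Added example: on a typical controller with 4 capture
		 * streams followed by 4 playback streams, the legacy scheme
		 * assigns tags 1-4 to capture and 5-8 to playback
		 * (tag = index + 1), whereas with
		 * AZX_DCAPS_SEPARATE_STREAM_TAG each direction gets its own
		 * tag space 1-4.
		 */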
1305 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1306 			tag = ++stream_tags[dir];
1307 		else
1308 			tag = i + 1;
1309 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1310 				     i, dir, tag);
1311 	}
1312 
1313 	return 0;
1314 }
1315 EXPORT_SYMBOL_GPL(azx_init_streams);
1316 
1317 void azx_free_streams(struct azx *chip)
1318 {
1319 	struct hdac_bus *bus = azx_bus(chip);
1320 	struct hdac_stream *s;
1321 
1322 	while (!list_empty(&bus->stream_list)) {
1323 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1324 		list_del(&s->list);
1325 		kfree(stream_to_azx_dev(s));
1326 	}
1327 }
1328 EXPORT_SYMBOL_GPL(azx_free_streams);
1329