1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Implementation of primary alsa driver code base for Intel HD Audio.
5  *
6  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
7  *
8  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9  *                     PeiSen Hou <pshou@realtek.com.tw>
10  */
11 
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 
20 #ifdef CONFIG_X86
21 /* for ART (Always Running Timer) to TSC conversion */
22 #include <asm/tsc.h>
23 #endif
24 
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include "hda_controller_trace.h"
31 
32 /* DSP lock helpers */
33 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
34 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
35 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
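/*
 * Note: these wrap the hdac stream "locked" state used by the DSP loader
 * (CONFIG_SND_HDA_DSP_LOADER) further below.  While a stream is locked for
 * a DSP firmware download, the PCM callbacks back off with -EBUSY or -EPIPE
 * instead of touching the stream registers.
 */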
36 
37 /* assign a stream for the PCM */
38 static inline struct azx_dev *
39 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
40 {
41 	struct hdac_stream *s;
42 
43 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
44 	if (!s)
45 		return NULL;
46 	return stream_to_azx_dev(s);
47 }
48 
49 /* release the assigned stream */
50 static inline void azx_release_device(struct azx_dev *azx_dev)
51 {
52 	snd_hdac_stream_release(azx_stream(azx_dev));
53 }
54 
55 static inline struct hda_pcm_stream *
56 to_hda_pcm_stream(struct snd_pcm_substream *substream)
57 {
58 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
59 	return &apcm->info->stream[substream->stream];
60 }
61 
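/*
 * Fold the codec-side delay (reported in frames) into a link timestamp.
 * Rough illustrative example: a codec delay of 64 frames at 48000 Hz is
 * 64 * 10^9 / 48000 ns ~= 1.33 ms; it is added for capture streams and
 * subtracted (clamped at zero) for playback streams.
 */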
62 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
63 				u64 nsec)
64 {
65 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
66 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
67 	u64 codec_frames, codec_nsecs;
68 
69 	if (!hinfo->ops.get_delay)
70 		return nsec;
71 
72 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
73 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
74 			      substream->runtime->rate);
75 
76 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
77 		return nsec + codec_nsecs;
78 
79 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
80 }
81 
82 /*
83  * PCM ops
84  */
85 
86 static int azx_pcm_close(struct snd_pcm_substream *substream)
87 {
88 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
89 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
90 	struct azx *chip = apcm->chip;
91 	struct azx_dev *azx_dev = get_azx_dev(substream);
92 
93 	trace_azx_pcm_close(chip, azx_dev);
94 	mutex_lock(&chip->open_mutex);
95 	azx_release_device(azx_dev);
96 	if (hinfo->ops.close)
97 		hinfo->ops.close(hinfo, apcm->codec, substream);
98 	snd_hda_power_down(apcm->codec);
99 	mutex_unlock(&chip->open_mutex);
100 	snd_hda_codec_pcm_put(apcm->info);
101 	return 0;
102 }
103 
104 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
105 			     struct snd_pcm_hw_params *hw_params)
106 {
107 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
108 	struct azx *chip = apcm->chip;
109 	struct azx_dev *azx_dev = get_azx_dev(substream);
110 	int ret;
111 
112 	trace_azx_pcm_hw_params(chip, azx_dev);
113 	dsp_lock(azx_dev);
114 	if (dsp_is_locked(azx_dev)) {
115 		ret = -EBUSY;
116 		goto unlock;
117 	}
118 
119 	azx_dev->core.bufsize = 0;
120 	azx_dev->core.period_bytes = 0;
121 	azx_dev->core.format_val = 0;
122 	ret = snd_pcm_lib_malloc_pages(substream,
123 				       params_buffer_bytes(hw_params));
124 
125 unlock:
126 	dsp_unlock(azx_dev);
127 	return ret;
128 }
129 
130 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
131 {
132 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
133 	struct azx_dev *azx_dev = get_azx_dev(substream);
134 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
135 	int err;
136 
137 	/* reset BDL address */
138 	dsp_lock(azx_dev);
139 	if (!dsp_is_locked(azx_dev))
140 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
141 
142 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
143 
144 	err = snd_pcm_lib_free_pages(substream);
145 	azx_stream(azx_dev)->prepared = 0;
146 	dsp_unlock(azx_dev);
147 	return err;
148 }
149 
150 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
151 {
152 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
153 	struct azx *chip = apcm->chip;
154 	struct azx_dev *azx_dev = get_azx_dev(substream);
155 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
156 	struct snd_pcm_runtime *runtime = substream->runtime;
157 	unsigned int format_val, stream_tag;
158 	int err;
159 	struct hda_spdif_out *spdif =
160 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
161 	unsigned short ctls = spdif ? spdif->ctls : 0;
162 
163 	trace_azx_pcm_prepare(chip, azx_dev);
164 	dsp_lock(azx_dev);
165 	if (dsp_is_locked(azx_dev)) {
166 		err = -EBUSY;
167 		goto unlock;
168 	}
169 
170 	snd_hdac_stream_reset(azx_stream(azx_dev));
171 	format_val = snd_hdac_calc_stream_format(runtime->rate,
172 						runtime->channels,
173 						runtime->format,
174 						hinfo->maxbps,
175 						ctls);
176 	if (!format_val) {
177 		dev_err(chip->card->dev,
178 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
179 			runtime->rate, runtime->channels, runtime->format);
180 		err = -EINVAL;
181 		goto unlock;
182 	}
183 
184 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
185 	if (err < 0)
186 		goto unlock;
187 
188 	snd_hdac_stream_setup(azx_stream(azx_dev));
189 
190 	stream_tag = azx_dev->core.stream_tag;
191 	/* CA-IBG chips need the playback stream starting from 1 */
192 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
193 	    stream_tag > chip->capture_streams)
194 		stream_tag -= chip->capture_streams;
195 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
196 				     azx_dev->core.format_val, substream);
197 
198  unlock:
199 	if (!err)
200 		azx_stream(azx_dev)->prepared = 1;
201 	dsp_unlock(azx_dev);
202 	return err;
203 }
204 
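/*
 * Trigger linked substreams as a group: collect the stream-index bits of
 * all substreams linked on this card, set the corresponding SSYNC bits so
 * the DMA engines start/stop in lockstep, start or stop each stream, wait
 * for the hardware to sync, then clear the SSYNC bits again (and, on
 * start, initialize the per-stream timecounters).
 */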
205 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
206 {
207 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
208 	struct azx *chip = apcm->chip;
209 	struct hdac_bus *bus = azx_bus(chip);
210 	struct azx_dev *azx_dev;
211 	struct snd_pcm_substream *s;
212 	struct hdac_stream *hstr;
213 	bool start;
214 	int sbits = 0;
215 	int sync_reg;
216 
217 	azx_dev = get_azx_dev(substream);
218 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
219 
220 	hstr = azx_stream(azx_dev);
221 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
222 		sync_reg = AZX_REG_OLD_SSYNC;
223 	else
224 		sync_reg = AZX_REG_SSYNC;
225 
226 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
227 		return -EPIPE;
228 
229 	switch (cmd) {
230 	case SNDRV_PCM_TRIGGER_START:
231 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
232 	case SNDRV_PCM_TRIGGER_RESUME:
233 		start = true;
234 		break;
235 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
236 	case SNDRV_PCM_TRIGGER_SUSPEND:
237 	case SNDRV_PCM_TRIGGER_STOP:
238 		start = false;
239 		break;
240 	default:
241 		return -EINVAL;
242 	}
243 
244 	snd_pcm_group_for_each_entry(s, substream) {
245 		if (s->pcm->card != substream->pcm->card)
246 			continue;
247 		azx_dev = get_azx_dev(s);
248 		sbits |= 1 << azx_dev->core.index;
249 		snd_pcm_trigger_done(s, substream);
250 	}
251 
252 	spin_lock(&bus->reg_lock);
253 
254 	/* first, set SYNC bits of corresponding streams */
255 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
256 
257 	snd_pcm_group_for_each_entry(s, substream) {
258 		if (s->pcm->card != substream->pcm->card)
259 			continue;
260 		azx_dev = get_azx_dev(s);
261 		if (start) {
262 			azx_dev->insufficient = 1;
263 			snd_hdac_stream_start(azx_stream(azx_dev), true);
264 		} else {
265 			snd_hdac_stream_stop(azx_stream(azx_dev));
266 		}
267 	}
268 	spin_unlock(&bus->reg_lock);
269 
270 	snd_hdac_stream_sync(hstr, start, sbits);
271 
272 	spin_lock(&bus->reg_lock);
273 	/* reset SYNC bits */
274 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
275 	if (start)
276 		snd_hdac_stream_timecounter_init(hstr, sbits);
277 	spin_unlock(&bus->reg_lock);
278 	return 0;
279 }
280 
281 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
282 {
283 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
284 }
285 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
286 
287 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
288 {
289 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
290 }
291 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
292 
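/*
 * Resolve the current DMA position of a stream: prefer the chip-specific
 * get_position callback, fall back to the DMA position buffer, clamp an
 * out-of-range value to 0, and fold both controller- and codec-side delays
 * into runtime->delay for latency reporting.
 */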
293 unsigned int azx_get_position(struct azx *chip,
294 			      struct azx_dev *azx_dev)
295 {
296 	struct snd_pcm_substream *substream = azx_dev->core.substream;
297 	unsigned int pos;
298 	int stream = substream->stream;
299 	int delay = 0;
300 
301 	if (chip->get_position[stream])
302 		pos = chip->get_position[stream](chip, azx_dev);
303 	else /* use the position buffer as default */
304 		pos = azx_get_pos_posbuf(chip, azx_dev);
305 
306 	if (pos >= azx_dev->core.bufsize)
307 		pos = 0;
308 
309 	if (substream->runtime) {
310 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
311 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
312 
313 		if (chip->get_delay[stream])
314 			delay += chip->get_delay[stream](chip, azx_dev, pos);
315 		if (hinfo->ops.get_delay)
316 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
317 						      substream);
318 		substream->runtime->delay = delay;
319 	}
320 
321 	trace_azx_get_position(chip, azx_dev, pos, delay);
322 	return pos;
323 }
324 EXPORT_SYMBOL_GPL(azx_get_position);
325 
326 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
327 {
328 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
329 	struct azx *chip = apcm->chip;
330 	struct azx_dev *azx_dev = get_azx_dev(substream);
331 	return bytes_to_frames(substream->runtime,
332 			       azx_get_position(chip, azx_dev));
333 }
334 
335 /*
336  * azx_scale64: Scale base by num/den while avoiding premature overflow
337  *
338  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
339  *
340  * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K,
341  * which is about 384307 seconds, i.e. ~4.5 days.
342  *
343  * This scales the calculation so that overflow still happens, but only
344  * after 2^64 / 48000 seconds, which is pretty large!
345  *
346  * In the calculation below:
347  *	base may overflow, but since there isn't any additional division
348  *	performed on base it's OK
349  *	rem can't overflow because both rem and num are 32-bit values
350  */
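/*
 * Worked example (illustrative numbers only): scaling base = 10^12 link
 * frames by NSEC_PER_SEC / 48000 is computed as
 * (10^12 / 48000) * 10^9 + ((10^12 % 48000) * 10^9) / 48000; the
 * remainder is always below 48000, so its product with 10^9 fits easily
 * in 64 bits, and only the already-divided quotient is multiplied directly.
 */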
351 
352 #ifdef CONFIG_X86
353 static u64 azx_scale64(u64 base, u32 num, u32 den)
354 {
355 	u64 rem;
356 
357 	rem = do_div(base, den);
358 
359 	base *= num;
360 	rem *= num;
361 
362 	do_div(rem, den);
363 
364 	return base + rem;
365 }
366 
367 static int azx_get_sync_time(ktime_t *device,
368 		struct system_counterval_t *system, void *ctx)
369 {
370 	struct snd_pcm_substream *substream = ctx;
371 	struct azx_dev *azx_dev = get_azx_dev(substream);
372 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
373 	struct azx *chip = apcm->chip;
374 	struct snd_pcm_runtime *runtime;
375 	u64 ll_counter, ll_counter_l, ll_counter_h;
376 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
377 	u32 wallclk_ctr, wallclk_cycles;
378 	bool direction;
379 	u32 dma_select;
380 	u32 timeout = 200;
381 	u32 retry_count = 0;
382 
383 	runtime = substream->runtime;
384 
385 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
386 		direction = 1;
387 	else
388 		direction = 0;
389 
390 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
391 	do {
392 		timeout = 100;
393 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
394 					(azx_dev->core.stream_tag - 1);
395 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
396 
397 		/* Enable the capture */
398 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
399 
400 		while (timeout) {
401 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
402 						GTSCC_TSCCD_MASK)
403 				break;
404 
405 			timeout--;
406 		}
407 
408 		if (!timeout) {
409 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
410 			return -EIO;
411 		}
412 
413 		/* Read wall clock counter */
414 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
415 
416 		/* Read TSC counter */
417 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
418 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
419 
420 		/* Read Link counter */
421 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
422 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
423 
424 		/* Ack: registers read done */
425 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
426 
427 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
428 						tsc_counter_l;
429 
430 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) |	ll_counter_l;
431 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
432 
433 		/*
434 		 * An error can occur near frame "rollover". The clocks-in-
435 		 * frame value indicates whether this error may have
436 		 * occurred. Here we use a margin of 10 cycles, i.e.
437 		 * HDA_MAX_CYCLE_OFFSET.
438 		 */
439 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
440 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
441 			break;
442 
443 		/*
444 		 * Sleep before reading again, else we may again get a
445 		 * value near MAX_CYCLE. Sleep for a different amount of
446 		 * time on each retry so we don't hit the same number again.
447 		 */
448 		udelay(retry_count++);
449 
450 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
451 
452 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
453 		dev_err_ratelimited(chip->card->dev,
454 			"Error in WALFCC cycle count\n");
455 		return -EIO;
456 	}
457 
458 	*device = ns_to_ktime(azx_scale64(ll_counter,
459 				NSEC_PER_SEC, runtime->rate));
460 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
461 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
462 
463 	*system = convert_art_to_tsc(tsc_counter);
464 
465 	return 0;
466 }
467 
468 #else
469 static int azx_get_sync_time(ktime_t *device,
470 		struct system_counterval_t *system, void *ctx)
471 {
472 	return -ENXIO;
473 }
474 #endif
475 
476 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
477 			      struct system_device_crosststamp *xtstamp)
478 {
479 	return get_device_system_crosststamp(azx_get_sync_time,
480 					substream, NULL, xtstamp);
481 }
482 
483 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
484 				struct snd_pcm_audio_tstamp_config *ts)
485 {
486 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
487 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
488 			return true;
489 
490 	return false;
491 }
492 
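/*
 * Two sampled-timestamp flavours are reported: the legacy LINK_ATIME path
 * reads the per-stream timecounter driven by the 24 MHz HDA wall clock
 * (hence the ~42 ns accuracy quoted below), while the LINK_SYNCHRONIZED
 * path uses get_device_system_crosststamp() to pair the link position
 * with a system clock reading taken at the same instant.
 */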
493 static int azx_get_time_info(struct snd_pcm_substream *substream,
494 			struct timespec *system_ts, struct timespec *audio_ts,
495 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
496 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
497 {
498 	struct azx_dev *azx_dev = get_azx_dev(substream);
499 	struct snd_pcm_runtime *runtime = substream->runtime;
500 	struct system_device_crosststamp xtstamp;
501 	int ret;
502 	u64 nsec;
503 
504 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
505 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
506 
507 		snd_pcm_gettime(substream->runtime, system_ts);
508 
509 		nsec = timecounter_read(&azx_dev->core.tc);
510 		nsec = div_u64(nsec, 3); /* can be optimized */
511 		if (audio_tstamp_config->report_delay)
512 			nsec = azx_adjust_codec_delay(substream, nsec);
513 
514 		*audio_ts = ns_to_timespec(nsec);
515 
516 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
517 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
518 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
519 
520 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
521 
522 		ret = azx_get_crosststamp(substream, &xtstamp);
523 		if (ret)
524 			return ret;
525 
526 		switch (runtime->tstamp_type) {
527 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
528 			return -EINVAL;
529 
530 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
531 			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
532 			break;
533 
534 		default:
535 			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
536 			break;
537 
538 		}
539 
540 		*audio_ts = ktime_to_timespec(xtstamp.device);
541 
542 		audio_tstamp_report->actual_type =
543 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
544 		audio_tstamp_report->accuracy_report = 1;
545 		/* 24 MHz WallClock == 42ns resolution */
546 		audio_tstamp_report->accuracy = 42;
547 
548 	} else {
549 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
550 	}
551 
552 	return 0;
553 }
554 
555 static struct snd_pcm_hardware azx_pcm_hw = {
556 	.info =			(SNDRV_PCM_INFO_MMAP |
557 				 SNDRV_PCM_INFO_INTERLEAVED |
558 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
559 				 SNDRV_PCM_INFO_MMAP_VALID |
560 				 /* No full resume implemented yet */
561 				 /* SNDRV_PCM_INFO_RESUME |*/
562 				 SNDRV_PCM_INFO_PAUSE |
563 				 SNDRV_PCM_INFO_SYNC_START |
564 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
565 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
566 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
567 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
568 	.rates =		SNDRV_PCM_RATE_48000,
569 	.rate_min =		48000,
570 	.rate_max =		48000,
571 	.channels_min =		2,
572 	.channels_max =		2,
573 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
574 	.period_bytes_min =	128,
575 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
576 	.periods_min =		2,
577 	.periods_max =		AZX_MAX_FRAG,
578 	.fifo_size =		0,
579 };
580 
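/*
 * The descriptor above is only a conservative default; azx_pcm_open()
 * overwrites the channel counts, formats and rates with what the codec's
 * hda_pcm_stream actually supports and applies the extra constraints
 * (buffer-time ceiling, 128- or 4-byte size steps) on top of it.
 */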
581 static int azx_pcm_open(struct snd_pcm_substream *substream)
582 {
583 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
584 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
585 	struct azx *chip = apcm->chip;
586 	struct azx_dev *azx_dev;
587 	struct snd_pcm_runtime *runtime = substream->runtime;
588 	int err;
589 	int buff_step;
590 
591 	snd_hda_codec_pcm_get(apcm->info);
592 	mutex_lock(&chip->open_mutex);
593 	azx_dev = azx_assign_device(chip, substream);
594 	trace_azx_pcm_open(chip, azx_dev);
595 	if (azx_dev == NULL) {
596 		err = -EBUSY;
597 		goto unlock;
598 	}
599 	runtime->private_data = azx_dev;
600 
601 	if (chip->gts_present)
602 		azx_pcm_hw.info = azx_pcm_hw.info |
603 			SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
604 
605 	runtime->hw = azx_pcm_hw;
606 	runtime->hw.channels_min = hinfo->channels_min;
607 	runtime->hw.channels_max = hinfo->channels_max;
608 	runtime->hw.formats = hinfo->formats;
609 	runtime->hw.rates = hinfo->rates;
610 	snd_pcm_limit_hw_rates(runtime);
611 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
612 
613 	/* avoid wrap-around with wall-clock */
614 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
615 				     20,
616 				     178000000);
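	/*
	 * (The 32-bit wall clock runs at 24 MHz, so it wraps after roughly
	 * 2^32 / 24e6 ~= 178.9 s; the 178000000 us buffer-time ceiling above
	 * stays just below that.)
	 */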
617 
618 	if (chip->align_buffer_size)
619 		/* constrain buffer sizes to be a multiple of 128
620 		   bytes. This is more efficient in terms of memory
621 		   access but isn't required by the HDA spec and
622 		   prevents users from specifying exact period/buffer
623 		   sizes. For example, at 44.1 kHz a period size set
624 		   to 20 ms will be rounded to 19.59 ms. */
625 		buff_step = 128;
626 	else
627 		/* Don't enforce steps on buffer sizes; they still need
628 		   to be a multiple of 4 bytes (HDA spec). Tested on Intel
629 		   HDA controllers; may not work on all devices, in which
630 		   case this option needs to be disabled. */
631 		buff_step = 4;
632 
633 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
634 				   buff_step);
635 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
636 				   buff_step);
637 	snd_hda_power_up(apcm->codec);
638 	if (hinfo->ops.open)
639 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
640 	else
641 		err = -ENODEV;
642 	if (err < 0) {
643 		azx_release_device(azx_dev);
644 		goto powerdown;
645 	}
646 	snd_pcm_limit_hw_rates(runtime);
647 	/* sanity check */
648 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
649 	    snd_BUG_ON(!runtime->hw.channels_max) ||
650 	    snd_BUG_ON(!runtime->hw.formats) ||
651 	    snd_BUG_ON(!runtime->hw.rates)) {
652 		azx_release_device(azx_dev);
653 		if (hinfo->ops.close)
654 			hinfo->ops.close(hinfo, apcm->codec, substream);
655 		err = -EINVAL;
656 		goto powerdown;
657 	}
658 
659 	/* disable LINK_ATIME timestamps for capture streams
660 	   until we figure out how to handle digital inputs */
661 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
662 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
663 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
664 	}
665 
666 	snd_pcm_set_sync(substream);
667 	mutex_unlock(&chip->open_mutex);
668 	return 0;
669 
670  powerdown:
671 	snd_hda_power_down(apcm->codec);
672  unlock:
673 	mutex_unlock(&chip->open_mutex);
674 	snd_hda_codec_pcm_put(apcm->info);
675 	return err;
676 }
677 
678 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
679 			struct vm_area_struct *area)
680 {
681 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
682 	struct azx *chip = apcm->chip;
683 	if (chip->ops->pcm_mmap_prepare)
684 		chip->ops->pcm_mmap_prepare(substream, area);
685 	return snd_pcm_lib_default_mmap(substream, area);
686 }
687 
688 static const struct snd_pcm_ops azx_pcm_ops = {
689 	.open = azx_pcm_open,
690 	.close = azx_pcm_close,
691 	.ioctl = snd_pcm_lib_ioctl,
692 	.hw_params = azx_pcm_hw_params,
693 	.hw_free = azx_pcm_hw_free,
694 	.prepare = azx_pcm_prepare,
695 	.trigger = azx_pcm_trigger,
696 	.pointer = azx_pcm_pointer,
697 	.get_time_info =  azx_get_time_info,
698 	.mmap = azx_pcm_mmap,
699 	.page = snd_pcm_sgbuf_ops_page,
700 };
701 
702 static void azx_pcm_free(struct snd_pcm *pcm)
703 {
704 	struct azx_pcm *apcm = pcm->private_data;
705 	if (apcm) {
706 		list_del(&apcm->list);
707 		apcm->info->pcm = NULL;
708 		kfree(apcm);
709 	}
710 }
711 
712 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
713 
714 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
715 			      struct hda_pcm *cpcm)
716 {
717 	struct hdac_bus *bus = &_bus->core;
718 	struct azx *chip = bus_to_azx(bus);
719 	struct snd_pcm *pcm;
720 	struct azx_pcm *apcm;
721 	int pcm_dev = cpcm->device;
722 	unsigned int size;
723 	int s, err;
724 	int type = SNDRV_DMA_TYPE_DEV_SG;
725 
726 	list_for_each_entry(apcm, &chip->pcm_list, list) {
727 		if (apcm->pcm->device == pcm_dev) {
728 			dev_err(chip->card->dev, "PCM %d already exists\n",
729 				pcm_dev);
730 			return -EBUSY;
731 		}
732 	}
733 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
734 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
735 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
736 			  &pcm);
737 	if (err < 0)
738 		return err;
739 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
740 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
741 	if (apcm == NULL) {
742 		snd_device_free(chip->card, pcm);
743 		return -ENOMEM;
744 	}
745 	apcm->chip = chip;
746 	apcm->pcm = pcm;
747 	apcm->codec = codec;
748 	apcm->info = cpcm;
749 	pcm->private_data = apcm;
750 	pcm->private_free = azx_pcm_free;
751 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
752 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
753 	list_add_tail(&apcm->list, &chip->pcm_list);
754 	cpcm->pcm = pcm;
755 	for (s = 0; s < 2; s++) {
756 		if (cpcm->stream[s].substreams)
757 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
758 	}
759 	/* buffer pre-allocation */
760 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
761 	if (size > MAX_PREALLOC_SIZE)
762 		size = MAX_PREALLOC_SIZE;
763 	if (chip->uc_buffer)
764 		type = SNDRV_DMA_TYPE_DEV_UC_SG;
765 	snd_pcm_lib_preallocate_pages_for_all(pcm, type,
766 					      chip->card->dev,
767 					      size, MAX_PREALLOC_SIZE);
768 	return 0;
769 }
770 
771 static unsigned int azx_command_addr(u32 cmd)
772 {
773 	unsigned int addr = cmd >> 28;
774 
775 	if (addr >= AZX_MAX_CODECS) {
776 		snd_BUG();
777 		addr = 0;
778 	}
779 
780 	return addr;
781 }
782 
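/*
 * RIRB response handling escalates on timeout: first poll the RIRB once,
 * then switch the bus to permanent polling mode, then disable MSI, and
 * finally either reset the bus or fall back to single_cmd mode, roughly
 * in that order (see the chain of "goto again" cases below).
 */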
783 /* receive a response */
784 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
785 				 unsigned int *res)
786 {
787 	struct azx *chip = bus_to_azx(bus);
788 	struct hda_bus *hbus = &chip->bus;
789 	unsigned long timeout;
790 	unsigned long loopcounter;
791 	int do_poll = 0;
792 
793  again:
794 	timeout = jiffies + msecs_to_jiffies(1000);
795 
796 	for (loopcounter = 0;; loopcounter++) {
797 		spin_lock_irq(&bus->reg_lock);
798 		if (bus->polling_mode || do_poll)
799 			snd_hdac_bus_update_rirb(bus);
800 		if (!bus->rirb.cmds[addr]) {
801 			if (!do_poll)
802 				bus->poll_count = 0;
803 			if (res)
804 				*res = bus->rirb.res[addr]; /* the last value */
805 			spin_unlock_irq(&bus->reg_lock);
806 			return 0;
807 		}
808 		spin_unlock_irq(&bus->reg_lock);
809 		if (time_after(jiffies, timeout))
810 			break;
811 		if (hbus->needs_damn_long_delay || loopcounter > 3000)
812 			msleep(2); /* temporary workaround */
813 		else {
814 			udelay(10);
815 			cond_resched();
816 		}
817 	}
818 
819 	if (hbus->no_response_fallback)
820 		return -EIO;
821 
822 	if (!bus->polling_mode && bus->poll_count < 2) {
823 		dev_dbg(chip->card->dev,
824 			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
825 			bus->last_cmd[addr]);
826 		do_poll = 1;
827 		bus->poll_count++;
828 		goto again;
829 	}
830 
831 
832 	if (!bus->polling_mode) {
833 		dev_warn(chip->card->dev,
834 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
835 			 bus->last_cmd[addr]);
836 		bus->polling_mode = 1;
837 		goto again;
838 	}
839 
840 	if (chip->msi) {
841 		dev_warn(chip->card->dev,
842 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
843 			 bus->last_cmd[addr]);
844 		if (chip->ops->disable_msi_reset_irq &&
845 		    chip->ops->disable_msi_reset_irq(chip) < 0)
846 			return -EIO;
847 		goto again;
848 	}
849 
850 	if (chip->probing) {
851 		/* If this critical timeout happens during the codec probing
852 		 * phase, this is likely an access to a non-existing codec
853 		 * slot.  Better to return an error and reset the system.
854 		 */
855 		return -EIO;
856 	}
857 
858 	/* no fallback mechanism? */
859 	if (!chip->fallback_to_single_cmd)
860 		return -EIO;
861 	/* a fatal communication error; need either to reset or to fall back
862 	 * to single_cmd mode
863 	 * to the single_cmd mode
864 	 */
865 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
866 		hbus->response_reset = 1;
867 		dev_err(chip->card->dev,
868 			"No response from codec, resetting bus: last cmd=0x%08x\n",
869 			bus->last_cmd[addr]);
870 		return -EAGAIN; /* give a chance to retry */
871 	}
872 
873 	dev_WARN(chip->card->dev,
874 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
875 		bus->last_cmd[addr]);
876 	chip->single_cmd = 1;
877 	hbus->response_reset = 0;
878 	snd_hdac_bus_stop_cmd_io(bus);
879 	return -EIO;
880 }
881 
882 /*
883  * Use the single immediate command instead of CORB/RIRB for simplicity
884  *
885  * Note: according to Intel, this is not the preferred use.  The command was
886  *       intended for the BIOS only, and may get confused with unsolicited
887  *       responses.  So, we shouldn't use it for normal operation from the
888  *       driver.
889  *       I left the code here, however, for debugging/testing purposes.
890  */
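/*
 * The immediate-command handshake below is: write the verb to IC, set the
 * ICB (busy) bit in IRS, poll IRS until the controller raises IRV
 * (immediate result valid), then read the response from IR.
 */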
891 
892 /* receive a response */
893 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
894 {
895 	int timeout = 50;
896 
897 	while (timeout--) {
898 		/* check IRV (immediate result valid) bit */
899 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
900 			/* reuse rirb.res as the response return value */
901 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
902 			return 0;
903 		}
904 		udelay(1);
905 	}
906 	if (printk_ratelimit())
907 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
908 			azx_readw(chip, IRS));
909 	azx_bus(chip)->rirb.res[addr] = -1;
910 	return -EIO;
911 }
912 
913 /* send a command */
914 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
915 {
916 	struct azx *chip = bus_to_azx(bus);
917 	unsigned int addr = azx_command_addr(val);
918 	int timeout = 50;
919 
920 	bus->last_cmd[azx_command_addr(val)] = val;
921 	while (timeout--) {
922 		/* check ICB busy bit */
923 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
924 			/* Clear IRV valid bit */
925 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
926 				   AZX_IRS_VALID);
927 			azx_writel(chip, IC, val);
928 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
929 				   AZX_IRS_BUSY);
930 			return azx_single_wait_for_response(chip, addr);
931 		}
932 		udelay(1);
933 	}
934 	if (printk_ratelimit())
935 		dev_dbg(chip->card->dev,
936 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
937 			azx_readw(chip, IRS), val);
938 	return -EIO;
939 }
940 
941 /* receive a response */
942 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
943 				   unsigned int *res)
944 {
945 	if (res)
946 		*res = bus->rirb.res[addr];
947 	return 0;
948 }
949 
950 /*
951  * Below are the main callbacks from hda_codec.
952  *
953  * They are just skeletons that dispatch to the sub-callbacks according to
954  * the current setting of chip->single_cmd.
955  */
956 
957 /* send a command */
958 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
959 {
960 	struct azx *chip = bus_to_azx(bus);
961 
962 	if (chip->disabled)
963 		return 0;
964 	if (chip->single_cmd)
965 		return azx_single_send_cmd(bus, val);
966 	else
967 		return snd_hdac_bus_send_cmd(bus, val);
968 }
969 
970 /* get a response */
971 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
972 			    unsigned int *res)
973 {
974 	struct azx *chip = bus_to_azx(bus);
975 
976 	if (chip->disabled)
977 		return 0;
978 	if (chip->single_cmd)
979 		return azx_single_get_response(bus, addr, res);
980 	else
981 		return azx_rirb_get_response(bus, addr, res);
982 }
983 
984 static const struct hdac_bus_ops bus_core_ops = {
985 	.command = azx_send_cmd,
986 	.get_response = azx_get_response,
987 };
988 
989 #ifdef CONFIG_SND_HDA_DSP_LOADER
990 /*
991  * DSP loading code (e.g. for CA0132)
992  */
993 
994 /* use the first stream for loading DSP */
995 static struct azx_dev *
996 azx_get_dsp_loader_dev(struct azx *chip)
997 {
998 	struct hdac_bus *bus = azx_bus(chip);
999 	struct hdac_stream *s;
1000 
1001 	list_for_each_entry(s, &bus->stream_list, list)
1002 		if (s->index == chip->playback_index_offset)
1003 			return stream_to_azx_dev(s);
1004 
1005 	return NULL;
1006 }
1007 
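/*
 * If the chosen stream is currently opened by a PCM, its azx_dev state is
 * saved in chip->saved_azx_dev before the DSP download and restored on
 * error and in the cleanup path, so the PCM side never sees the borrowed
 * stream in a modified state.
 */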
1008 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
1009 				   unsigned int byte_size,
1010 				   struct snd_dma_buffer *bufp)
1011 {
1012 	struct hdac_bus *bus = &codec->bus->core;
1013 	struct azx *chip = bus_to_azx(bus);
1014 	struct azx_dev *azx_dev;
1015 	struct hdac_stream *hstr;
1016 	bool saved = false;
1017 	int err;
1018 
1019 	azx_dev = azx_get_dsp_loader_dev(chip);
1020 	hstr = azx_stream(azx_dev);
1021 	spin_lock_irq(&bus->reg_lock);
1022 	if (hstr->opened) {
1023 		chip->saved_azx_dev = *azx_dev;
1024 		saved = true;
1025 	}
1026 	spin_unlock_irq(&bus->reg_lock);
1027 
1028 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
1029 	if (err < 0) {
1030 		spin_lock_irq(&bus->reg_lock);
1031 		if (saved)
1032 			*azx_dev = chip->saved_azx_dev;
1033 		spin_unlock_irq(&bus->reg_lock);
1034 		return err;
1035 	}
1036 
1037 	hstr->prepared = 0;
1038 	return err;
1039 }
1040 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
1041 
1042 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1043 {
1044 	struct hdac_bus *bus = &codec->bus->core;
1045 	struct azx *chip = bus_to_azx(bus);
1046 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1047 
1048 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1049 }
1050 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1051 
1052 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1053 				    struct snd_dma_buffer *dmab)
1054 {
1055 	struct hdac_bus *bus = &codec->bus->core;
1056 	struct azx *chip = bus_to_azx(bus);
1057 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1058 	struct hdac_stream *hstr = azx_stream(azx_dev);
1059 
1060 	if (!dmab->area || !hstr->locked)
1061 		return;
1062 
1063 	snd_hdac_dsp_cleanup(hstr, dmab);
1064 	spin_lock_irq(&bus->reg_lock);
1065 	if (hstr->opened)
1066 		*azx_dev = chip->saved_azx_dev;
1067 	hstr->locked = false;
1068 	spin_unlock_irq(&bus->reg_lock);
1069 }
1070 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1071 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1072 
1073 /*
1074  * reset and start the controller registers
1075  */
1076 void azx_init_chip(struct azx *chip, bool full_reset)
1077 {
1078 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1079 		/* correct RINTCNT for CXT */
1080 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1081 			azx_writew(chip, RINTCNT, 0xc0);
1082 	}
1083 }
1084 EXPORT_SYMBOL_GPL(azx_init_chip);
1085 
1086 void azx_stop_all_streams(struct azx *chip)
1087 {
1088 	struct hdac_bus *bus = azx_bus(chip);
1089 	struct hdac_stream *s;
1090 
1091 	list_for_each_entry(s, &bus->stream_list, list)
1092 		snd_hdac_stream_stop(s);
1093 }
1094 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1095 
1096 void azx_stop_chip(struct azx *chip)
1097 {
1098 	snd_hdac_bus_stop_chip(azx_bus(chip));
1099 }
1100 EXPORT_SYMBOL_GPL(azx_stop_chip);
1101 
1102 /*
1103  * interrupt handler
1104  */
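/*
 * The handler loops over INTSTS until no stream or RIRB interrupt remains
 * pending (bounded to 10 iterations to avoid an endless IRQ loop); stream
 * interrupts are dispatched to stream_update(), which drops the register
 * lock around snd_pcm_period_elapsed().
 */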
1105 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1106 {
1107 	struct azx *chip = bus_to_azx(bus);
1108 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1109 
1110 	/* check whether this IRQ is really acceptable */
1111 	if (!chip->ops->position_check ||
1112 	    chip->ops->position_check(chip, azx_dev)) {
1113 		spin_unlock(&bus->reg_lock);
1114 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1115 		spin_lock(&bus->reg_lock);
1116 	}
1117 }
1118 
1119 irqreturn_t azx_interrupt(int irq, void *dev_id)
1120 {
1121 	struct azx *chip = dev_id;
1122 	struct hdac_bus *bus = azx_bus(chip);
1123 	u32 status;
1124 	bool active, handled = false;
1125 	int repeat = 0; /* count for avoiding endless loop */
1126 
1127 #ifdef CONFIG_PM
1128 	if (azx_has_pm_runtime(chip))
1129 		if (!pm_runtime_active(chip->card->dev))
1130 			return IRQ_NONE;
1131 #endif
1132 
1133 	spin_lock(&bus->reg_lock);
1134 
1135 	if (chip->disabled)
1136 		goto unlock;
1137 
1138 	do {
1139 		status = azx_readl(chip, INTSTS);
1140 		if (status == 0 || status == 0xffffffff)
1141 			break;
1142 
1143 		handled = true;
1144 		active = false;
1145 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1146 			active = true;
1147 
1148 		/* clear rirb int */
1149 		status = azx_readb(chip, RIRBSTS);
1150 		if (status & RIRB_INT_MASK) {
1151 			active = true;
1152 			if (status & RIRB_INT_RESPONSE) {
1153 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1154 					udelay(80);
1155 				snd_hdac_bus_update_rirb(bus);
1156 			}
1157 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1158 		}
1159 	} while (active && ++repeat < 10);
1160 
1161  unlock:
1162 	spin_unlock(&bus->reg_lock);
1163 
1164 	return IRQ_RETVAL(handled);
1165 }
1166 EXPORT_SYMBOL_GPL(azx_interrupt);
1167 
1168 /*
1169  * Codec interface
1170  */
1171 
1172 /*
1173  * Probe the given codec address
1174  */
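/*
 * The probe verb asks the root node of the codec at the given address for
 * its vendor-ID parameter; any valid answer (err >= 0 and res != -1)
 * means a codec is present in that slot.
 */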
1175 static int probe_codec(struct azx *chip, int addr)
1176 {
1177 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1178 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1179 	struct hdac_bus *bus = azx_bus(chip);
1180 	int err;
1181 	unsigned int res = -1;
1182 
1183 	mutex_lock(&bus->cmd_mutex);
1184 	chip->probing = 1;
1185 	azx_send_cmd(bus, cmd);
1186 	err = azx_get_response(bus, addr, &res);
1187 	chip->probing = 0;
1188 	mutex_unlock(&bus->cmd_mutex);
1189 	if (err < 0 || res == -1)
1190 		return -EIO;
1191 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1192 	return 0;
1193 }
1194 
1195 void snd_hda_bus_reset(struct hda_bus *bus)
1196 {
1197 	struct azx *chip = bus_to_azx(&bus->core);
1198 
1199 	bus->in_reset = 1;
1200 	azx_stop_chip(chip);
1201 	azx_init_chip(chip, true);
1202 	if (bus->core.chip_init)
1203 		snd_hda_bus_reset_codecs(bus);
1204 	bus->in_reset = 0;
1205 }
1206 
1207 /* HD-audio bus initialization */
1208 int azx_bus_init(struct azx *chip, const char *model,
1209 		 const struct hdac_io_ops *io_ops)
1210 {
1211 	struct hda_bus *bus = &chip->bus;
1212 	int err;
1213 
1214 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops,
1215 				io_ops);
1216 	if (err < 0)
1217 		return err;
1218 
1219 	bus->card = chip->card;
1220 	mutex_init(&bus->prepare_mutex);
1221 	bus->pci = chip->pci;
1222 	bus->modelname = model;
1223 	bus->mixer_assigned = -1;
1224 	bus->core.snoop = azx_snoop(chip);
1225 	if (chip->get_position[0] != azx_get_pos_lpib ||
1226 	    chip->get_position[1] != azx_get_pos_lpib)
1227 		bus->core.use_posbuf = true;
1228 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1229 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1230 		bus->core.corbrp_self_clear = true;
1231 
1232 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1233 		bus->core.align_bdle_4k = true;
1234 
1235 	/* AMD chipsets often cause communication stalls upon certain
1236 	 * sequences such as pin detection.  It seems that forcing synced
1237 	 * access works around the stall.  Grrr...
1238 	 */
1239 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1240 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1241 		bus->core.sync_write = 1;
1242 		bus->allow_bus_reset = 1;
1243 	}
1244 
1245 	return 0;
1246 }
1247 EXPORT_SYMBOL_GPL(azx_bus_init);
1248 
1249 /* Probe codecs */
1250 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1251 {
1252 	struct hdac_bus *bus = azx_bus(chip);
1253 	int c, codecs, err;
1254 
1255 	codecs = 0;
1256 	if (!max_slots)
1257 		max_slots = AZX_DEFAULT_CODECS;
1258 
1259 	/* First try to probe all given codec slots */
1260 	for (c = 0; c < max_slots; c++) {
1261 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1262 			if (probe_codec(chip, c) < 0) {
1263 				/* Some BIOSes report codec addresses
1264 				 * for codecs that don't exist
1265 				 */
1266 				dev_warn(chip->card->dev,
1267 					 "Codec #%d probe error; disabling it...\n", c);
1268 				bus->codec_mask &= ~(1 << c);
1269 				/* Worse, accessing a non-existing codec
1270 				 * often screws up the controller chip
1271 				 * and disturbs further communication.
1272 				 * Thus, if an error occurs during probing,
1273 				 * it is better to reset the controller chip
1274 				 * to get back to a sane state.
1275 				 */
1276 				azx_stop_chip(chip);
1277 				azx_init_chip(chip, true);
1278 			}
1279 		}
1280 	}
1281 
1282 	/* Then create codec instances */
1283 	for (c = 0; c < max_slots; c++) {
1284 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1285 			struct hda_codec *codec;
1286 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1287 			if (err < 0)
1288 				continue;
1289 			codec->jackpoll_interval = chip->jackpoll_interval;
1290 			codec->beep_mode = chip->beep_mode;
1291 			codecs++;
1292 		}
1293 	}
1294 	if (!codecs) {
1295 		dev_err(chip->card->dev, "no codecs initialized\n");
1296 		return -ENXIO;
1297 	}
1298 	return 0;
1299 }
1300 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1301 
1302 /* configure each codec instance */
1303 int azx_codec_configure(struct azx *chip)
1304 {
1305 	struct hda_codec *codec, *next;
1306 
1307 	/* use _safe version here since snd_hda_codec_configure() deregisters
1308 	 * the device upon error and deletes itself from the bus list.
1309 	 */
1310 	list_for_each_codec_safe(codec, next, &chip->bus) {
1311 		snd_hda_codec_configure(codec);
1312 	}
1313 
1314 	if (!azx_bus(chip)->num_codecs)
1315 		return -ENODEV;
1316 	return 0;
1317 }
1318 EXPORT_SYMBOL_GPL(azx_codec_configure);
1319 
1320 static int stream_direction(struct azx *chip, unsigned char index)
1321 {
1322 	if (index >= chip->capture_index_offset &&
1323 	    index < chip->capture_index_offset + chip->capture_streams)
1324 		return SNDRV_PCM_STREAM_CAPTURE;
1325 	return SNDRV_PCM_STREAM_PLAYBACK;
1326 }
1327 
1328 /* initialize SD streams */
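/*
 * Illustrative example of the tag assignment below: with 4 capture and
 * 4 playback streams and AZX_DCAPS_SEPARATE_STREAM_TAG set, each direction
 * numbers its tags 1..4 independently; without the flag, stream i simply
 * gets tag i + 1 across both directions.
 */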
1329 int azx_init_streams(struct azx *chip)
1330 {
1331 	int i;
1332 	int stream_tags[2] = { 0, 0 };
1333 
1334 	/* initialize each stream (aka device):
1335 	 * assign the starting BDL address to each stream (device)
1336 	 * and initialize it
1337 	 */
1338 	for (i = 0; i < chip->num_streams; i++) {
1339 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1340 		int dir, tag;
1341 
1342 		if (!azx_dev)
1343 			return -ENOMEM;
1344 
1345 		dir = stream_direction(chip, i);
1346 		/* the stream tag must be unique throughout
1347 		 * the stream direction group;
1348 		 * valid values are 1...15.
1349 		 * Use a separate stream tag per direction if the
1350 		 * AZX_DCAPS_SEPARATE_STREAM_TAG flag is set.
1351 		 */
1352 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1353 			tag = ++stream_tags[dir];
1354 		else
1355 			tag = i + 1;
1356 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1357 				     i, dir, tag);
1358 	}
1359 
1360 	return 0;
1361 }
1362 EXPORT_SYMBOL_GPL(azx_init_streams);
1363 
1364 void azx_free_streams(struct azx *chip)
1365 {
1366 	struct hdac_bus *bus = azx_bus(chip);
1367 	struct hdac_stream *s;
1368 
1369 	while (!list_empty(&bus->stream_list)) {
1370 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1371 		list_del(&s->list);
1372 		kfree(stream_to_azx_dev(s));
1373 	}
1374 }
1375 EXPORT_SYMBOL_GPL(azx_free_streams);
1376