xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision cc6c6912)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Implementation of the primary ALSA driver code base for Intel HD Audio.
5  *
6  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
7  *
8  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9  *                     PeiSen Hou <pshou@realtek.com.tw>
10  */
11 
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 
20 #ifdef CONFIG_X86
21 /* for ART-to-TSC conversion */
22 #include <asm/tsc.h>
23 #endif
24 
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include "hda_controller_trace.h"
31 
32 /* DSP lock helpers */
33 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
34 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
35 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
36 
37 /* assign a stream for the PCM */
38 static inline struct azx_dev *
39 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
40 {
41 	struct hdac_stream *s;
42 
43 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
44 	if (!s)
45 		return NULL;
46 	return stream_to_azx_dev(s);
47 }
48 
49 /* release the assigned stream */
50 static inline void azx_release_device(struct azx_dev *azx_dev)
51 {
52 	snd_hdac_stream_release(azx_stream(azx_dev));
53 }
54 
55 static inline struct hda_pcm_stream *
56 to_hda_pcm_stream(struct snd_pcm_substream *substream)
57 {
58 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
59 	return &apcm->info->stream[substream->stream];
60 }
61 
62 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
63 				u64 nsec)
64 {
65 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
66 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
67 	u64 codec_frames, codec_nsecs;
68 
69 	if (!hinfo->ops.get_delay)
70 		return nsec;
71 
72 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
73 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
74 			      substream->runtime->rate);
75 
76 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
77 		return nsec + codec_nsecs;
78 
79 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
80 }
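
/*
 * Worked example (illustrative numbers, not from this file): a codec
 * reporting 96 frames of internal delay on a 48 kHz stream gives
 *
 *	codec_nsecs = 96 * 10^9 / 48000 = 2,000,000 ns (2 ms)
 *
 * which is added to the link timestamp for capture and subtracted
 * (clamped at zero) for playback.
 */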
81 
82 /*
83  * PCM ops
84  */
85 
86 static int azx_pcm_close(struct snd_pcm_substream *substream)
87 {
88 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
89 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
90 	struct azx *chip = apcm->chip;
91 	struct azx_dev *azx_dev = get_azx_dev(substream);
92 
93 	trace_azx_pcm_close(chip, azx_dev);
94 	mutex_lock(&chip->open_mutex);
95 	azx_release_device(azx_dev);
96 	if (hinfo->ops.close)
97 		hinfo->ops.close(hinfo, apcm->codec, substream);
98 	snd_hda_power_down(apcm->codec);
99 	mutex_unlock(&chip->open_mutex);
100 	snd_hda_codec_pcm_put(apcm->info);
101 	return 0;
102 }
103 
104 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
105 			     struct snd_pcm_hw_params *hw_params)
106 {
107 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
108 	struct azx *chip = apcm->chip;
109 	struct azx_dev *azx_dev = get_azx_dev(substream);
110 	int ret = 0;
111 
112 	trace_azx_pcm_hw_params(chip, azx_dev);
113 	dsp_lock(azx_dev);
114 	if (dsp_is_locked(azx_dev)) {
115 		ret = -EBUSY;
116 		goto unlock;
117 	}
118 
119 	azx_dev->core.bufsize = 0;
120 	azx_dev->core.period_bytes = 0;
121 	azx_dev->core.format_val = 0;
122 
123 unlock:
124 	dsp_unlock(azx_dev);
125 	return ret;
126 }
127 
128 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
129 {
130 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
131 	struct azx_dev *azx_dev = get_azx_dev(substream);
132 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
133 
134 	/* reset BDL address */
135 	dsp_lock(azx_dev);
136 	if (!dsp_is_locked(azx_dev))
137 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
138 
139 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
140 
141 	azx_stream(azx_dev)->prepared = 0;
142 	dsp_unlock(azx_dev);
143 	return 0;
144 }
145 
146 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
147 {
148 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
149 	struct azx *chip = apcm->chip;
150 	struct azx_dev *azx_dev = get_azx_dev(substream);
151 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
152 	struct snd_pcm_runtime *runtime = substream->runtime;
153 	unsigned int format_val, stream_tag;
154 	int err;
155 	struct hda_spdif_out *spdif =
156 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
157 	unsigned short ctls = spdif ? spdif->ctls : 0;
158 
159 	trace_azx_pcm_prepare(chip, azx_dev);
160 	dsp_lock(azx_dev);
161 	if (dsp_is_locked(azx_dev)) {
162 		err = -EBUSY;
163 		goto unlock;
164 	}
165 
166 	snd_hdac_stream_reset(azx_stream(azx_dev));
167 	format_val = snd_hdac_calc_stream_format(runtime->rate,
168 						runtime->channels,
169 						runtime->format,
170 						hinfo->maxbps,
171 						ctls);
172 	if (!format_val) {
173 		dev_err(chip->card->dev,
174 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
175 			runtime->rate, runtime->channels, runtime->format);
176 		err = -EINVAL;
177 		goto unlock;
178 	}
179 
180 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
181 	if (err < 0)
182 		goto unlock;
183 
184 	snd_hdac_stream_setup(azx_stream(azx_dev));
185 
186 	stream_tag = azx_dev->core.stream_tag;
187 	/* CA-IBG chips need the playback stream starting from 1 */
188 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
189 	    stream_tag > chip->capture_streams)
190 		stream_tag -= chip->capture_streams;
191 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
192 				     azx_dev->core.format_val, substream);
193 
194  unlock:
195 	if (!err)
196 		azx_stream(azx_dev)->prepared = 1;
197 	dsp_unlock(azx_dev);
198 	return err;
199 }
200 
201 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
202 {
203 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
204 	struct azx *chip = apcm->chip;
205 	struct hdac_bus *bus = azx_bus(chip);
206 	struct azx_dev *azx_dev;
207 	struct snd_pcm_substream *s;
208 	struct hdac_stream *hstr;
209 	bool start;
210 	int sbits = 0;
211 	int sync_reg;
212 
213 	azx_dev = get_azx_dev(substream);
214 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
215 
216 	hstr = azx_stream(azx_dev);
217 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
218 		sync_reg = AZX_REG_OLD_SSYNC;
219 	else
220 		sync_reg = AZX_REG_SSYNC;
221 
222 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
223 		return -EPIPE;
224 
225 	switch (cmd) {
226 	case SNDRV_PCM_TRIGGER_START:
227 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
228 	case SNDRV_PCM_TRIGGER_RESUME:
229 		start = true;
230 		break;
231 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
232 	case SNDRV_PCM_TRIGGER_SUSPEND:
233 	case SNDRV_PCM_TRIGGER_STOP:
234 		start = false;
235 		break;
236 	default:
237 		return -EINVAL;
238 	}
239 
240 	snd_pcm_group_for_each_entry(s, substream) {
241 		if (s->pcm->card != substream->pcm->card)
242 			continue;
243 		azx_dev = get_azx_dev(s);
244 		sbits |= 1 << azx_dev->core.index;
245 		snd_pcm_trigger_done(s, substream);
246 	}
247 
248 	spin_lock(&bus->reg_lock);
249 
250 	/* first, set SYNC bits of corresponding streams */
251 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
252 
253 	snd_pcm_group_for_each_entry(s, substream) {
254 		if (s->pcm->card != substream->pcm->card)
255 			continue;
256 		azx_dev = get_azx_dev(s);
257 		if (start) {
258 			azx_dev->insufficient = 1;
259 			snd_hdac_stream_start(azx_stream(azx_dev), true);
260 		} else {
261 			snd_hdac_stream_stop(azx_stream(azx_dev));
262 		}
263 	}
264 	spin_unlock(&bus->reg_lock);
265 
266 	snd_hdac_stream_sync(hstr, start, sbits);
267 
268 	spin_lock(&bus->reg_lock);
269 	/* reset SYNC bits */
270 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
271 	if (start)
272 		snd_hdac_stream_timecounter_init(hstr, sbits);
273 	spin_unlock(&bus->reg_lock);
274 	return 0;
275 }
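
/*
 * Substreams end up in the same trigger group when user space links
 * their PCM handles.  A hedged user-space sketch with alsa-lib (not
 * part of this driver):
 *
 *	snd_pcm_t *play, *cap;
 *	// ... open and configure both handles ...
 *	snd_pcm_link(play, cap);	// one trigger starts both streams
 *
 * The SSYNC bits manipulated above then make the linked streams start
 * in sync in hardware.
 */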
276 
277 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
278 {
279 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
280 }
281 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
282 
283 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
284 {
285 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
286 }
287 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
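
/*
 * Both helpers are exported so a controller driver can choose how the
 * DMA position is read per direction.  A minimal sketch of such a setup
 * (assumed example, not taken from this file):
 *
 *	chip->get_position[SNDRV_PCM_STREAM_PLAYBACK] = azx_get_pos_lpib;
 *	chip->get_position[SNDRV_PCM_STREAM_CAPTURE]  = azx_get_pos_posbuf;
 *
 * azx_get_position() below falls back to the position buffer when no
 * callback is set.
 */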
288 
289 unsigned int azx_get_position(struct azx *chip,
290 			      struct azx_dev *azx_dev)
291 {
292 	struct snd_pcm_substream *substream = azx_dev->core.substream;
293 	unsigned int pos;
294 	int stream = substream->stream;
295 	int delay = 0;
296 
297 	if (chip->get_position[stream])
298 		pos = chip->get_position[stream](chip, azx_dev);
299 	else /* use the position buffer as default */
300 		pos = azx_get_pos_posbuf(chip, azx_dev);
301 
302 	if (pos >= azx_dev->core.bufsize)
303 		pos = 0;
304 
305 	if (substream->runtime) {
306 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
307 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
308 
309 		if (chip->get_delay[stream])
310 			delay += chip->get_delay[stream](chip, azx_dev, pos);
311 		if (hinfo->ops.get_delay)
312 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
313 						      substream);
314 		substream->runtime->delay = delay;
315 	}
316 
317 	trace_azx_get_position(chip, azx_dev, pos, delay);
318 	return pos;
319 }
320 EXPORT_SYMBOL_GPL(azx_get_position);
321 
322 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
323 {
324 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
325 	struct azx *chip = apcm->chip;
326 	struct azx_dev *azx_dev = get_azx_dev(substream);
327 	return bytes_to_frames(substream->runtime,
328 			       azx_get_position(chip, azx_dev));
329 }
330 
331 /*
332  * azx_scale64: Scale base by num/den while not overflowing sanely
333  *
334  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
335  *
336  * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K
337  * seconds, which is about 384307 seconds, i.e. ~4.5 days.
338  *
339  * This scales the calculation so that overflow will still happen, but only
340  * after 2^64 / 48000 secs, which is pretty large!
341  *
342  * In the calculation below:
343  *	base may overflow, but since there isn't any additional division
344  *	performed on base it's OK
345  *	rem can't overflow because both are 32-bit values
346  */
347 
348 #ifdef CONFIG_X86
349 static u64 azx_scale64(u64 base, u32 num, u32 den)
350 {
351 	u64 rem;
352 
353 	rem = do_div(base, den);
354 
355 	base *= num;
356 	rem *= num;
357 
358 	do_div(rem, den);
359 
360 	return base + rem;
361 }
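
/*
 * Equivalently (illustrative restatement of the code above):
 *
 *	azx_scale64(base, num, den) = (base / den) * num
 *				    + ((base % den) * num) / den
 *
 * With the call azx_scale64(ll_counter, NSEC_PER_SEC, runtime->rate)
 * below, the value that gets multiplied by 10^9 is the link counter
 * already divided by the sample rate, which pushes the overflow point
 * out as described in the comment above.
 */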
362 
363 static int azx_get_sync_time(ktime_t *device,
364 		struct system_counterval_t *system, void *ctx)
365 {
366 	struct snd_pcm_substream *substream = ctx;
367 	struct azx_dev *azx_dev = get_azx_dev(substream);
368 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
369 	struct azx *chip = apcm->chip;
370 	struct snd_pcm_runtime *runtime;
371 	u64 ll_counter, ll_counter_l, ll_counter_h;
372 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
373 	u32 wallclk_ctr, wallclk_cycles;
374 	bool direction;
375 	u32 dma_select;
376 	u32 timeout = 200;
377 	u32 retry_count = 0;
378 
379 	runtime = substream->runtime;
380 
381 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
382 		direction = 1;
383 	else
384 		direction = 0;
385 
386 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
387 	do {
388 		timeout = 100;
389 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
390 					(azx_dev->core.stream_tag - 1);
391 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
392 
393 		/* Enable the capture */
394 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
395 
396 		while (timeout) {
397 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
398 						GTSCC_TSCCD_MASK)
399 				break;
400 
401 			timeout--;
402 		}
403 
404 		if (!timeout) {
405 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
406 			return -EIO;
407 		}
408 
409 		/* Read wall clock counter */
410 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
411 
412 		/* Read TSC counter */
413 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
414 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
415 
416 		/* Read Link counter */
417 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
418 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
419 
420 		/* Ack: registers read done */
421 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
422 
423 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
424 						tsc_counter_l;
425 
426 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) |	ll_counter_l;
427 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
428 
429 		/*
430 		 * An error can occur near a frame "rollover".  The clocks-in-frame
431 		 * value indicates whether this error may have occurred.  Here we
432 		 * use a margin of 10 cycles, i.e.
433 		 * HDA_MAX_CYCLE_OFFSET.
434 		 */
435 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
436 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
437 			break;
438 
439 		/*
440 		 * Sleep before we read again, else we may again get a
441 		 * value near MAX_CYCLE.  Try to sleep for a different
442 		 * amount of time so we don't hit the same number again.
443 		 */
444 		udelay(retry_count++);
445 
446 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
447 
448 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
449 		dev_err_ratelimited(chip->card->dev,
450 			"Error in WALFCC cycle count\n");
451 		return -EIO;
452 	}
453 
454 	*device = ns_to_ktime(azx_scale64(ll_counter,
455 				NSEC_PER_SEC, runtime->rate));
456 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
457 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
458 
459 	*system = convert_art_to_tsc(tsc_counter);
460 
461 	return 0;
462 }
463 
464 #else
465 static int azx_get_sync_time(ktime_t *device,
466 		struct system_counterval_t *system, void *ctx)
467 {
468 	return -ENXIO;
469 }
470 #endif
471 
472 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
473 			      struct system_device_crosststamp *xtstamp)
474 {
475 	return get_device_system_crosststamp(azx_get_sync_time,
476 					substream, NULL, xtstamp);
477 }
478 
479 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
480 				struct snd_pcm_audio_tstamp_config *ts)
481 {
482 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
483 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
484 			return true;
485 
486 	return false;
487 }
488 
489 static int azx_get_time_info(struct snd_pcm_substream *substream,
490 			struct timespec *system_ts, struct timespec *audio_ts,
491 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
492 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
493 {
494 	struct azx_dev *azx_dev = get_azx_dev(substream);
495 	struct snd_pcm_runtime *runtime = substream->runtime;
496 	struct system_device_crosststamp xtstamp;
497 	int ret;
498 	u64 nsec;
499 
500 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
501 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
502 
503 		snd_pcm_gettime(substream->runtime, system_ts);
504 
505 		nsec = timecounter_read(&azx_dev->core.tc);
506 		nsec = div_u64(nsec, 3); /* can be optimized */
507 		if (audio_tstamp_config->report_delay)
508 			nsec = azx_adjust_codec_delay(substream, nsec);
509 
510 		*audio_ts = ns_to_timespec(nsec);
511 
512 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
513 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
514 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
515 
516 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
517 
518 		ret = azx_get_crosststamp(substream, &xtstamp);
519 		if (ret)
520 			return ret;
521 
522 		switch (runtime->tstamp_type) {
523 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
524 			return -EINVAL;
525 
526 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
527 			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
528 			break;
529 
530 		default:
531 			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
532 			break;
533 
534 		}
535 
536 		*audio_ts = ktime_to_timespec(xtstamp.device);
537 
538 		audio_tstamp_report->actual_type =
539 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
540 		audio_tstamp_report->accuracy_report = 1;
541 		/* 24 MHz WallClock == 42ns resolution */
542 		audio_tstamp_report->accuracy = 42;
543 
544 	} else {
545 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
546 	}
547 
548 	return 0;
549 }
550 
551 static struct snd_pcm_hardware azx_pcm_hw = {
552 	.info =			(SNDRV_PCM_INFO_MMAP |
553 				 SNDRV_PCM_INFO_INTERLEAVED |
554 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
555 				 SNDRV_PCM_INFO_MMAP_VALID |
556 				 /* No full-resume yet implemented */
557 				 /* SNDRV_PCM_INFO_RESUME |*/
558 				 SNDRV_PCM_INFO_PAUSE |
559 				 SNDRV_PCM_INFO_SYNC_START |
560 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
561 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
562 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
563 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
564 	.rates =		SNDRV_PCM_RATE_48000,
565 	.rate_min =		48000,
566 	.rate_max =		48000,
567 	.channels_min =		2,
568 	.channels_max =		2,
569 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
570 	.period_bytes_min =	128,
571 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
572 	.periods_min =		2,
573 	.periods_max =		AZX_MAX_FRAG,
574 	.fifo_size =		0,
575 };
576 
577 static int azx_pcm_open(struct snd_pcm_substream *substream)
578 {
579 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
580 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
581 	struct azx *chip = apcm->chip;
582 	struct azx_dev *azx_dev;
583 	struct snd_pcm_runtime *runtime = substream->runtime;
584 	int err;
585 	int buff_step;
586 
587 	snd_hda_codec_pcm_get(apcm->info);
588 	mutex_lock(&chip->open_mutex);
589 	azx_dev = azx_assign_device(chip, substream);
590 	trace_azx_pcm_open(chip, azx_dev);
591 	if (azx_dev == NULL) {
592 		err = -EBUSY;
593 		goto unlock;
594 	}
595 	runtime->private_data = azx_dev;
596 
597 	runtime->hw = azx_pcm_hw;
598 	if (chip->gts_present)
599 		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
600 	runtime->hw.channels_min = hinfo->channels_min;
601 	runtime->hw.channels_max = hinfo->channels_max;
602 	runtime->hw.formats = hinfo->formats;
603 	runtime->hw.rates = hinfo->rates;
604 	snd_pcm_limit_hw_rates(runtime);
605 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
606 
607 	/* avoid wrap-around with the wall clock (32-bit counter at 24 MHz wraps after ~178.9 s) */
608 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
609 				     20,
610 				     178000000);
611 
612 	/* For some reason, the playback stream stalls on PulseAudio with
613 	 * tsched=1 when a capture stream triggers.  Until we figure out the
614 	 * real cause, disable tsched mode by setting the SNDRV_PCM_INFO_BATCH flag.
615 	 */
616 	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
617 		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
618 
619 	if (chip->align_buffer_size)
620 		/* constrain buffer sizes to be a multiple of 128
621 		   bytes.  This is more efficient in terms of memory
622 		   access but isn't required by the HDA spec and
623 		   prevents users from specifying exact period/buffer
624 		   sizes.  For example, at 44.1 kHz a period size set
625 		   to 20 ms will be rounded to 19.59 ms. */
626 		buff_step = 128;
627 	else
628 		/* Don't enforce steps on buffer sizes; they still need to
629 		   be a multiple of 4 bytes (HDA spec).  Tested on Intel
630 		   HDA controllers; may not work on all devices, in which
631 		   case this option needs to be disabled. */
632 		buff_step = 4;
633 
634 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
635 				   buff_step);
636 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
637 				   buff_step);
638 	snd_hda_power_up(apcm->codec);
639 	if (hinfo->ops.open)
640 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
641 	else
642 		err = -ENODEV;
643 	if (err < 0) {
644 		azx_release_device(azx_dev);
645 		goto powerdown;
646 	}
647 	snd_pcm_limit_hw_rates(runtime);
648 	/* sanity check */
649 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
650 	    snd_BUG_ON(!runtime->hw.channels_max) ||
651 	    snd_BUG_ON(!runtime->hw.formats) ||
652 	    snd_BUG_ON(!runtime->hw.rates)) {
653 		azx_release_device(azx_dev);
654 		if (hinfo->ops.close)
655 			hinfo->ops.close(hinfo, apcm->codec, substream);
656 		err = -EINVAL;
657 		goto powerdown;
658 	}
659 
660 	/* disable LINK_ATIME timestamps for capture streams
661 	   until we figure out how to handle digital inputs */
662 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
663 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
664 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
665 	}
666 
667 	snd_pcm_set_sync(substream);
668 	mutex_unlock(&chip->open_mutex);
669 	return 0;
670 
671  powerdown:
672 	snd_hda_power_down(apcm->codec);
673  unlock:
674 	mutex_unlock(&chip->open_mutex);
675 	snd_hda_codec_pcm_put(apcm->info);
676 	return err;
677 }
678 
679 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
680 			struct vm_area_struct *area)
681 {
682 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
683 	struct azx *chip = apcm->chip;
684 	if (chip->ops->pcm_mmap_prepare)
685 		chip->ops->pcm_mmap_prepare(substream, area);
686 	return snd_pcm_lib_default_mmap(substream, area);
687 }
688 
689 static const struct snd_pcm_ops azx_pcm_ops = {
690 	.open = azx_pcm_open,
691 	.close = azx_pcm_close,
692 	.ioctl = snd_pcm_lib_ioctl,
693 	.hw_params = azx_pcm_hw_params,
694 	.hw_free = azx_pcm_hw_free,
695 	.prepare = azx_pcm_prepare,
696 	.trigger = azx_pcm_trigger,
697 	.pointer = azx_pcm_pointer,
698 	.get_time_info =  azx_get_time_info,
699 	.mmap = azx_pcm_mmap,
700 };
701 
702 static void azx_pcm_free(struct snd_pcm *pcm)
703 {
704 	struct azx_pcm *apcm = pcm->private_data;
705 	if (apcm) {
706 		list_del(&apcm->list);
707 		apcm->info->pcm = NULL;
708 		kfree(apcm);
709 	}
710 }
711 
712 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
713 
714 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
715 			      struct hda_pcm *cpcm)
716 {
717 	struct hdac_bus *bus = &_bus->core;
718 	struct azx *chip = bus_to_azx(bus);
719 	struct snd_pcm *pcm;
720 	struct azx_pcm *apcm;
721 	int pcm_dev = cpcm->device;
722 	unsigned int size;
723 	int s, err;
724 	int type = SNDRV_DMA_TYPE_DEV_SG;
725 
726 	list_for_each_entry(apcm, &chip->pcm_list, list) {
727 		if (apcm->pcm->device == pcm_dev) {
728 			dev_err(chip->card->dev, "PCM %d already exists\n",
729 				pcm_dev);
730 			return -EBUSY;
731 		}
732 	}
733 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
734 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
735 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
736 			  &pcm);
737 	if (err < 0)
738 		return err;
739 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
740 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
741 	if (apcm == NULL) {
742 		snd_device_free(chip->card, pcm);
743 		return -ENOMEM;
744 	}
745 	apcm->chip = chip;
746 	apcm->pcm = pcm;
747 	apcm->codec = codec;
748 	apcm->info = cpcm;
749 	pcm->private_data = apcm;
750 	pcm->private_free = azx_pcm_free;
751 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
752 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
753 	list_add_tail(&apcm->list, &chip->pcm_list);
754 	cpcm->pcm = pcm;
755 	for (s = 0; s < 2; s++) {
756 		if (cpcm->stream[s].substreams)
757 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
758 	}
759 	/* buffer pre-allocation */
760 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
761 	if (size > MAX_PREALLOC_SIZE)
762 		size = MAX_PREALLOC_SIZE;
763 	if (chip->uc_buffer)
764 		type = SNDRV_DMA_TYPE_DEV_UC_SG;
765 	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
766 				       size, MAX_PREALLOC_SIZE);
767 	return 0;
768 }
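
/*
 * Preallocation sizing example (illustrative): with
 * CONFIG_SND_HDA_PREALLOC_SIZE=64 each substream gets a 64 kB buffer
 * preallocated, while snd_pcm_set_managed_buffer_all() still lets
 * hw_params grow the buffer up to MAX_PREALLOC_SIZE (32 MB).
 */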
769 
770 static unsigned int azx_command_addr(u32 cmd)
771 {
772 	unsigned int addr = cmd >> 28;
773 
774 	if (addr >= AZX_MAX_CODECS) {
775 		snd_BUG();
776 		addr = 0;
777 	}
778 
779 	return addr;
780 }
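
/*
 * The codec address lives in the top nibble of a CORB verb; compare how
 * probe_codec() composes its command further below:
 *
 *	cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
 *	      (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
 *
 * so azx_command_addr(cmd) simply recovers addr (0..AZX_MAX_CODECS-1).
 */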
781 
782 /* receive a response */
783 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
784 				 unsigned int *res)
785 {
786 	struct azx *chip = bus_to_azx(bus);
787 	struct hda_bus *hbus = &chip->bus;
788 	unsigned long timeout;
789 	unsigned long loopcounter;
790 	wait_queue_entry_t wait;
791 	bool warned = false;
792 
793 	init_wait_entry(&wait, 0);
794  again:
795 	timeout = jiffies + msecs_to_jiffies(1000);
796 
797 	for (loopcounter = 0;; loopcounter++) {
798 		spin_lock_irq(&bus->reg_lock);
799 		if (!bus->polling_mode)
800 			prepare_to_wait(&bus->rirb_wq, &wait,
801 					TASK_UNINTERRUPTIBLE);
802 		if (bus->polling_mode)
803 			snd_hdac_bus_update_rirb(bus);
804 		if (!bus->rirb.cmds[addr]) {
805 			if (res)
806 				*res = bus->rirb.res[addr]; /* the last value */
807 			if (!bus->polling_mode)
808 				finish_wait(&bus->rirb_wq, &wait);
809 			spin_unlock_irq(&bus->reg_lock);
810 			return 0;
811 		}
812 		spin_unlock_irq(&bus->reg_lock);
813 		if (time_after(jiffies, timeout))
814 			break;
815 #define LOOP_COUNT_MAX	3000
816 		if (!bus->polling_mode) {
817 			schedule_timeout(msecs_to_jiffies(2));
818 		} else if (hbus->needs_damn_long_delay ||
819 		    loopcounter > LOOP_COUNT_MAX) {
820 			if (loopcounter > LOOP_COUNT_MAX && !warned) {
821 				dev_dbg_ratelimited(chip->card->dev,
822 						    "too slow response, last cmd=%#08x\n",
823 						    bus->last_cmd[addr]);
824 				warned = true;
825 			}
826 			msleep(2); /* temporary workaround */
827 		} else {
828 			udelay(10);
829 			cond_resched();
830 		}
831 	}
832 
833 	if (!bus->polling_mode)
834 		finish_wait(&bus->rirb_wq, &wait);
835 
836 	if (hbus->no_response_fallback)
837 		return -EIO;
838 
839 	if (!bus->polling_mode) {
840 		dev_warn(chip->card->dev,
841 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
842 			 bus->last_cmd[addr]);
843 		bus->polling_mode = 1;
844 		goto again;
845 	}
846 
847 	if (chip->msi) {
848 		dev_warn(chip->card->dev,
849 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
850 			 bus->last_cmd[addr]);
851 		if (chip->ops->disable_msi_reset_irq &&
852 		    chip->ops->disable_msi_reset_irq(chip) < 0)
853 			return -EIO;
854 		goto again;
855 	}
856 
857 	if (chip->probing) {
858 		/* If this critical timeout happens during the codec probing
859 		 * phase, this is likely an access to a non-existing codec
860 		 * slot.  Better to return an error and reset the system.
861 		 */
862 		return -EIO;
863 	}
864 
865 	/* no fallback mechanism? */
866 	if (!chip->fallback_to_single_cmd)
867 		return -EIO;
868 
869 	/* a fatal communication error; need either to reset or to fall back
870 	 * to the single_cmd mode
871 	 */
872 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
873 		hbus->response_reset = 1;
874 		dev_err(chip->card->dev,
875 			"No response from codec, resetting bus: last cmd=0x%08x\n",
876 			bus->last_cmd[addr]);
877 		return -EAGAIN; /* give a chance to retry */
878 	}
879 
880 	dev_WARN(chip->card->dev,
881 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
882 		bus->last_cmd[addr]);
883 	chip->single_cmd = 1;
884 	hbus->response_reset = 0;
885 	snd_hdac_bus_stop_cmd_io(bus);
886 	return -EIO;
887 }
888 
889 /*
890  * Use the single immediate command instead of CORB/RIRB for simplicity
891  *
892  * Note: according to Intel, this is not the preferred usage.  The command was
893  *       intended for the BIOS only, and may get confused with unsolicited
894  *       responses.  So, we shouldn't use it for normal operation from the
895  *       driver.
896  *       The code is left here, however, for debugging/testing purposes.
897  */
898 
899 /* receive a response */
900 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
901 {
902 	int timeout = 50;
903 
904 	while (timeout--) {
905 		/* check the IRV (immediate result valid) bit */
906 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
907 			/* reuse rirb.res as the response return value */
908 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
909 			return 0;
910 		}
911 		udelay(1);
912 	}
913 	if (printk_ratelimit())
914 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
915 			azx_readw(chip, IRS));
916 	azx_bus(chip)->rirb.res[addr] = -1;
917 	return -EIO;
918 }
919 
920 /* send a command */
921 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
922 {
923 	struct azx *chip = bus_to_azx(bus);
924 	unsigned int addr = azx_command_addr(val);
925 	int timeout = 50;
926 
927 	bus->last_cmd[azx_command_addr(val)] = val;
928 	while (timeout--) {
929 		/* check ICB busy bit */
930 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
931 			/* Clear IRV valid bit */
932 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
933 				   AZX_IRS_VALID);
934 			azx_writel(chip, IC, val);
935 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
936 				   AZX_IRS_BUSY);
937 			return azx_single_wait_for_response(chip, addr);
938 		}
939 		udelay(1);
940 	}
941 	if (printk_ratelimit())
942 		dev_dbg(chip->card->dev,
943 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
944 			azx_readw(chip, IRS), val);
945 	return -EIO;
946 }
947 
948 /* receive a response */
949 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
950 				   unsigned int *res)
951 {
952 	if (res)
953 		*res = bus->rirb.res[addr];
954 	return 0;
955 }
956 
957 /*
958  * The below are the main callbacks from hda_codec.
959  *
960  * They are just the skeleton to call sub-callbacks according to the
961  * current setting of chip->single_cmd.
962  */
963 
964 /* send a command */
965 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
966 {
967 	struct azx *chip = bus_to_azx(bus);
968 
969 	if (chip->disabled)
970 		return 0;
971 	if (chip->single_cmd)
972 		return azx_single_send_cmd(bus, val);
973 	else
974 		return snd_hdac_bus_send_cmd(bus, val);
975 }
976 
977 /* get a response */
978 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
979 			    unsigned int *res)
980 {
981 	struct azx *chip = bus_to_azx(bus);
982 
983 	if (chip->disabled)
984 		return 0;
985 	if (chip->single_cmd)
986 		return azx_single_get_response(bus, addr, res);
987 	else
988 		return azx_rirb_get_response(bus, addr, res);
989 }
990 
991 static const struct hdac_bus_ops bus_core_ops = {
992 	.command = azx_send_cmd,
993 	.get_response = azx_get_response,
994 };
995 
996 #ifdef CONFIG_SND_HDA_DSP_LOADER
997 /*
998  * DSP loading code (e.g. for CA0132)
999  */
1000 
1001 /* use the first stream for loading DSP */
1002 static struct azx_dev *
1003 azx_get_dsp_loader_dev(struct azx *chip)
1004 {
1005 	struct hdac_bus *bus = azx_bus(chip);
1006 	struct hdac_stream *s;
1007 
1008 	list_for_each_entry(s, &bus->stream_list, list)
1009 		if (s->index == chip->playback_index_offset)
1010 			return stream_to_azx_dev(s);
1011 
1012 	return NULL;
1013 }
1014 
1015 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
1016 				   unsigned int byte_size,
1017 				   struct snd_dma_buffer *bufp)
1018 {
1019 	struct hdac_bus *bus = &codec->bus->core;
1020 	struct azx *chip = bus_to_azx(bus);
1021 	struct azx_dev *azx_dev;
1022 	struct hdac_stream *hstr;
1023 	bool saved = false;
1024 	int err;
1025 
1026 	azx_dev = azx_get_dsp_loader_dev(chip);
1027 	hstr = azx_stream(azx_dev);
1028 	spin_lock_irq(&bus->reg_lock);
1029 	if (hstr->opened) {
1030 		chip->saved_azx_dev = *azx_dev;
1031 		saved = true;
1032 	}
1033 	spin_unlock_irq(&bus->reg_lock);
1034 
1035 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
1036 	if (err < 0) {
1037 		spin_lock_irq(&bus->reg_lock);
1038 		if (saved)
1039 			*azx_dev = chip->saved_azx_dev;
1040 		spin_unlock_irq(&bus->reg_lock);
1041 		return err;
1042 	}
1043 
1044 	hstr->prepared = 0;
1045 	return err;
1046 }
1047 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
1048 
1049 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1050 {
1051 	struct hdac_bus *bus = &codec->bus->core;
1052 	struct azx *chip = bus_to_azx(bus);
1053 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1054 
1055 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1056 }
1057 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1058 
1059 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1060 				    struct snd_dma_buffer *dmab)
1061 {
1062 	struct hdac_bus *bus = &codec->bus->core;
1063 	struct azx *chip = bus_to_azx(bus);
1064 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1065 	struct hdac_stream *hstr = azx_stream(azx_dev);
1066 
1067 	if (!dmab->area || !hstr->locked)
1068 		return;
1069 
1070 	snd_hdac_dsp_cleanup(hstr, dmab);
1071 	spin_lock_irq(&bus->reg_lock);
1072 	if (hstr->opened)
1073 		*azx_dev = chip->saved_azx_dev;
1074 	hstr->locked = false;
1075 	spin_unlock_irq(&bus->reg_lock);
1076 }
1077 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1078 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1079 
1080 /*
1081  * reset and start the controller registers
1082  */
1083 void azx_init_chip(struct azx *chip, bool full_reset)
1084 {
1085 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1086 		/* correct RINTCNT for CXT */
1087 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1088 			azx_writew(chip, RINTCNT, 0xc0);
1089 	}
1090 }
1091 EXPORT_SYMBOL_GPL(azx_init_chip);
1092 
1093 void azx_stop_all_streams(struct azx *chip)
1094 {
1095 	struct hdac_bus *bus = azx_bus(chip);
1096 	struct hdac_stream *s;
1097 
1098 	list_for_each_entry(s, &bus->stream_list, list)
1099 		snd_hdac_stream_stop(s);
1100 }
1101 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1102 
1103 void azx_stop_chip(struct azx *chip)
1104 {
1105 	snd_hdac_bus_stop_chip(azx_bus(chip));
1106 }
1107 EXPORT_SYMBOL_GPL(azx_stop_chip);
1108 
1109 /*
1110  * interrupt handler
1111  */
1112 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1113 {
1114 	struct azx *chip = bus_to_azx(bus);
1115 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1116 
1117 	/* check whether this IRQ is really acceptable */
1118 	if (!chip->ops->position_check ||
1119 	    chip->ops->position_check(chip, azx_dev)) {
1120 		spin_unlock(&bus->reg_lock);
1121 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1122 		spin_lock(&bus->reg_lock);
1123 	}
1124 }
1125 
1126 irqreturn_t azx_interrupt(int irq, void *dev_id)
1127 {
1128 	struct azx *chip = dev_id;
1129 	struct hdac_bus *bus = azx_bus(chip);
1130 	u32 status;
1131 	bool active, handled = false;
1132 	int repeat = 0; /* count for avoiding endless loop */
1133 
1134 #ifdef CONFIG_PM
1135 	if (azx_has_pm_runtime(chip))
1136 		if (!pm_runtime_active(chip->card->dev))
1137 			return IRQ_NONE;
1138 #endif
1139 
1140 	spin_lock(&bus->reg_lock);
1141 
1142 	if (chip->disabled)
1143 		goto unlock;
1144 
1145 	do {
1146 		status = azx_readl(chip, INTSTS);
1147 		if (status == 0 || status == 0xffffffff)
1148 			break;
1149 
1150 		handled = true;
1151 		active = false;
1152 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1153 			active = true;
1154 
1155 		/* clear rirb int */
1156 		status = azx_readb(chip, RIRBSTS);
1157 		if (status & RIRB_INT_MASK) {
1158 			active = true;
1159 			if (status & RIRB_INT_RESPONSE) {
1160 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1161 					udelay(80);
1162 				snd_hdac_bus_update_rirb(bus);
1163 			}
1164 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1165 		}
1166 	} while (active && ++repeat < 10);
1167 
1168  unlock:
1169 	spin_unlock(&bus->reg_lock);
1170 
1171 	return IRQ_RETVAL(handled);
1172 }
1173 EXPORT_SYMBOL_GPL(azx_interrupt);
1174 
1175 /*
1176  * Codec interface
1177  */
1178 
1179 /*
1180  * Probe the given codec address
1181  */
1182 static int probe_codec(struct azx *chip, int addr)
1183 {
1184 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1185 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1186 	struct hdac_bus *bus = azx_bus(chip);
1187 	int err;
1188 	unsigned int res = -1;
1189 
1190 	mutex_lock(&bus->cmd_mutex);
1191 	chip->probing = 1;
1192 	azx_send_cmd(bus, cmd);
1193 	err = azx_get_response(bus, addr, &res);
1194 	chip->probing = 0;
1195 	mutex_unlock(&bus->cmd_mutex);
1196 	if (err < 0 || res == -1)
1197 		return -EIO;
1198 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1199 	return 0;
1200 }
1201 
1202 void snd_hda_bus_reset(struct hda_bus *bus)
1203 {
1204 	struct azx *chip = bus_to_azx(&bus->core);
1205 
1206 	bus->in_reset = 1;
1207 	azx_stop_chip(chip);
1208 	azx_init_chip(chip, true);
1209 	if (bus->core.chip_init)
1210 		snd_hda_bus_reset_codecs(bus);
1211 	bus->in_reset = 0;
1212 }
1213 
1214 /* HD-audio bus initialization */
1215 int azx_bus_init(struct azx *chip, const char *model)
1216 {
1217 	struct hda_bus *bus = &chip->bus;
1218 	int err;
1219 
1220 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1221 	if (err < 0)
1222 		return err;
1223 
1224 	bus->card = chip->card;
1225 	mutex_init(&bus->prepare_mutex);
1226 	bus->pci = chip->pci;
1227 	bus->modelname = model;
1228 	bus->mixer_assigned = -1;
1229 	bus->core.snoop = azx_snoop(chip);
1230 	if (chip->get_position[0] != azx_get_pos_lpib ||
1231 	    chip->get_position[1] != azx_get_pos_lpib)
1232 		bus->core.use_posbuf = true;
1233 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1234 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1235 		bus->core.corbrp_self_clear = true;
1236 
1237 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1238 		bus->core.align_bdle_4k = true;
1239 
1240 	/* AMD chipsets often cause communication stalls upon certain
1241 	 * sequences such as pin detection.  It seems that forcing synced
1242 	 * access works around the stall.  Grrr...
1243 	 */
1244 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1245 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1246 		bus->core.sync_write = 1;
1247 		bus->allow_bus_reset = 1;
1248 	}
1249 
1250 	return 0;
1251 }
1252 EXPORT_SYMBOL_GPL(azx_bus_init);
1253 
1254 /* Probe codecs */
1255 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1256 {
1257 	struct hdac_bus *bus = azx_bus(chip);
1258 	int c, codecs, err;
1259 
1260 	codecs = 0;
1261 	if (!max_slots)
1262 		max_slots = AZX_DEFAULT_CODECS;
1263 
1264 	/* First try to probe all given codec slots */
1265 	for (c = 0; c < max_slots; c++) {
1266 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1267 			if (probe_codec(chip, c) < 0) {
1268 				/* Some BIOSes report codec addresses
1269 				 * that don't exist
1270 				 */
1271 				dev_warn(chip->card->dev,
1272 					 "Codec #%d probe error; disabling it...\n", c);
1273 				bus->codec_mask &= ~(1 << c);
1274 				/* Worse, accessing a non-existent codec
1275 				 * often screws up the controller chip and
1276 				 * disturbs further communication.  Thus, if
1277 				 * an error occurs during probing, it is
1278 				 * better to reset the controller chip to
1279 				 * get back to a sane state.
1280 				 */
1281 				azx_stop_chip(chip);
1282 				azx_init_chip(chip, true);
1283 			}
1284 		}
1285 	}
1286 
1287 	/* Then create codec instances */
1288 	for (c = 0; c < max_slots; c++) {
1289 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1290 			struct hda_codec *codec;
1291 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1292 			if (err < 0)
1293 				continue;
1294 			codec->jackpoll_interval = chip->jackpoll_interval;
1295 			codec->beep_mode = chip->beep_mode;
1296 			codecs++;
1297 		}
1298 	}
1299 	if (!codecs) {
1300 		dev_err(chip->card->dev, "no codecs initialized\n");
1301 		return -ENXIO;
1302 	}
1303 	return 0;
1304 }
1305 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1306 
1307 /* configure each codec instance */
1308 int azx_codec_configure(struct azx *chip)
1309 {
1310 	struct hda_codec *codec, *next;
1311 
1312 	/* use _safe version here since snd_hda_codec_configure() deregisters
1313 	 * the device upon error and deletes itself from the bus list.
1314 	 */
1315 	list_for_each_codec_safe(codec, next, &chip->bus) {
1316 		snd_hda_codec_configure(codec);
1317 	}
1318 
1319 	if (!azx_bus(chip)->num_codecs)
1320 		return -ENODEV;
1321 	return 0;
1322 }
1323 EXPORT_SYMBOL_GPL(azx_codec_configure);
1324 
1325 static int stream_direction(struct azx *chip, unsigned char index)
1326 {
1327 	if (index >= chip->capture_index_offset &&
1328 	    index < chip->capture_index_offset + chip->capture_streams)
1329 		return SNDRV_PCM_STREAM_CAPTURE;
1330 	return SNDRV_PCM_STREAM_PLAYBACK;
1331 }
1332 
1333 /* initialize SD streams */
1334 int azx_init_streams(struct azx *chip)
1335 {
1336 	int i;
1337 	int stream_tags[2] = { 0, 0 };
1338 
1339 	/* initialize each stream (aka device):
1340 	 * assign the starting BDL address to each stream (device)
1341 	 * and initialize it
1342 	 */
1343 	for (i = 0; i < chip->num_streams; i++) {
1344 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1345 		int dir, tag;
1346 
1347 		if (!azx_dev)
1348 			return -ENOMEM;
1349 
1350 		dir = stream_direction(chip, i);
1351 		/* The stream tag must be unique throughout
1352 		 * the stream direction group;
1353 		 * valid values are 1...15.
1354 		 * Use separate per-direction tags if the flag
1355 		 * AZX_DCAPS_SEPARATE_STREAM_TAG is set.
1356 		 */
1357 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1358 			tag = ++stream_tags[dir];
1359 		else
1360 			tag = i + 1;
1361 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1362 				     i, dir, tag);
1363 	}
1364 
1365 	return 0;
1366 }
1367 EXPORT_SYMBOL_GPL(azx_init_streams);
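
/*
 * Tag assignment example (assuming 4 capture + 4 playback streams and
 * capture_index_offset == 0, which is the usual layout):
 *
 *	index:               0  1  2  3  4  5  6  7
 *	direction:           C  C  C  C  P  P  P  P
 *	tag (default):       1  2  3  4  5  6  7  8
 *	tag (separate tags): 1  2  3  4  1  2  3  4
 */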
1368 
1369 void azx_free_streams(struct azx *chip)
1370 {
1371 	struct hdac_bus *bus = azx_bus(chip);
1372 	struct hdac_stream *s;
1373 
1374 	while (!list_empty(&bus->stream_list)) {
1375 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1376 		list_del(&s->list);
1377 		kfree(stream_to_azx_dev(s));
1378 	}
1379 }
1380 EXPORT_SYMBOL_GPL(azx_free_streams);
1381