xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision 9fb29c73)
1 /*
2  *
3  *  Implementation of the primary ALSA driver code base for Intel HD Audio.
4  *
5  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
6  *
7  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8  *                     PeiSen Hou <pshou@realtek.com.tw>
9  *
10  *  This program is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License as published by the Free
12  *  Software Foundation; either version 2 of the License, or (at your option)
13  *  any later version.
14  *
15  *  This program is distributed in the hope that it will be useful, but WITHOUT
16  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  *  more details.
19  *
20  *
21  */
22 
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 
31 #ifdef CONFIG_X86
32 /* for ART-to-TSC conversion */
33 #include <asm/tsc.h>
34 #endif
35 
36 #include <sound/core.h>
37 #include <sound/initval.h>
38 #include "hda_controller.h"
39 
40 #define CREATE_TRACE_POINTS
41 #include "hda_controller_trace.h"
42 
43 /* DSP lock helpers */
44 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
45 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
46 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
47 
48 /* assign a stream for the PCM */
49 static inline struct azx_dev *
50 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
51 {
52 	struct hdac_stream *s;
53 
54 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
55 	if (!s)
56 		return NULL;
57 	return stream_to_azx_dev(s);
58 }
59 
60 /* release the assigned stream */
61 static inline void azx_release_device(struct azx_dev *azx_dev)
62 {
63 	snd_hdac_stream_release(azx_stream(azx_dev));
64 }
65 
66 static inline struct hda_pcm_stream *
67 to_hda_pcm_stream(struct snd_pcm_substream *substream)
68 {
69 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
70 	return &apcm->info->stream[substream->stream];
71 }
72 
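/*
 * Adjust a link-time timestamp by the codec-side delay reported via the
 * optional get_delay op: the delay is added for capture streams and
 * subtracted (clamped at zero) for playback streams.
 */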
73 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
74 				u64 nsec)
75 {
76 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
77 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
78 	u64 codec_frames, codec_nsecs;
79 
80 	if (!hinfo->ops.get_delay)
81 		return nsec;
82 
83 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
84 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
85 			      substream->runtime->rate);
86 
87 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
88 		return nsec + codec_nsecs;
89 
90 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
91 }
92 
93 /*
94  * PCM ops
95  */
96 
97 static int azx_pcm_close(struct snd_pcm_substream *substream)
98 {
99 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
100 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
101 	struct azx *chip = apcm->chip;
102 	struct azx_dev *azx_dev = get_azx_dev(substream);
103 
104 	trace_azx_pcm_close(chip, azx_dev);
105 	mutex_lock(&chip->open_mutex);
106 	azx_release_device(azx_dev);
107 	if (hinfo->ops.close)
108 		hinfo->ops.close(hinfo, apcm->codec, substream);
109 	snd_hda_power_down(apcm->codec);
110 	mutex_unlock(&chip->open_mutex);
111 	snd_hda_codec_pcm_put(apcm->info);
112 	return 0;
113 }
114 
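/*
 * hw_params only allocates the buffer pages here; the stream registers and
 * BDL are programmed later in the prepare callback, so the cached bufsize,
 * period_bytes and format_val are cleared to force a fresh setup.
 */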
115 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
116 			     struct snd_pcm_hw_params *hw_params)
117 {
118 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
119 	struct azx *chip = apcm->chip;
120 	struct azx_dev *azx_dev = get_azx_dev(substream);
121 	int ret;
122 
123 	trace_azx_pcm_hw_params(chip, azx_dev);
124 	dsp_lock(azx_dev);
125 	if (dsp_is_locked(azx_dev)) {
126 		ret = -EBUSY;
127 		goto unlock;
128 	}
129 
130 	azx_dev->core.bufsize = 0;
131 	azx_dev->core.period_bytes = 0;
132 	azx_dev->core.format_val = 0;
133 	ret = snd_pcm_lib_malloc_pages(substream,
134 				       params_buffer_bytes(hw_params));
135 
136 unlock:
137 	dsp_unlock(azx_dev);
138 	return ret;
139 }
140 
141 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
142 {
143 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
144 	struct azx_dev *azx_dev = get_azx_dev(substream);
145 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
146 	int err;
147 
148 	/* reset BDL address */
149 	dsp_lock(azx_dev);
150 	if (!dsp_is_locked(azx_dev))
151 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
152 
153 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
154 
155 	err = snd_pcm_lib_free_pages(substream);
156 	azx_stream(azx_dev)->prepared = 0;
157 	dsp_unlock(azx_dev);
158 	return err;
159 }
160 
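/*
 * prepare: reset the assigned stream, compute the HDA stream format from the
 * runtime parameters (honoring any SPDIF ctls), program the stream registers
 * and BDL, then let the codec side prepare with the resulting stream tag.
 */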
161 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
162 {
163 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
164 	struct azx *chip = apcm->chip;
165 	struct azx_dev *azx_dev = get_azx_dev(substream);
166 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
167 	struct snd_pcm_runtime *runtime = substream->runtime;
168 	unsigned int format_val, stream_tag;
169 	int err;
170 	struct hda_spdif_out *spdif =
171 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
172 	unsigned short ctls = spdif ? spdif->ctls : 0;
173 
174 	trace_azx_pcm_prepare(chip, azx_dev);
175 	dsp_lock(azx_dev);
176 	if (dsp_is_locked(azx_dev)) {
177 		err = -EBUSY;
178 		goto unlock;
179 	}
180 
181 	snd_hdac_stream_reset(azx_stream(azx_dev));
182 	format_val = snd_hdac_calc_stream_format(runtime->rate,
183 						runtime->channels,
184 						runtime->format,
185 						hinfo->maxbps,
186 						ctls);
187 	if (!format_val) {
188 		dev_err(chip->card->dev,
189 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
190 			runtime->rate, runtime->channels, runtime->format);
191 		err = -EINVAL;
192 		goto unlock;
193 	}
194 
195 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
196 	if (err < 0)
197 		goto unlock;
198 
199 	snd_hdac_stream_setup(azx_stream(azx_dev));
200 
201 	stream_tag = azx_dev->core.stream_tag;
202 	/* CA-IBG chips need the playback stream starting from 1 */
203 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
204 	    stream_tag > chip->capture_streams)
205 		stream_tag -= chip->capture_streams;
206 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
207 				     azx_dev->core.format_val, substream);
208 
209  unlock:
210 	if (!err)
211 		azx_stream(azx_dev)->prepared = 1;
212 	dsp_unlock(azx_dev);
213 	return err;
214 }
215 
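/*
 * trigger: start/stop all substreams linked to this one on the same card.
 * The per-stream SYNC bits are set first so that the linked streams start in
 * lockstep, then cleared again once the hardware reports them as synced.
 */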
216 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
217 {
218 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
219 	struct azx *chip = apcm->chip;
220 	struct hdac_bus *bus = azx_bus(chip);
221 	struct azx_dev *azx_dev;
222 	struct snd_pcm_substream *s;
223 	struct hdac_stream *hstr;
224 	bool start;
225 	int sbits = 0;
226 	int sync_reg;
227 
228 	azx_dev = get_azx_dev(substream);
229 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
230 
231 	hstr = azx_stream(azx_dev);
232 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
233 		sync_reg = AZX_REG_OLD_SSYNC;
234 	else
235 		sync_reg = AZX_REG_SSYNC;
236 
237 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
238 		return -EPIPE;
239 
240 	switch (cmd) {
241 	case SNDRV_PCM_TRIGGER_START:
242 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
243 	case SNDRV_PCM_TRIGGER_RESUME:
244 		start = true;
245 		break;
246 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
247 	case SNDRV_PCM_TRIGGER_SUSPEND:
248 	case SNDRV_PCM_TRIGGER_STOP:
249 		start = false;
250 		break;
251 	default:
252 		return -EINVAL;
253 	}
254 
255 	snd_pcm_group_for_each_entry(s, substream) {
256 		if (s->pcm->card != substream->pcm->card)
257 			continue;
258 		azx_dev = get_azx_dev(s);
259 		sbits |= 1 << azx_dev->core.index;
260 		snd_pcm_trigger_done(s, substream);
261 	}
262 
263 	spin_lock(&bus->reg_lock);
264 
265 	/* first, set SYNC bits of corresponding streams */
266 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
267 
268 	snd_pcm_group_for_each_entry(s, substream) {
269 		if (s->pcm->card != substream->pcm->card)
270 			continue;
271 		azx_dev = get_azx_dev(s);
272 		if (start) {
273 			azx_dev->insufficient = 1;
274 			snd_hdac_stream_start(azx_stream(azx_dev), true);
275 		} else {
276 			snd_hdac_stream_stop(azx_stream(azx_dev));
277 		}
278 	}
279 	spin_unlock(&bus->reg_lock);
280 
281 	snd_hdac_stream_sync(hstr, start, sbits);
282 
283 	spin_lock(&bus->reg_lock);
284 	/* reset SYNC bits */
285 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
286 	if (start)
287 		snd_hdac_stream_timecounter_init(hstr, sbits);
288 	spin_unlock(&bus->reg_lock);
289 	return 0;
290 }
291 
292 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
293 {
294 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
295 }
296 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
297 
298 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
299 {
300 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
301 }
302 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
303 
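/*
 * Return the current DMA position in bytes, using either the chip-specific
 * get_position callback or the position buffer, and refresh runtime->delay
 * with the controller- and codec-side delays.
 */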
304 unsigned int azx_get_position(struct azx *chip,
305 			      struct azx_dev *azx_dev)
306 {
307 	struct snd_pcm_substream *substream = azx_dev->core.substream;
308 	unsigned int pos;
309 	int stream = substream->stream;
310 	int delay = 0;
311 
312 	if (chip->get_position[stream])
313 		pos = chip->get_position[stream](chip, azx_dev);
314 	else /* use the position buffer as default */
315 		pos = azx_get_pos_posbuf(chip, azx_dev);
316 
317 	if (pos >= azx_dev->core.bufsize)
318 		pos = 0;
319 
320 	if (substream->runtime) {
321 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
322 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
323 
324 		if (chip->get_delay[stream])
325 			delay += chip->get_delay[stream](chip, azx_dev, pos);
326 		if (hinfo->ops.get_delay)
327 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
328 						      substream);
329 		substream->runtime->delay = delay;
330 	}
331 
332 	trace_azx_get_position(chip, azx_dev, pos, delay);
333 	return pos;
334 }
335 EXPORT_SYMBOL_GPL(azx_get_position);
336 
337 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
338 {
339 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
340 	struct azx *chip = apcm->chip;
341 	struct azx_dev *azx_dev = get_azx_dev(substream);
342 	return bytes_to_frames(substream->runtime,
343 			       azx_get_position(chip, azx_dev));
344 }
345 
346 /*
347  * azx_scale64: Scale base by mult/div while not overflowing sanely
348  *
349  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
350  *
351  *  The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48000
352  *  seconds, which is about 384307 seconds, i.e. ~4.5 days.
353  *
354  *  This scales the calculation so that overflow happens only after 2^64 /
355  *  48000 seconds, which is pretty large!
356  *
357  * In the calculation below:
358  *	base may overflow, but since there isn't any additional division
359  *	performed on base it's OK
360  *	rem can't overflow because both are 32-bit values
361  */
362 
363 #ifdef CONFIG_X86
364 static u64 azx_scale64(u64 base, u32 num, u32 den)
365 {
366 	u64 rem;
367 
368 	rem = do_div(base, den);
369 
370 	base *= num;
371 	rem *= num;
372 
373 	do_div(rem, den);
374 
375 	return base + rem;
376 }
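/*
 * Worked example (illustration only): converting 96000 link samples at a
 * 48 kHz rate to nanoseconds gives
 *	azx_scale64(96000, NSEC_PER_SEC, 48000) == 2 * NSEC_PER_SEC,
 * i.e. exactly two seconds, computed as (96000 / 48000) * 10^9 with the
 * remainder handled separately so the intermediate product cannot overflow.
 */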
377 
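/*
 * Read a correlated pair of (link position, ART/TSC) counters through the
 * GTSCC capture mechanism and convert them for get_device_system_crosststamp().
 * The read is retried when the wall clock is too close to a frame rollover.
 */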
378 static int azx_get_sync_time(ktime_t *device,
379 		struct system_counterval_t *system, void *ctx)
380 {
381 	struct snd_pcm_substream *substream = ctx;
382 	struct azx_dev *azx_dev = get_azx_dev(substream);
383 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
384 	struct azx *chip = apcm->chip;
385 	struct snd_pcm_runtime *runtime;
386 	u64 ll_counter, ll_counter_l, ll_counter_h;
387 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
388 	u32 wallclk_ctr, wallclk_cycles;
389 	bool direction;
390 	u32 dma_select;
391 	u32 timeout = 200;
392 	u32 retry_count = 0;
393 
394 	runtime = substream->runtime;
395 
396 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
397 		direction = 1;
398 	else
399 		direction = 0;
400 
401 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
402 	do {
403 		timeout = 100;
404 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
405 					(azx_dev->core.stream_tag - 1);
406 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
407 
408 		/* Enable the capture */
409 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
410 
411 		while (timeout) {
412 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
413 						GTSCC_TSCCD_MASK)
414 				break;
415 
416 			timeout--;
417 		}
418 
419 		if (!timeout) {
420 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
421 			return -EIO;
422 		}
423 
424 		/* Read wall clock counter */
425 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
426 
427 		/* Read TSC counter */
428 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
429 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
430 
431 		/* Read Link counter */
432 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
433 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
434 
435 		/* Ack: registers read done */
436 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
437 
438 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
439 						tsc_counter_l;
440 
441 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
442 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
443 
444 		/*
445 		 * An error can occur near a frame "rollover". The
446 		 * clocks-in-frame value indicates whether this error may
447 		 * have occurred. Here we use a margin of 10 cycles, i.e.
448 		 * HDA_MAX_CYCLE_OFFSET.
449 		 */
450 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
451 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
452 			break;
453 
454 		/*
455 		 * Sleep before we read again, else we may again get a
456 		 * value near MAX_CYCLE. Try to sleep for a different
457 		 * amount of time so we don't hit the same number again.
458 		 */
459 		udelay(retry_count++);
460 
461 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
462 
463 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
464 		dev_err_ratelimited(chip->card->dev,
465 			"Error in WALFCC cycle count\n");
466 		return -EIO;
467 	}
468 
469 	*device = ns_to_ktime(azx_scale64(ll_counter,
470 				NSEC_PER_SEC, runtime->rate));
471 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
472 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
473 
474 	*system = convert_art_to_tsc(tsc_counter);
475 
476 	return 0;
477 }
478 
479 #else
480 static int azx_get_sync_time(ktime_t *device,
481 		struct system_counterval_t *system, void *ctx)
482 {
483 	return -ENXIO;
484 }
485 #endif
486 
487 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
488 			      struct system_device_crosststamp *xtstamp)
489 {
490 	return get_device_system_crosststamp(azx_get_sync_time,
491 					substream, NULL, xtstamp);
492 }
493 
494 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
495 				struct snd_pcm_audio_tstamp_config *ts)
496 {
497 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
498 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
499 			return true;
500 
501 	return false;
502 }
503 
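/*
 * get_time_info: report a LINK timestamp from the stream's timecounter, or a
 * LINK_SYNCHRONIZED timestamp via the cross-timestamp path when the hardware
 * supports it; otherwise fall back to the default system timestamp.
 */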
504 static int azx_get_time_info(struct snd_pcm_substream *substream,
505 			struct timespec *system_ts, struct timespec *audio_ts,
506 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
507 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
508 {
509 	struct azx_dev *azx_dev = get_azx_dev(substream);
510 	struct snd_pcm_runtime *runtime = substream->runtime;
511 	struct system_device_crosststamp xtstamp;
512 	int ret;
513 	u64 nsec;
514 
515 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
516 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
517 
518 		snd_pcm_gettime(substream->runtime, system_ts);
519 
520 		nsec = timecounter_read(&azx_dev->core.tc);
521 		nsec = div_u64(nsec, 3); /* can be optimized */
522 		if (audio_tstamp_config->report_delay)
523 			nsec = azx_adjust_codec_delay(substream, nsec);
524 
525 		*audio_ts = ns_to_timespec(nsec);
526 
527 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
528 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
529 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
530 
531 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
532 
533 		ret = azx_get_crosststamp(substream, &xtstamp);
534 		if (ret)
535 			return ret;
536 
537 		switch (runtime->tstamp_type) {
538 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
539 			return -EINVAL;
540 
541 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
542 			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
543 			break;
544 
545 		default:
546 			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
547 			break;
548 
549 		}
550 
551 		*audio_ts = ktime_to_timespec(xtstamp.device);
552 
553 		audio_tstamp_report->actual_type =
554 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
555 		audio_tstamp_report->accuracy_report = 1;
556 		/* 24 MHz WallClock == 42ns resolution */
557 		audio_tstamp_report->accuracy = 42;
558 
559 	} else {
560 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
561 	}
562 
563 	return 0;
564 }
565 
566 static struct snd_pcm_hardware azx_pcm_hw = {
567 	.info =			(SNDRV_PCM_INFO_MMAP |
568 				 SNDRV_PCM_INFO_INTERLEAVED |
569 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
570 				 SNDRV_PCM_INFO_MMAP_VALID |
571 				 /* No full-resume yet implemented */
572 				 /* SNDRV_PCM_INFO_RESUME |*/
573 				 SNDRV_PCM_INFO_PAUSE |
574 				 SNDRV_PCM_INFO_SYNC_START |
575 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
576 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
577 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
578 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
579 	.rates =		SNDRV_PCM_RATE_48000,
580 	.rate_min =		48000,
581 	.rate_max =		48000,
582 	.channels_min =		2,
583 	.channels_max =		2,
584 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
585 	.period_bytes_min =	128,
586 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
587 	.periods_min =		2,
588 	.periods_max =		AZX_MAX_FRAG,
589 	.fifo_size =		0,
590 };
591 
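/*
 * open: assign a hardware stream to the substream, merge the codec's PCM
 * capabilities into the runtime hw info, apply buffer/period constraints
 * (including the wall-clock wrap-around limit), and call the codec open op.
 */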
592 static int azx_pcm_open(struct snd_pcm_substream *substream)
593 {
594 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
595 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
596 	struct azx *chip = apcm->chip;
597 	struct azx_dev *azx_dev;
598 	struct snd_pcm_runtime *runtime = substream->runtime;
599 	int err;
600 	int buff_step;
601 
602 	snd_hda_codec_pcm_get(apcm->info);
603 	mutex_lock(&chip->open_mutex);
604 	azx_dev = azx_assign_device(chip, substream);
605 	trace_azx_pcm_open(chip, azx_dev);
606 	if (azx_dev == NULL) {
607 		err = -EBUSY;
608 		goto unlock;
609 	}
610 	runtime->private_data = azx_dev;
611 
612 	if (chip->gts_present)
613 		azx_pcm_hw.info = azx_pcm_hw.info |
614 			SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
615 
616 	runtime->hw = azx_pcm_hw;
617 	runtime->hw.channels_min = hinfo->channels_min;
618 	runtime->hw.channels_max = hinfo->channels_max;
619 	runtime->hw.formats = hinfo->formats;
620 	runtime->hw.rates = hinfo->rates;
621 	snd_pcm_limit_hw_rates(runtime);
622 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
623 
624 	/* avoid wrap-around with wall-clock */
625 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
626 				     20,
627 				     178000000);
628 
629 	if (chip->align_buffer_size)
630 		/* constrain buffer sizes to be a multiple of 128
631 		   bytes. This is more efficient in terms of memory
632 		   access but isn't required by the HDA spec and
633 		   prevents users from specifying exact period/buffer
634 		   sizes. For example, at 44.1 kHz a period size set
635 		   to 20 ms will be rounded to 19.59 ms. */
636 		buff_step = 128;
637 	else
638 		/* Don't enforce steps on buffer sizes; they still need
639 		   to be a multiple of 4 bytes (HDA spec). Tested on
640 		   Intel HDA controllers; may not work on all devices,
641 		   in which case this option needs to be disabled */
642 		buff_step = 4;
643 
644 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
645 				   buff_step);
646 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
647 				   buff_step);
648 	snd_hda_power_up(apcm->codec);
649 	if (hinfo->ops.open)
650 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
651 	else
652 		err = -ENODEV;
653 	if (err < 0) {
654 		azx_release_device(azx_dev);
655 		goto powerdown;
656 	}
657 	snd_pcm_limit_hw_rates(runtime);
658 	/* sanity check */
659 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
660 	    snd_BUG_ON(!runtime->hw.channels_max) ||
661 	    snd_BUG_ON(!runtime->hw.formats) ||
662 	    snd_BUG_ON(!runtime->hw.rates)) {
663 		azx_release_device(azx_dev);
664 		if (hinfo->ops.close)
665 			hinfo->ops.close(hinfo, apcm->codec, substream);
666 		err = -EINVAL;
667 		goto powerdown;
668 	}
669 
670 	/* disable LINK_ATIME timestamps for capture streams
671 	   until we figure out how to handle digital inputs */
672 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
673 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
674 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
675 	}
676 
677 	snd_pcm_set_sync(substream);
678 	mutex_unlock(&chip->open_mutex);
679 	return 0;
680 
681  powerdown:
682 	snd_hda_power_down(apcm->codec);
683  unlock:
684 	mutex_unlock(&chip->open_mutex);
685 	snd_hda_codec_pcm_put(apcm->info);
686 	return err;
687 }
688 
689 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
690 			struct vm_area_struct *area)
691 {
692 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
693 	struct azx *chip = apcm->chip;
694 	if (chip->ops->pcm_mmap_prepare)
695 		chip->ops->pcm_mmap_prepare(substream, area);
696 	return snd_pcm_lib_default_mmap(substream, area);
697 }
698 
699 static const struct snd_pcm_ops azx_pcm_ops = {
700 	.open = azx_pcm_open,
701 	.close = azx_pcm_close,
702 	.ioctl = snd_pcm_lib_ioctl,
703 	.hw_params = azx_pcm_hw_params,
704 	.hw_free = azx_pcm_hw_free,
705 	.prepare = azx_pcm_prepare,
706 	.trigger = azx_pcm_trigger,
707 	.pointer = azx_pcm_pointer,
708 	.get_time_info =  azx_get_time_info,
709 	.mmap = azx_pcm_mmap,
710 	.page = snd_pcm_sgbuf_ops_page,
711 };
712 
713 static void azx_pcm_free(struct snd_pcm *pcm)
714 {
715 	struct azx_pcm *apcm = pcm->private_data;
716 	if (apcm) {
717 		list_del(&apcm->list);
718 		apcm->info->pcm = NULL;
719 		kfree(apcm);
720 	}
721 }
722 
723 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
724 
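/*
 * Create an ALSA PCM device for the given hda_pcm, hook up azx_pcm_ops for
 * each direction that provides substreams, and preallocate DMA buffers up to
 * MAX_PREALLOC_SIZE.
 */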
725 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
726 			      struct hda_pcm *cpcm)
727 {
728 	struct hdac_bus *bus = &_bus->core;
729 	struct azx *chip = bus_to_azx(bus);
730 	struct snd_pcm *pcm;
731 	struct azx_pcm *apcm;
732 	int pcm_dev = cpcm->device;
733 	unsigned int size;
734 	int s, err;
735 	int type = SNDRV_DMA_TYPE_DEV_SG;
736 
737 	list_for_each_entry(apcm, &chip->pcm_list, list) {
738 		if (apcm->pcm->device == pcm_dev) {
739 			dev_err(chip->card->dev, "PCM %d already exists\n",
740 				pcm_dev);
741 			return -EBUSY;
742 		}
743 	}
744 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
745 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
746 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
747 			  &pcm);
748 	if (err < 0)
749 		return err;
750 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
751 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
752 	if (apcm == NULL) {
753 		snd_device_free(chip->card, pcm);
754 		return -ENOMEM;
755 	}
756 	apcm->chip = chip;
757 	apcm->pcm = pcm;
758 	apcm->codec = codec;
759 	apcm->info = cpcm;
760 	pcm->private_data = apcm;
761 	pcm->private_free = azx_pcm_free;
762 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
763 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
764 	list_add_tail(&apcm->list, &chip->pcm_list);
765 	cpcm->pcm = pcm;
766 	for (s = 0; s < 2; s++) {
767 		if (cpcm->stream[s].substreams)
768 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
769 	}
770 	/* buffer pre-allocation */
771 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
772 	if (size > MAX_PREALLOC_SIZE)
773 		size = MAX_PREALLOC_SIZE;
774 	if (chip->uc_buffer)
775 		type = SNDRV_DMA_TYPE_DEV_UC_SG;
776 	snd_pcm_lib_preallocate_pages_for_all(pcm, type,
777 					      chip->card->dev,
778 					      size, MAX_PREALLOC_SIZE);
779 	return 0;
780 }
781 
782 static unsigned int azx_command_addr(u32 cmd)
783 {
784 	unsigned int addr = cmd >> 28;
785 
786 	if (addr >= AZX_MAX_CODECS) {
787 		snd_BUG();
788 		addr = 0;
789 	}
790 
791 	return addr;
792 }
793 
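/*
 * RIRB response handling degrades gracefully on timeouts: first poll the
 * RIRB explicitly, then switch to permanent polling mode, then disable MSI,
 * and finally either request a bus reset or fall back to single_cmd mode.
 */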
794 /* receive a response */
795 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
796 				 unsigned int *res)
797 {
798 	struct azx *chip = bus_to_azx(bus);
799 	struct hda_bus *hbus = &chip->bus;
800 	unsigned long timeout;
801 	unsigned long loopcounter;
802 	int do_poll = 0;
803 
804  again:
805 	timeout = jiffies + msecs_to_jiffies(1000);
806 
807 	for (loopcounter = 0;; loopcounter++) {
808 		spin_lock_irq(&bus->reg_lock);
809 		if (chip->polling_mode || do_poll)
810 			snd_hdac_bus_update_rirb(bus);
811 		if (!bus->rirb.cmds[addr]) {
812 			if (!do_poll)
813 				chip->poll_count = 0;
814 			if (res)
815 				*res = bus->rirb.res[addr]; /* the last value */
816 			spin_unlock_irq(&bus->reg_lock);
817 			return 0;
818 		}
819 		spin_unlock_irq(&bus->reg_lock);
820 		if (time_after(jiffies, timeout))
821 			break;
822 		if (hbus->needs_damn_long_delay || loopcounter > 3000)
823 			msleep(2); /* temporary workaround */
824 		else {
825 			udelay(10);
826 			cond_resched();
827 		}
828 	}
829 
830 	if (hbus->no_response_fallback)
831 		return -EIO;
832 
833 	if (!chip->polling_mode && chip->poll_count < 2) {
834 		dev_dbg(chip->card->dev,
835 			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
836 			bus->last_cmd[addr]);
837 		do_poll = 1;
838 		chip->poll_count++;
839 		goto again;
840 	}
841 
842 
843 	if (!chip->polling_mode) {
844 		dev_warn(chip->card->dev,
845 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
846 			 bus->last_cmd[addr]);
847 		chip->polling_mode = 1;
848 		goto again;
849 	}
850 
851 	if (chip->msi) {
852 		dev_warn(chip->card->dev,
853 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
854 			 bus->last_cmd[addr]);
855 		if (chip->ops->disable_msi_reset_irq &&
856 		    chip->ops->disable_msi_reset_irq(chip) < 0)
857 			return -EIO;
858 		goto again;
859 	}
860 
861 	if (chip->probing) {
862 		/* If this critical timeout happens during the codec probing
863 		 * phase, this is likely an access to a non-existing codec
864 		 * slot.  Better to return an error and reset the system.
865 		 */
866 		return -EIO;
867 	}
868 
869 	/* no fallback mechanism? */
870 	if (!chip->fallback_to_single_cmd)
871 		return -EIO;
872 
873 	/* a fatal communication error; need either to reset or to fall back
874 	 * to the single_cmd mode
875 	 */
876 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
877 		hbus->response_reset = 1;
878 		return -EAGAIN; /* give a chance to retry */
879 	}
880 
881 	dev_err(chip->card->dev,
882 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
883 		bus->last_cmd[addr]);
884 	chip->single_cmd = 1;
885 	hbus->response_reset = 0;
886 	snd_hdac_bus_stop_cmd_io(bus);
887 	return -EIO;
888 }
889 
890 /*
891  * Use the single immediate command instead of CORB/RIRB for simplicity
892  *
893  * Note: according to Intel, this is not the preferred use.  The command was
894  *       intended for the BIOS only and may get confused with unsolicited
895  *       responses.  So we shouldn't use it for normal operation from the
896  *       driver.
897  *       The code is left here, however, for debugging/testing purposes.
898  */
899 
900 /* receive a response */
901 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
902 {
903 	int timeout = 50;
904 
905 	while (timeout--) {
906 		/* check the IRV (immediate result valid) bit */
907 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
908 			/* reuse rirb.res as the response return value */
909 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
910 			return 0;
911 		}
912 		udelay(1);
913 	}
914 	if (printk_ratelimit())
915 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
916 			azx_readw(chip, IRS));
917 	azx_bus(chip)->rirb.res[addr] = -1;
918 	return -EIO;
919 }
920 
921 /* send a command */
922 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
923 {
924 	struct azx *chip = bus_to_azx(bus);
925 	unsigned int addr = azx_command_addr(val);
926 	int timeout = 50;
927 
928 	bus->last_cmd[azx_command_addr(val)] = val;
929 	while (timeout--) {
930 		/* check ICB busy bit */
931 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
932 			/* Clear IRV valid bit */
933 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
934 				   AZX_IRS_VALID);
935 			azx_writel(chip, IC, val);
936 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
937 				   AZX_IRS_BUSY);
938 			return azx_single_wait_for_response(chip, addr);
939 		}
940 		udelay(1);
941 	}
942 	if (printk_ratelimit())
943 		dev_dbg(chip->card->dev,
944 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
945 			azx_readw(chip, IRS), val);
946 	return -EIO;
947 }
948 
949 /* receive a response */
950 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
951 				   unsigned int *res)
952 {
953 	if (res)
954 		*res = bus->rirb.res[addr];
955 	return 0;
956 }
957 
958 /*
959  * Below are the main callbacks from hda_codec.
960  *
961  * They are just skeletons that dispatch to sub-callbacks according to the
962  * current setting of chip->single_cmd.
963  */
964 
965 /* send a command */
966 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
967 {
968 	struct azx *chip = bus_to_azx(bus);
969 
970 	if (chip->disabled)
971 		return 0;
972 	if (chip->single_cmd)
973 		return azx_single_send_cmd(bus, val);
974 	else
975 		return snd_hdac_bus_send_cmd(bus, val);
976 }
977 
978 /* get a response */
979 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
980 			    unsigned int *res)
981 {
982 	struct azx *chip = bus_to_azx(bus);
983 
984 	if (chip->disabled)
985 		return 0;
986 	if (chip->single_cmd)
987 		return azx_single_get_response(bus, addr, res);
988 	else
989 		return azx_rirb_get_response(bus, addr, res);
990 }
991 
992 static const struct hdac_bus_ops bus_core_ops = {
993 	.command = azx_send_cmd,
994 	.get_response = azx_get_response,
995 };
996 
997 #ifdef CONFIG_SND_HDA_DSP_LOADER
998 /*
999  * DSP loading code (e.g. for CA0132)
1000  */
1001 
1002 /* use the first stream for loading DSP */
1003 static struct azx_dev *
1004 azx_get_dsp_loader_dev(struct azx *chip)
1005 {
1006 	struct hdac_bus *bus = azx_bus(chip);
1007 	struct hdac_stream *s;
1008 
1009 	list_for_each_entry(s, &bus->stream_list, list)
1010 		if (s->index == chip->playback_index_offset)
1011 			return stream_to_azx_dev(s);
1012 
1013 	return NULL;
1014 }
1015 
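/*
 * Borrow the first playback stream to DMA the DSP firmware image; if that
 * stream is currently opened, its state is saved here and restored in the
 * cleanup (or error) path.
 */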
1016 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
1017 				   unsigned int byte_size,
1018 				   struct snd_dma_buffer *bufp)
1019 {
1020 	struct hdac_bus *bus = &codec->bus->core;
1021 	struct azx *chip = bus_to_azx(bus);
1022 	struct azx_dev *azx_dev;
1023 	struct hdac_stream *hstr;
1024 	bool saved = false;
1025 	int err;
1026 
1027 	azx_dev = azx_get_dsp_loader_dev(chip);
1028 	hstr = azx_stream(azx_dev);
1029 	spin_lock_irq(&bus->reg_lock);
1030 	if (hstr->opened) {
1031 		chip->saved_azx_dev = *azx_dev;
1032 		saved = true;
1033 	}
1034 	spin_unlock_irq(&bus->reg_lock);
1035 
1036 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
1037 	if (err < 0) {
1038 		spin_lock_irq(&bus->reg_lock);
1039 		if (saved)
1040 			*azx_dev = chip->saved_azx_dev;
1041 		spin_unlock_irq(&bus->reg_lock);
1042 		return err;
1043 	}
1044 
1045 	hstr->prepared = 0;
1046 	return err;
1047 }
1048 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
1049 
1050 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1051 {
1052 	struct hdac_bus *bus = &codec->bus->core;
1053 	struct azx *chip = bus_to_azx(bus);
1054 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1055 
1056 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1057 }
1058 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1059 
1060 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1061 				    struct snd_dma_buffer *dmab)
1062 {
1063 	struct hdac_bus *bus = &codec->bus->core;
1064 	struct azx *chip = bus_to_azx(bus);
1065 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1066 	struct hdac_stream *hstr = azx_stream(azx_dev);
1067 
1068 	if (!dmab->area || !hstr->locked)
1069 		return;
1070 
1071 	snd_hdac_dsp_cleanup(hstr, dmab);
1072 	spin_lock_irq(&bus->reg_lock);
1073 	if (hstr->opened)
1074 		*azx_dev = chip->saved_azx_dev;
1075 	hstr->locked = false;
1076 	spin_unlock_irq(&bus->reg_lock);
1077 }
1078 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1079 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1080 
1081 /*
1082  * reset and start the controller registers
1083  */
1084 void azx_init_chip(struct azx *chip, bool full_reset)
1085 {
1086 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1087 		/* correct RINTCNT for CXT */
1088 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1089 			azx_writew(chip, RINTCNT, 0xc0);
1090 	}
1091 }
1092 EXPORT_SYMBOL_GPL(azx_init_chip);
1093 
1094 void azx_stop_all_streams(struct azx *chip)
1095 {
1096 	struct hdac_bus *bus = azx_bus(chip);
1097 	struct hdac_stream *s;
1098 
1099 	list_for_each_entry(s, &bus->stream_list, list)
1100 		snd_hdac_stream_stop(s);
1101 }
1102 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1103 
1104 void azx_stop_chip(struct azx *chip)
1105 {
1106 	snd_hdac_bus_stop_chip(azx_bus(chip));
1107 }
1108 EXPORT_SYMBOL_GPL(azx_stop_chip);
1109 
1110 /*
1111  * interrupt handler
1112  */
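/*
 * Per-stream IRQ handling: verify the position via the optional
 * position_check op and, if acceptable, signal snd_pcm_period_elapsed()
 * with the register lock temporarily dropped.
 */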
1113 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1114 {
1115 	struct azx *chip = bus_to_azx(bus);
1116 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1117 
1118 	/* check whether this IRQ is really acceptable */
1119 	if (!chip->ops->position_check ||
1120 	    chip->ops->position_check(chip, azx_dev)) {
1121 		spin_unlock(&bus->reg_lock);
1122 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1123 		spin_lock(&bus->reg_lock);
1124 	}
1125 }
1126 
1127 irqreturn_t azx_interrupt(int irq, void *dev_id)
1128 {
1129 	struct azx *chip = dev_id;
1130 	struct hdac_bus *bus = azx_bus(chip);
1131 	u32 status;
1132 	bool active, handled = false;
1133 	int repeat = 0; /* count for avoiding endless loop */
1134 
1135 #ifdef CONFIG_PM
1136 	if (azx_has_pm_runtime(chip))
1137 		if (!pm_runtime_active(chip->card->dev))
1138 			return IRQ_NONE;
1139 #endif
1140 
1141 	spin_lock(&bus->reg_lock);
1142 
1143 	if (chip->disabled)
1144 		goto unlock;
1145 
1146 	do {
1147 		status = azx_readl(chip, INTSTS);
1148 		if (status == 0 || status == 0xffffffff)
1149 			break;
1150 
1151 		handled = true;
1152 		active = false;
1153 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1154 			active = true;
1155 
1156 		/* clear rirb int */
1157 		status = azx_readb(chip, RIRBSTS);
1158 		if (status & RIRB_INT_MASK) {
1159 			active = true;
1160 			if (status & RIRB_INT_RESPONSE) {
1161 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1162 					udelay(80);
1163 				snd_hdac_bus_update_rirb(bus);
1164 			}
1165 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1166 		}
1167 	} while (active && ++repeat < 10);
1168 
1169  unlock:
1170 	spin_unlock(&bus->reg_lock);
1171 
1172 	return IRQ_RETVAL(handled);
1173 }
1174 EXPORT_SYMBOL_GPL(azx_interrupt);
1175 
1176 /*
1177  * Codec interface
1178  */
1179 
1180 /*
1181  * Probe the given codec address
1182  */
1183 static int probe_codec(struct azx *chip, int addr)
1184 {
1185 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1186 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1187 	struct hdac_bus *bus = azx_bus(chip);
1188 	int err;
1189 	unsigned int res = -1;
1190 
1191 	mutex_lock(&bus->cmd_mutex);
1192 	chip->probing = 1;
1193 	azx_send_cmd(bus, cmd);
1194 	err = azx_get_response(bus, addr, &res);
1195 	chip->probing = 0;
1196 	mutex_unlock(&bus->cmd_mutex);
1197 	if (err < 0 || res == -1)
1198 		return -EIO;
1199 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1200 	return 0;
1201 }
1202 
1203 void snd_hda_bus_reset(struct hda_bus *bus)
1204 {
1205 	struct azx *chip = bus_to_azx(&bus->core);
1206 
1207 	bus->in_reset = 1;
1208 	azx_stop_chip(chip);
1209 	azx_init_chip(chip, true);
1210 	if (bus->core.chip_init)
1211 		snd_hda_bus_reset_codecs(bus);
1212 	bus->in_reset = 0;
1213 }
1214 
1215 /* HD-audio bus initialization */
1216 int azx_bus_init(struct azx *chip, const char *model,
1217 		 const struct hdac_io_ops *io_ops)
1218 {
1219 	struct hda_bus *bus = &chip->bus;
1220 	int err;
1221 
1222 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops,
1223 				io_ops);
1224 	if (err < 0)
1225 		return err;
1226 
1227 	bus->card = chip->card;
1228 	mutex_init(&bus->prepare_mutex);
1229 	bus->pci = chip->pci;
1230 	bus->modelname = model;
1231 	bus->mixer_assigned = -1;
1232 	bus->core.snoop = azx_snoop(chip);
1233 	if (chip->get_position[0] != azx_get_pos_lpib ||
1234 	    chip->get_position[1] != azx_get_pos_lpib)
1235 		bus->core.use_posbuf = true;
1236 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1237 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1238 		bus->core.corbrp_self_clear = true;
1239 
1240 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1241 		bus->core.align_bdle_4k = true;
1242 
1243 	/* AMD chipsets often cause communication stalls upon certain
1244 	 * sequences such as pin detection.  It seems that forcing synced
1245 	 * access works around the stall.  Grrr...
1246 	 */
1247 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1248 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1249 		bus->core.sync_write = 1;
1250 		bus->allow_bus_reset = 1;
1251 	}
1252 
1253 	return 0;
1254 }
1255 EXPORT_SYMBOL_GPL(azx_bus_init);
1256 
1257 /* Probe codecs */
1258 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1259 {
1260 	struct hdac_bus *bus = azx_bus(chip);
1261 	int c, codecs, err;
1262 
1263 	codecs = 0;
1264 	if (!max_slots)
1265 		max_slots = AZX_DEFAULT_CODECS;
1266 
1267 	/* First try to probe all given codec slots */
1268 	for (c = 0; c < max_slots; c++) {
1269 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1270 			if (probe_codec(chip, c) < 0) {
1271 				/* Some BIOSen give you wrong codec addresses
1272 				 * that don't exist
1273 				 */
1274 				dev_warn(chip->card->dev,
1275 					 "Codec #%d probe error; disabling it...\n", c);
1276 				bus->codec_mask &= ~(1 << c);
1277 				/* Worse, accessing a non-existing
1278 				 * codec often screws up the controller chip
1279 				 * and disturbs further communication.
1280 				 * Thus if an error occurs during probing,
1281 				 * it is better to reset the controller chip
1282 				 * to get back to a sane state.
1283 				 */
1284 				azx_stop_chip(chip);
1285 				azx_init_chip(chip, true);
1286 			}
1287 		}
1288 	}
1289 
1290 	/* Then create codec instances */
1291 	for (c = 0; c < max_slots; c++) {
1292 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1293 			struct hda_codec *codec;
1294 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1295 			if (err < 0)
1296 				continue;
1297 			codec->jackpoll_interval = chip->jackpoll_interval;
1298 			codec->beep_mode = chip->beep_mode;
1299 			codecs++;
1300 		}
1301 	}
1302 	if (!codecs) {
1303 		dev_err(chip->card->dev, "no codecs initialized\n");
1304 		return -ENXIO;
1305 	}
1306 	return 0;
1307 }
1308 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1309 
1310 /* configure each codec instance */
1311 int azx_codec_configure(struct azx *chip)
1312 {
1313 	struct hda_codec *codec, *next;
1314 
1315 	/* use _safe version here since snd_hda_codec_configure() deregisters
1316 	 * the device upon error and deletes itself from the bus list.
1317 	 */
1318 	list_for_each_codec_safe(codec, next, &chip->bus) {
1319 		snd_hda_codec_configure(codec);
1320 	}
1321 
1322 	if (!azx_bus(chip)->num_codecs)
1323 		return -ENODEV;
1324 	return 0;
1325 }
1326 EXPORT_SYMBOL_GPL(azx_codec_configure);
1327 
1328 static int stream_direction(struct azx *chip, unsigned char index)
1329 {
1330 	if (index >= chip->capture_index_offset &&
1331 	    index < chip->capture_index_offset + chip->capture_streams)
1332 		return SNDRV_PCM_STREAM_CAPTURE;
1333 	return SNDRV_PCM_STREAM_PLAYBACK;
1334 }
1335 
1336 /* initialize SD streams */
1337 int azx_init_streams(struct azx *chip)
1338 {
1339 	int i;
1340 	int stream_tags[2] = { 0, 0 };
1341 
1342 	/* initialize each stream (aka device):
1343 	 * assign the starting BDL address to each stream (device)
1344 	 * and initialize it
1345 	 */
1346 	for (i = 0; i < chip->num_streams; i++) {
1347 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1348 		int dir, tag;
1349 
1350 		if (!azx_dev)
1351 			return -ENOMEM;
1352 
1353 		dir = stream_direction(chip, i);
1354 		/* The stream tag must be unique throughout
1355 		 * the stream direction group;
1356 		 * valid values are 1...15.
1357 		 * Use a separate per-direction tag if the flag
1358 		 * AZX_DCAPS_SEPARATE_STREAM_TAG is set.
1359 		 */
1360 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1361 			tag = ++stream_tags[dir];
1362 		else
1363 			tag = i + 1;
1364 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1365 				     i, dir, tag);
1366 	}
1367 
1368 	return 0;
1369 }
1370 EXPORT_SYMBOL_GPL(azx_init_streams);
1371 
1372 void azx_free_streams(struct azx *chip)
1373 {
1374 	struct hdac_bus *bus = azx_bus(chip);
1375 	struct hdac_stream *s;
1376 
1377 	while (!list_empty(&bus->stream_list)) {
1378 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1379 		list_del(&s->list);
1380 		kfree(stream_to_azx_dev(s));
1381 	}
1382 }
1383 EXPORT_SYMBOL_GPL(azx_free_streams);
1384