xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision d34e1b7b9a7fa87fc5309447e4a14bd511de4bc9)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Implementation of primary alsa driver code base for Intel HD Audio.
5  *
6  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
7  *
8  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9  *                     PeiSen Hou <pshou@realtek.com.tw>
10  */
11 
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 
20 #ifdef CONFIG_X86
21 /* for art-tsc conversion */
22 #include <asm/tsc.h>
23 #endif
24 
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include "hda_controller_trace.h"
31 
32 /* DSP lock helpers */
33 #define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
34 #define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
35 #define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
36 
37 /* assign a stream for the PCM */
38 static inline struct azx_dev *
39 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
40 {
41 	struct hdac_stream *s;
42 
43 	s = snd_hdac_stream_assign(azx_bus(chip), substream);
44 	if (!s)
45 		return NULL;
46 	return stream_to_azx_dev(s);
47 }
48 
49 /* release the assigned stream */
50 static inline void azx_release_device(struct azx_dev *azx_dev)
51 {
52 	snd_hdac_stream_release(azx_stream(azx_dev));
53 }
54 
55 static inline struct hda_pcm_stream *
56 to_hda_pcm_stream(struct snd_pcm_substream *substream)
57 {
58 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
59 	return &apcm->info->stream[substream->stream];
60 }
61 
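/* adjust the link-derived timestamp by the codec-side delay reported via get_delay */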
62 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
63 				u64 nsec)
64 {
65 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
66 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
67 	u64 codec_frames, codec_nsecs;
68 
69 	if (!hinfo->ops.get_delay)
70 		return nsec;
71 
72 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
73 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
74 			      substream->runtime->rate);
75 
76 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
77 		return nsec + codec_nsecs;
78 
79 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
80 }
81 
82 /*
83  * PCM ops
84  */
85 
86 static int azx_pcm_close(struct snd_pcm_substream *substream)
87 {
88 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
89 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
90 	struct azx *chip = apcm->chip;
91 	struct azx_dev *azx_dev = get_azx_dev(substream);
92 
93 	trace_azx_pcm_close(chip, azx_dev);
94 	mutex_lock(&chip->open_mutex);
95 	azx_release_device(azx_dev);
96 	if (hinfo->ops.close)
97 		hinfo->ops.close(hinfo, apcm->codec, substream);
98 	snd_hda_power_down(apcm->codec);
99 	mutex_unlock(&chip->open_mutex);
100 	snd_hda_codec_pcm_put(apcm->info);
101 	return 0;
102 }
103 
104 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
105 			     struct snd_pcm_hw_params *hw_params)
106 {
107 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
108 	struct azx *chip = apcm->chip;
109 	struct azx_dev *azx_dev = get_azx_dev(substream);
110 	int ret = 0;
111 
112 	trace_azx_pcm_hw_params(chip, azx_dev);
113 	dsp_lock(azx_dev);
114 	if (dsp_is_locked(azx_dev)) {
115 		ret = -EBUSY;
116 		goto unlock;
117 	}
118 
119 	azx_dev->core.bufsize = 0;
120 	azx_dev->core.period_bytes = 0;
121 	azx_dev->core.format_val = 0;
122 
123 unlock:
124 	dsp_unlock(azx_dev);
125 	return ret;
126 }
127 
128 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
129 {
130 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
131 	struct azx_dev *azx_dev = get_azx_dev(substream);
132 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
133 
134 	/* reset BDL address */
135 	dsp_lock(azx_dev);
136 	if (!dsp_is_locked(azx_dev))
137 		snd_hdac_stream_cleanup(azx_stream(azx_dev));
138 
139 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
140 
141 	azx_stream(azx_dev)->prepared = 0;
142 	dsp_unlock(azx_dev);
143 	return 0;
144 }
145 
146 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
147 {
148 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
149 	struct azx *chip = apcm->chip;
150 	struct azx_dev *azx_dev = get_azx_dev(substream);
151 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
152 	struct snd_pcm_runtime *runtime = substream->runtime;
153 	unsigned int format_val, stream_tag;
154 	int err;
155 	struct hda_spdif_out *spdif =
156 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
157 	unsigned short ctls = spdif ? spdif->ctls : 0;
158 
159 	trace_azx_pcm_prepare(chip, azx_dev);
160 	dsp_lock(azx_dev);
161 	if (dsp_is_locked(azx_dev)) {
162 		err = -EBUSY;
163 		goto unlock;
164 	}
165 
166 	snd_hdac_stream_reset(azx_stream(azx_dev));
167 	format_val = snd_hdac_calc_stream_format(runtime->rate,
168 						runtime->channels,
169 						runtime->format,
170 						hinfo->maxbps,
171 						ctls);
172 	if (!format_val) {
173 		dev_err(chip->card->dev,
174 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
175 			runtime->rate, runtime->channels, runtime->format);
176 		err = -EINVAL;
177 		goto unlock;
178 	}
179 
180 	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
181 	if (err < 0)
182 		goto unlock;
183 
184 	snd_hdac_stream_setup(azx_stream(azx_dev));
185 
186 	stream_tag = azx_dev->core.stream_tag;
187 	/* CA-IBG chips need the playback stream starting from 1 */
188 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
189 	    stream_tag > chip->capture_streams)
190 		stream_tag -= chip->capture_streams;
191 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
192 				     azx_dev->core.format_val, substream);
193 
194  unlock:
195 	if (!err)
196 		azx_stream(azx_dev)->prepared = 1;
197 	dsp_unlock(azx_dev);
198 	return err;
199 }
200 
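/* start/stop the substream and all substreams linked to it in sync
 * via the stream-sync (SSYNC) register
 */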
201 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
202 {
203 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
204 	struct azx *chip = apcm->chip;
205 	struct hdac_bus *bus = azx_bus(chip);
206 	struct azx_dev *azx_dev;
207 	struct snd_pcm_substream *s;
208 	struct hdac_stream *hstr;
209 	bool start;
210 	int sbits = 0;
211 	int sync_reg;
212 
213 	azx_dev = get_azx_dev(substream);
214 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
215 
216 	hstr = azx_stream(azx_dev);
217 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
218 		sync_reg = AZX_REG_OLD_SSYNC;
219 	else
220 		sync_reg = AZX_REG_SSYNC;
221 
222 	if (dsp_is_locked(azx_dev) || !hstr->prepared)
223 		return -EPIPE;
224 
225 	switch (cmd) {
226 	case SNDRV_PCM_TRIGGER_START:
227 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
228 	case SNDRV_PCM_TRIGGER_RESUME:
229 		start = true;
230 		break;
231 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
232 	case SNDRV_PCM_TRIGGER_SUSPEND:
233 	case SNDRV_PCM_TRIGGER_STOP:
234 		start = false;
235 		break;
236 	default:
237 		return -EINVAL;
238 	}
239 
240 	snd_pcm_group_for_each_entry(s, substream) {
241 		if (s->pcm->card != substream->pcm->card)
242 			continue;
243 		azx_dev = get_azx_dev(s);
244 		sbits |= 1 << azx_dev->core.index;
245 		snd_pcm_trigger_done(s, substream);
246 	}
247 
248 	spin_lock(&bus->reg_lock);
249 
250 	/* first, set SYNC bits of corresponding streams */
251 	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
252 
253 	snd_pcm_group_for_each_entry(s, substream) {
254 		if (s->pcm->card != substream->pcm->card)
255 			continue;
256 		azx_dev = get_azx_dev(s);
257 		if (start) {
258 			azx_dev->insufficient = 1;
259 			snd_hdac_stream_start(azx_stream(azx_dev), true);
260 		} else {
261 			snd_hdac_stream_stop(azx_stream(azx_dev));
262 		}
263 	}
264 	spin_unlock(&bus->reg_lock);
265 
266 	snd_hdac_stream_sync(hstr, start, sbits);
267 
268 	spin_lock(&bus->reg_lock);
269 	/* reset SYNC bits */
270 	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
271 	if (start)
272 		snd_hdac_stream_timecounter_init(hstr, sbits);
273 	spin_unlock(&bus->reg_lock);
274 	return 0;
275 }
276 
277 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
278 {
279 	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
280 }
281 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
282 
283 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
284 {
285 	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
286 }
287 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
288 
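/* return the current DMA position in bytes and refresh runtime->delay
 * with the controller- and codec-side delays
 */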
289 unsigned int azx_get_position(struct azx *chip,
290 			      struct azx_dev *azx_dev)
291 {
292 	struct snd_pcm_substream *substream = azx_dev->core.substream;
293 	unsigned int pos;
294 	int stream = substream->stream;
295 	int delay = 0;
296 
297 	if (chip->get_position[stream])
298 		pos = chip->get_position[stream](chip, azx_dev);
299 	else /* use the position buffer as default */
300 		pos = azx_get_pos_posbuf(chip, azx_dev);
301 
302 	if (pos >= azx_dev->core.bufsize)
303 		pos = 0;
304 
305 	if (substream->runtime) {
306 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
307 		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
308 
309 		if (chip->get_delay[stream])
310 			delay += chip->get_delay[stream](chip, azx_dev, pos);
311 		if (hinfo->ops.get_delay)
312 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
313 						      substream);
314 		substream->runtime->delay = delay;
315 	}
316 
317 	trace_azx_get_position(chip, azx_dev, pos, delay);
318 	return pos;
319 }
320 EXPORT_SYMBOL_GPL(azx_get_position);
321 
322 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
323 {
324 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
325 	struct azx *chip = apcm->chip;
326 	struct azx_dev *azx_dev = get_azx_dev(substream);
327 	return bytes_to_frames(substream->runtime,
328 			       azx_get_position(chip, azx_dev));
329 }
330 
331 /*
332  * azx_scale64: Scale base by num/den while avoiding overflow
333  *
334  * Derived from scale64_check_overflow in kernel/time/timekeeping.c
335  *
336  * The timestamps for a 48kHz stream can overflow after (2^64/10^9)/48K
337  * seconds, which is about 384307 seconds, i.e. ~4.5 days.
338  *
339  * This scales the calculation so that overflow will happen but after 2^64 /
340  * 48000 secs, which is pretty large!
341  *
342  * In the calculation below:
343  *	base may overflow, but since there isn't any additional division
344  *	performed on base, it's OK
345  *	rem can't overflow because both rem and num are 32-bit values
346  */
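/*
 * For instance, with num = NSEC_PER_SEC and den = 48000 (the 48kHz case
 * above): base is split into (base / den) and rem = base % den; each part
 * is multiplied by num separately and rem * num / den is added back in,
 * so the result approximates base * num / den.  rem < den and num both
 * fit in 32 bits, hence rem * num cannot overflow a u64.
 */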
347 
348 #ifdef CONFIG_X86
349 static u64 azx_scale64(u64 base, u32 num, u32 den)
350 {
351 	u64 rem;
352 
353 	rem = do_div(base, den);
354 
355 	base *= num;
356 	rem *= num;
357 
358 	do_div(rem, den);
359 
360 	return base + rem;
361 }
362 
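/*
 * Capture the link position counter and the ART/TSC counter through the
 * GTSCC logic and convert them into a device/system timestamp pair for
 * get_device_system_crosststamp().
 */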
363 static int azx_get_sync_time(ktime_t *device,
364 		struct system_counterval_t *system, void *ctx)
365 {
366 	struct snd_pcm_substream *substream = ctx;
367 	struct azx_dev *azx_dev = get_azx_dev(substream);
368 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
369 	struct azx *chip = apcm->chip;
370 	struct snd_pcm_runtime *runtime;
371 	u64 ll_counter, ll_counter_l, ll_counter_h;
372 	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
373 	u32 wallclk_ctr, wallclk_cycles;
374 	bool direction;
375 	u32 dma_select;
376 	u32 timeout = 200;
377 	u32 retry_count = 0;
378 
379 	runtime = substream->runtime;
380 
381 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
382 		direction = 1;
383 	else
384 		direction = 0;
385 
386 	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
387 	do {
388 		timeout = 100;
389 		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
390 					(azx_dev->core.stream_tag - 1);
391 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
392 
393 		/* Enable the capture */
394 		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
395 
396 		while (timeout) {
397 			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
398 						GTSCC_TSCCD_MASK)
399 				break;
400 
401 			timeout--;
402 		}
403 
404 		if (!timeout) {
405 			dev_err(chip->card->dev, "GTSCC capture timed out!\n");
406 			return -EIO;
407 		}
408 
409 		/* Read wall clock counter */
410 		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
411 
412 		/* Read TSC counter */
413 		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
414 		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
415 
416 		/* Read Link counter */
417 		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
418 		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
419 
420 		/* Ack: registers read done */
421 		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
422 
423 		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
424 						tsc_counter_l;
425 
426 		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) |	ll_counter_l;
427 		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
428 
429 		/*
430 		 * An error can occur near a frame "rollover". The clocks-
431 		 * in-frame value indicates whether this error may have
432 		 * occurred. Here we use a margin of 10 cycles, i.e.
433 		 * HDA_MAX_CYCLE_OFFSET.
434 		 */
435 		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
436 					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
437 			break;
438 
439 		/*
440 		 * Sleep before we read again, else we may again get a
441 		 * value near MAX_CYCLE. Sleep for a different amount of
442 		 * time on each retry so we don't hit the same number again.
443 		 */
444 		udelay(retry_count++);
445 
446 	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
447 
448 	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
449 		dev_err_ratelimited(chip->card->dev,
450 			"Error in WALFCC cycle count\n");
451 		return -EIO;
452 	}
453 
454 	*device = ns_to_ktime(azx_scale64(ll_counter,
455 				NSEC_PER_SEC, runtime->rate));
456 	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
457 			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
458 
459 	*system = convert_art_to_tsc(tsc_counter);
460 
461 	return 0;
462 }
463 
464 #else
465 static int azx_get_sync_time(ktime_t *device,
466 		struct system_counterval_t *system, void *ctx)
467 {
468 	return -ENXIO;
469 }
470 #endif
471 
472 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
473 			      struct system_device_crosststamp *xtstamp)
474 {
475 	return get_device_system_crosststamp(azx_get_sync_time,
476 					substream, NULL, xtstamp);
477 }
478 
479 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
480 				struct snd_pcm_audio_tstamp_config *ts)
481 {
482 	if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
483 		if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
484 			return true;
485 
486 	return false;
487 }
488 
489 static int azx_get_time_info(struct snd_pcm_substream *substream,
490 			struct timespec *system_ts, struct timespec *audio_ts,
491 			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
492 			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
493 {
494 	struct azx_dev *azx_dev = get_azx_dev(substream);
495 	struct snd_pcm_runtime *runtime = substream->runtime;
496 	struct system_device_crosststamp xtstamp;
497 	int ret;
498 	u64 nsec;
499 
500 	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
501 		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
502 
503 		snd_pcm_gettime(substream->runtime, system_ts);
504 
505 		nsec = timecounter_read(&azx_dev->core.tc);
506 		nsec = div_u64(nsec, 3); /* can be optimized */
507 		if (audio_tstamp_config->report_delay)
508 			nsec = azx_adjust_codec_delay(substream, nsec);
509 
510 		*audio_ts = ns_to_timespec(nsec);
511 
512 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
513 		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
514 		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
515 
516 	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {
517 
518 		ret = azx_get_crosststamp(substream, &xtstamp);
519 		if (ret)
520 			return ret;
521 
522 		switch (runtime->tstamp_type) {
523 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
524 			return -EINVAL;
525 
526 		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
527 			*system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
528 			break;
529 
530 		default:
531 			*system_ts = ktime_to_timespec(xtstamp.sys_realtime);
532 			break;
533 
534 		}
535 
536 		*audio_ts = ktime_to_timespec(xtstamp.device);
537 
538 		audio_tstamp_report->actual_type =
539 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
540 		audio_tstamp_report->accuracy_report = 1;
541 		/* 24 MHz WallClock == 42ns resolution */
542 		audio_tstamp_report->accuracy = 42;
543 
544 	} else {
545 		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
546 	}
547 
548 	return 0;
549 }
550 
551 static struct snd_pcm_hardware azx_pcm_hw = {
552 	.info =			(SNDRV_PCM_INFO_MMAP |
553 				 SNDRV_PCM_INFO_INTERLEAVED |
554 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
555 				 SNDRV_PCM_INFO_MMAP_VALID |
556 				 /* No full-resume yet implemented */
557 				 /* SNDRV_PCM_INFO_RESUME |*/
558 				 SNDRV_PCM_INFO_PAUSE |
559 				 SNDRV_PCM_INFO_SYNC_START |
560 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
561 				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
562 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
563 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
564 	.rates =		SNDRV_PCM_RATE_48000,
565 	.rate_min =		48000,
566 	.rate_max =		48000,
567 	.channels_min =		2,
568 	.channels_max =		2,
569 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
570 	.period_bytes_min =	128,
571 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
572 	.periods_min =		2,
573 	.periods_max =		AZX_MAX_FRAG,
574 	.fifo_size =		0,
575 };
576 
577 static int azx_pcm_open(struct snd_pcm_substream *substream)
578 {
579 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
580 	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
581 	struct azx *chip = apcm->chip;
582 	struct azx_dev *azx_dev;
583 	struct snd_pcm_runtime *runtime = substream->runtime;
584 	int err;
585 	int buff_step;
586 
587 	snd_hda_codec_pcm_get(apcm->info);
588 	mutex_lock(&chip->open_mutex);
589 	azx_dev = azx_assign_device(chip, substream);
590 	trace_azx_pcm_open(chip, azx_dev);
591 	if (azx_dev == NULL) {
592 		err = -EBUSY;
593 		goto unlock;
594 	}
595 	runtime->private_data = azx_dev;
596 
597 	runtime->hw = azx_pcm_hw;
598 	if (chip->gts_present)
599 		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
600 	runtime->hw.channels_min = hinfo->channels_min;
601 	runtime->hw.channels_max = hinfo->channels_max;
602 	runtime->hw.formats = hinfo->formats;
603 	runtime->hw.rates = hinfo->rates;
604 	snd_pcm_limit_hw_rates(runtime);
605 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
606 
607 	/* avoid wrap-around with wall-clock */
608 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
609 				     20,
610 				     178000000);
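	/*
	 * The 24 MHz wall clock is a 32-bit counter, so it wraps after
	 * roughly 2^32 / 24e6 ~= 178.9 s; presumably that is why the
	 * buffer time is capped at 178 s above.
	 */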
611 
612 	/* For some reason, the playback stream stalls on PulseAudio with
613 	 * tsched=1 when a capture stream triggers.  Until we figure out the
614 	 * real cause, disable tsched mode via the SNDRV_PCM_INFO_BATCH flag.
615 	 */
616 	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
617 		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
618 
619 	if (chip->align_buffer_size)
620 		/* constrain buffer sizes to be a multiple of 128
621 		   bytes. This is more efficient in terms of memory
622 		   access but isn't required by the HDA spec and
623 		   prevents users from specifying exact period/buffer
624 		   sizes. For example, at 44.1 kHz a period size set
625 		   to 20 ms will be rounded to 19.59 ms. */
626 		buff_step = 128;
627 	else
628 		/* Don't enforce steps on buffer sizes; they still need
629 		   to be a multiple of 4 bytes (HDA spec). Tested on
630 		   Intel HDA controllers; may not work on all devices,
631 		   in which case this option needs to be disabled. */
632 		buff_step = 4;
633 
634 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
635 				   buff_step);
636 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
637 				   buff_step);
638 	snd_hda_power_up(apcm->codec);
639 	if (hinfo->ops.open)
640 		err = hinfo->ops.open(hinfo, apcm->codec, substream);
641 	else
642 		err = -ENODEV;
643 	if (err < 0) {
644 		azx_release_device(azx_dev);
645 		goto powerdown;
646 	}
647 	snd_pcm_limit_hw_rates(runtime);
648 	/* sanity check */
649 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
650 	    snd_BUG_ON(!runtime->hw.channels_max) ||
651 	    snd_BUG_ON(!runtime->hw.formats) ||
652 	    snd_BUG_ON(!runtime->hw.rates)) {
653 		azx_release_device(azx_dev);
654 		if (hinfo->ops.close)
655 			hinfo->ops.close(hinfo, apcm->codec, substream);
656 		err = -EINVAL;
657 		goto powerdown;
658 	}
659 
660 	/* disable LINK_ATIME timestamps for capture streams
661 	   until we figure out how to handle digital inputs */
662 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
663 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
664 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
665 	}
666 
667 	snd_pcm_set_sync(substream);
668 	mutex_unlock(&chip->open_mutex);
669 	return 0;
670 
671  powerdown:
672 	snd_hda_power_down(apcm->codec);
673  unlock:
674 	mutex_unlock(&chip->open_mutex);
675 	snd_hda_codec_pcm_put(apcm->info);
676 	return err;
677 }
678 
679 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
680 			struct vm_area_struct *area)
681 {
682 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
683 	struct azx *chip = apcm->chip;
684 	if (chip->ops->pcm_mmap_prepare)
685 		chip->ops->pcm_mmap_prepare(substream, area);
686 	return snd_pcm_lib_default_mmap(substream, area);
687 }
688 
689 static const struct snd_pcm_ops azx_pcm_ops = {
690 	.open = azx_pcm_open,
691 	.close = azx_pcm_close,
692 	.hw_params = azx_pcm_hw_params,
693 	.hw_free = azx_pcm_hw_free,
694 	.prepare = azx_pcm_prepare,
695 	.trigger = azx_pcm_trigger,
696 	.pointer = azx_pcm_pointer,
697 	.get_time_info =  azx_get_time_info,
698 	.mmap = azx_pcm_mmap,
699 };
700 
701 static void azx_pcm_free(struct snd_pcm *pcm)
702 {
703 	struct azx_pcm *apcm = pcm->private_data;
704 	if (apcm) {
705 		list_del(&apcm->list);
706 		apcm->info->pcm = NULL;
707 		kfree(apcm);
708 	}
709 }
710 
711 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
712 
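/* create an ALSA PCM device for the given codec PCM, hook up the PCM ops
 * and pre-allocate the buffers
 */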
713 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
714 			      struct hda_pcm *cpcm)
715 {
716 	struct hdac_bus *bus = &_bus->core;
717 	struct azx *chip = bus_to_azx(bus);
718 	struct snd_pcm *pcm;
719 	struct azx_pcm *apcm;
720 	int pcm_dev = cpcm->device;
721 	unsigned int size;
722 	int s, err;
723 	int type = SNDRV_DMA_TYPE_DEV_SG;
724 
725 	list_for_each_entry(apcm, &chip->pcm_list, list) {
726 		if (apcm->pcm->device == pcm_dev) {
727 			dev_err(chip->card->dev, "PCM %d already exists\n",
728 				pcm_dev);
729 			return -EBUSY;
730 		}
731 	}
732 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
733 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
734 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
735 			  &pcm);
736 	if (err < 0)
737 		return err;
738 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
739 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
740 	if (apcm == NULL) {
741 		snd_device_free(chip->card, pcm);
742 		return -ENOMEM;
743 	}
744 	apcm->chip = chip;
745 	apcm->pcm = pcm;
746 	apcm->codec = codec;
747 	apcm->info = cpcm;
748 	pcm->private_data = apcm;
749 	pcm->private_free = azx_pcm_free;
750 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
751 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
752 	list_add_tail(&apcm->list, &chip->pcm_list);
753 	cpcm->pcm = pcm;
754 	for (s = 0; s < 2; s++) {
755 		if (cpcm->stream[s].substreams)
756 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
757 	}
758 	/* buffer pre-allocation */
759 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
760 	if (size > MAX_PREALLOC_SIZE)
761 		size = MAX_PREALLOC_SIZE;
762 	if (chip->uc_buffer)
763 		type = SNDRV_DMA_TYPE_DEV_UC_SG;
764 	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
765 				       size, MAX_PREALLOC_SIZE);
766 	return 0;
767 }
768 
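/* the codec address is encoded in the upper nibble (bits 31:28) of a verb */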
769 static unsigned int azx_command_addr(u32 cmd)
770 {
771 	unsigned int addr = cmd >> 28;
772 
773 	if (addr >= AZX_MAX_CODECS) {
774 		snd_BUG();
775 		addr = 0;
776 	}
777 
778 	return addr;
779 }
780 
781 /* receive a response */
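/*
 * On a timeout this falls back step by step: first switch to polling
 * mode, then disable MSI, then optionally request a bus reset (-EAGAIN),
 * and finally drop to single_cmd mode.
 */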
782 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
783 				 unsigned int *res)
784 {
785 	struct azx *chip = bus_to_azx(bus);
786 	struct hda_bus *hbus = &chip->bus;
787 	unsigned long timeout;
788 	unsigned long loopcounter;
789 	wait_queue_entry_t wait;
790 	bool warned = false;
791 
792 	init_wait_entry(&wait, 0);
793  again:
794 	timeout = jiffies + msecs_to_jiffies(1000);
795 
796 	for (loopcounter = 0;; loopcounter++) {
797 		spin_lock_irq(&bus->reg_lock);
798 		if (!bus->polling_mode)
799 			prepare_to_wait(&bus->rirb_wq, &wait,
800 					TASK_UNINTERRUPTIBLE);
801 		if (bus->polling_mode)
802 			snd_hdac_bus_update_rirb(bus);
803 		if (!bus->rirb.cmds[addr]) {
804 			if (res)
805 				*res = bus->rirb.res[addr]; /* the last value */
806 			if (!bus->polling_mode)
807 				finish_wait(&bus->rirb_wq, &wait);
808 			spin_unlock_irq(&bus->reg_lock);
809 			return 0;
810 		}
811 		spin_unlock_irq(&bus->reg_lock);
812 		if (time_after(jiffies, timeout))
813 			break;
814 #define LOOP_COUNT_MAX	3000
815 		if (!bus->polling_mode) {
816 			schedule_timeout(msecs_to_jiffies(2));
817 		} else if (hbus->needs_damn_long_delay ||
818 		    loopcounter > LOOP_COUNT_MAX) {
819 			if (loopcounter > LOOP_COUNT_MAX && !warned) {
820 				dev_dbg_ratelimited(chip->card->dev,
821 						    "too slow response, last cmd=%#08x\n",
822 						    bus->last_cmd[addr]);
823 				warned = true;
824 			}
825 			msleep(2); /* temporary workaround */
826 		} else {
827 			udelay(10);
828 			cond_resched();
829 		}
830 	}
831 
832 	if (!bus->polling_mode)
833 		finish_wait(&bus->rirb_wq, &wait);
834 
835 	if (hbus->no_response_fallback)
836 		return -EIO;
837 
838 	if (!bus->polling_mode) {
839 		dev_warn(chip->card->dev,
840 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
841 			 bus->last_cmd[addr]);
842 		bus->polling_mode = 1;
843 		goto again;
844 	}
845 
846 	if (chip->msi) {
847 		dev_warn(chip->card->dev,
848 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
849 			 bus->last_cmd[addr]);
850 		if (chip->ops->disable_msi_reset_irq &&
851 		    chip->ops->disable_msi_reset_irq(chip) < 0)
852 			return -EIO;
853 		goto again;
854 	}
855 
856 	if (chip->probing) {
857 		/* If this critical timeout happens during the codec probing
858 		 * phase, this is likely an access to a non-existing codec
859 		 * slot.  Better to return an error and reset the system.
860 		 */
861 		return -EIO;
862 	}
863 
864 	/* no fallback mechanism? */
865 	if (!chip->fallback_to_single_cmd)
866 		return -EIO;
867 
868 	/* a fatal communication error; need either to reset or to fallback
869 	 * to the single_cmd mode
870 	 */
871 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
872 		hbus->response_reset = 1;
873 		dev_err(chip->card->dev,
874 			"No response from codec, resetting bus: last cmd=0x%08x\n",
875 			bus->last_cmd[addr]);
876 		return -EAGAIN; /* give a chance to retry */
877 	}
878 
879 	dev_WARN(chip->card->dev,
880 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
881 		bus->last_cmd[addr]);
882 	chip->single_cmd = 1;
883 	hbus->response_reset = 0;
884 	snd_hdac_bus_stop_cmd_io(bus);
885 	return -EIO;
886 }
887 
888 /*
889  * Use the single immediate command instead of CORB/RIRB for simplicity
890  *
891  * Note: according to Intel, this is not the preferred use.  The command
892  *       was intended for the BIOS only, and may get confused by
893  *       unsolicited responses.  So, we shouldn't use it for normal
894  *       operation from the driver.
895  *       The code is left here, however, for debugging/testing purposes.
896  */
897 
898 /* receive a response */
899 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
900 {
901 	int timeout = 50;
902 
903 	while (timeout--) {
904 		/* check the IRV (result valid) bit */
905 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
906 			/* reuse rirb.res as the response return value */
907 			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
908 			return 0;
909 		}
910 		udelay(1);
911 	}
912 	if (printk_ratelimit())
913 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
914 			azx_readw(chip, IRS));
915 	azx_bus(chip)->rirb.res[addr] = -1;
916 	return -EIO;
917 }
918 
919 /* send a command */
920 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
921 {
922 	struct azx *chip = bus_to_azx(bus);
923 	unsigned int addr = azx_command_addr(val);
924 	int timeout = 50;
925 
926 	bus->last_cmd[azx_command_addr(val)] = val;
927 	while (timeout--) {
928 		/* check ICB busy bit */
929 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
930 			/* Clear IRV valid bit */
931 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
932 				   AZX_IRS_VALID);
933 			azx_writel(chip, IC, val);
934 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
935 				   AZX_IRS_BUSY);
936 			return azx_single_wait_for_response(chip, addr);
937 		}
938 		udelay(1);
939 	}
940 	if (printk_ratelimit())
941 		dev_dbg(chip->card->dev,
942 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
943 			azx_readw(chip, IRS), val);
944 	return -EIO;
945 }
946 
947 /* receive a response */
948 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
949 				   unsigned int *res)
950 {
951 	if (res)
952 		*res = bus->rirb.res[addr];
953 	return 0;
954 }
955 
956 /*
957  * Below are the main callbacks from hda_codec.
958  *
959  * They are just skeletons that dispatch to the sub-callbacks according
960  * to the current setting of chip->single_cmd.
961  */
962 
963 /* send a command */
964 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
965 {
966 	struct azx *chip = bus_to_azx(bus);
967 
968 	if (chip->disabled)
969 		return 0;
970 	if (chip->single_cmd)
971 		return azx_single_send_cmd(bus, val);
972 	else
973 		return snd_hdac_bus_send_cmd(bus, val);
974 }
975 
976 /* get a response */
977 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
978 			    unsigned int *res)
979 {
980 	struct azx *chip = bus_to_azx(bus);
981 
982 	if (chip->disabled)
983 		return 0;
984 	if (chip->single_cmd)
985 		return azx_single_get_response(bus, addr, res);
986 	else
987 		return azx_rirb_get_response(bus, addr, res);
988 }
989 
990 static const struct hdac_bus_ops bus_core_ops = {
991 	.command = azx_send_cmd,
992 	.get_response = azx_get_response,
993 };
994 
995 #ifdef CONFIG_SND_HDA_DSP_LOADER
996 /*
997  * DSP loading code (e.g. for CA0132)
998  */
999 
1000 /* use the first stream for loading DSP */
1001 static struct azx_dev *
1002 azx_get_dsp_loader_dev(struct azx *chip)
1003 {
1004 	struct hdac_bus *bus = azx_bus(chip);
1005 	struct hdac_stream *s;
1006 
1007 	list_for_each_entry(s, &bus->stream_list, list)
1008 		if (s->index == chip->playback_index_offset)
1009 			return stream_to_azx_dev(s);
1010 
1011 	return NULL;
1012 }
1013 
1014 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
1015 				   unsigned int byte_size,
1016 				   struct snd_dma_buffer *bufp)
1017 {
1018 	struct hdac_bus *bus = &codec->bus->core;
1019 	struct azx *chip = bus_to_azx(bus);
1020 	struct azx_dev *azx_dev;
1021 	struct hdac_stream *hstr;
1022 	bool saved = false;
1023 	int err;
1024 
1025 	azx_dev = azx_get_dsp_loader_dev(chip);
1026 	hstr = azx_stream(azx_dev);
1027 	spin_lock_irq(&bus->reg_lock);
1028 	if (hstr->opened) {
1029 		chip->saved_azx_dev = *azx_dev;
1030 		saved = true;
1031 	}
1032 	spin_unlock_irq(&bus->reg_lock);
1033 
1034 	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
1035 	if (err < 0) {
1036 		spin_lock_irq(&bus->reg_lock);
1037 		if (saved)
1038 			*azx_dev = chip->saved_azx_dev;
1039 		spin_unlock_irq(&bus->reg_lock);
1040 		return err;
1041 	}
1042 
1043 	hstr->prepared = 0;
1044 	return err;
1045 }
1046 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
1047 
1048 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1049 {
1050 	struct hdac_bus *bus = &codec->bus->core;
1051 	struct azx *chip = bus_to_azx(bus);
1052 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1053 
1054 	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1055 }
1056 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1057 
1058 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1059 				    struct snd_dma_buffer *dmab)
1060 {
1061 	struct hdac_bus *bus = &codec->bus->core;
1062 	struct azx *chip = bus_to_azx(bus);
1063 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1064 	struct hdac_stream *hstr = azx_stream(azx_dev);
1065 
1066 	if (!dmab->area || !hstr->locked)
1067 		return;
1068 
1069 	snd_hdac_dsp_cleanup(hstr, dmab);
1070 	spin_lock_irq(&bus->reg_lock);
1071 	if (hstr->opened)
1072 		*azx_dev = chip->saved_azx_dev;
1073 	hstr->locked = false;
1074 	spin_unlock_irq(&bus->reg_lock);
1075 }
1076 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1077 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1078 
1079 /*
1080  * reset and start the controller registers
1081  */
1082 void azx_init_chip(struct azx *chip, bool full_reset)
1083 {
1084 	if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1085 		/* correct RINTCNT for CXT */
1086 		if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1087 			azx_writew(chip, RINTCNT, 0xc0);
1088 	}
1089 }
1090 EXPORT_SYMBOL_GPL(azx_init_chip);
1091 
1092 void azx_stop_all_streams(struct azx *chip)
1093 {
1094 	struct hdac_bus *bus = azx_bus(chip);
1095 	struct hdac_stream *s;
1096 
1097 	list_for_each_entry(s, &bus->stream_list, list)
1098 		snd_hdac_stream_stop(s);
1099 }
1100 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1101 
1102 void azx_stop_chip(struct azx *chip)
1103 {
1104 	snd_hdac_bus_stop_chip(azx_bus(chip));
1105 }
1106 EXPORT_SYMBOL_GPL(azx_stop_chip);
1107 
1108 /*
1109  * interrupt handler
1110  */
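/* per-stream handler invoked from snd_hdac_bus_handle_stream_irq() */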
1111 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1112 {
1113 	struct azx *chip = bus_to_azx(bus);
1114 	struct azx_dev *azx_dev = stream_to_azx_dev(s);
1115 
1116 	/* check whether this IRQ is really acceptable */
1117 	if (!chip->ops->position_check ||
1118 	    chip->ops->position_check(chip, azx_dev)) {
1119 		spin_unlock(&bus->reg_lock);
1120 		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1121 		spin_lock(&bus->reg_lock);
1122 	}
1123 }
1124 
1125 irqreturn_t azx_interrupt(int irq, void *dev_id)
1126 {
1127 	struct azx *chip = dev_id;
1128 	struct hdac_bus *bus = azx_bus(chip);
1129 	u32 status;
1130 	bool active, handled = false;
1131 	int repeat = 0; /* count for avoiding endless loop */
1132 
1133 #ifdef CONFIG_PM
1134 	if (azx_has_pm_runtime(chip))
1135 		if (!pm_runtime_active(chip->card->dev))
1136 			return IRQ_NONE;
1137 #endif
1138 
1139 	spin_lock(&bus->reg_lock);
1140 
1141 	if (chip->disabled)
1142 		goto unlock;
1143 
1144 	do {
1145 		status = azx_readl(chip, INTSTS);
1146 		if (status == 0 || status == 0xffffffff)
1147 			break;
1148 
1149 		handled = true;
1150 		active = false;
1151 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1152 			active = true;
1153 
1154 		/* clear rirb int */
1155 		status = azx_readb(chip, RIRBSTS);
1156 		if (status & RIRB_INT_MASK) {
1157 			active = true;
1158 			if (status & RIRB_INT_RESPONSE) {
1159 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1160 					udelay(80);
1161 				snd_hdac_bus_update_rirb(bus);
1162 			}
1163 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1164 		}
1165 	} while (active && ++repeat < 10);
1166 
1167  unlock:
1168 	spin_unlock(&bus->reg_lock);
1169 
1170 	return IRQ_RETVAL(handled);
1171 }
1172 EXPORT_SYMBOL_GPL(azx_interrupt);
1173 
1174 /*
1175  * Codec interface
1176  */
1177 
1178 /*
1179  * Probe the given codec address
1180  */
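/* send a GET_PARAMETER(VENDOR_ID) verb to the root node and check the reply */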
1181 static int probe_codec(struct azx *chip, int addr)
1182 {
1183 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1184 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1185 	struct hdac_bus *bus = azx_bus(chip);
1186 	int err;
1187 	unsigned int res = -1;
1188 
1189 	mutex_lock(&bus->cmd_mutex);
1190 	chip->probing = 1;
1191 	azx_send_cmd(bus, cmd);
1192 	err = azx_get_response(bus, addr, &res);
1193 	chip->probing = 0;
1194 	mutex_unlock(&bus->cmd_mutex);
1195 	if (err < 0 || res == -1)
1196 		return -EIO;
1197 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1198 	return 0;
1199 }
1200 
1201 void snd_hda_bus_reset(struct hda_bus *bus)
1202 {
1203 	struct azx *chip = bus_to_azx(&bus->core);
1204 
1205 	bus->in_reset = 1;
1206 	azx_stop_chip(chip);
1207 	azx_init_chip(chip, true);
1208 	if (bus->core.chip_init)
1209 		snd_hda_bus_reset_codecs(bus);
1210 	bus->in_reset = 0;
1211 }
1212 
1213 /* HD-audio bus initialization */
1214 int azx_bus_init(struct azx *chip, const char *model)
1215 {
1216 	struct hda_bus *bus = &chip->bus;
1217 	int err;
1218 
1219 	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1220 	if (err < 0)
1221 		return err;
1222 
1223 	bus->card = chip->card;
1224 	mutex_init(&bus->prepare_mutex);
1225 	bus->pci = chip->pci;
1226 	bus->modelname = model;
1227 	bus->mixer_assigned = -1;
1228 	bus->core.snoop = azx_snoop(chip);
1229 	if (chip->get_position[0] != azx_get_pos_lpib ||
1230 	    chip->get_position[1] != azx_get_pos_lpib)
1231 		bus->core.use_posbuf = true;
1232 	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1233 	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1234 		bus->core.corbrp_self_clear = true;
1235 
1236 	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1237 		bus->core.align_bdle_4k = true;
1238 
1239 	/* AMD chipsets often cause communication stalls upon certain
1240 	 * sequences such as pin detection.  It seems that forcing synced
1241 	 * access works around the stall.  Grrr...
1242 	 */
1243 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1244 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1245 		bus->core.sync_write = 1;
1246 		bus->allow_bus_reset = 1;
1247 	}
1248 
1249 	return 0;
1250 }
1251 EXPORT_SYMBOL_GPL(azx_bus_init);
1252 
1253 /* Probe codecs */
1254 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1255 {
1256 	struct hdac_bus *bus = azx_bus(chip);
1257 	int c, codecs, err;
1258 
1259 	codecs = 0;
1260 	if (!max_slots)
1261 		max_slots = AZX_DEFAULT_CODECS;
1262 
1263 	/* First try to probe all given codec slots */
1264 	for (c = 0; c < max_slots; c++) {
1265 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1266 			if (probe_codec(chip, c) < 0) {
1267 				/* Some BIOSen report codec addresses
1268 				 * that don't actually exist
1269 				 */
1270 				dev_warn(chip->card->dev,
1271 					 "Codec #%d probe error; disabling it...\n", c);
1272 				bus->codec_mask &= ~(1 << c);
1273 				/* Worse, accessing a non-existent codec
1274 				 * often screws up the controller chip and
1275 				 * disturbs further communication.  Thus, if
1276 				 * an error occurs during probing, it's better
1277 				 * to reset the controller chip to get back
1278 				 * to a sane state.
1279 				 */
1280 				azx_stop_chip(chip);
1281 				azx_init_chip(chip, true);
1282 			}
1283 		}
1284 	}
1285 
1286 	/* Then create codec instances */
1287 	for (c = 0; c < max_slots; c++) {
1288 		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1289 			struct hda_codec *codec;
1290 			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1291 			if (err < 0)
1292 				continue;
1293 			codec->jackpoll_interval = chip->jackpoll_interval;
1294 			codec->beep_mode = chip->beep_mode;
1295 			codecs++;
1296 		}
1297 	}
1298 	if (!codecs) {
1299 		dev_err(chip->card->dev, "no codecs initialized\n");
1300 		return -ENXIO;
1301 	}
1302 	return 0;
1303 }
1304 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1305 
1306 /* configure each codec instance */
1307 int azx_codec_configure(struct azx *chip)
1308 {
1309 	struct hda_codec *codec, *next;
1310 
1311 	/* use _safe version here since snd_hda_codec_configure() deregisters
1312 	 * the device upon error and deletes itself from the bus list.
1313 	 */
1314 	list_for_each_codec_safe(codec, next, &chip->bus) {
1315 		snd_hda_codec_configure(codec);
1316 	}
1317 
1318 	if (!azx_bus(chip)->num_codecs)
1319 		return -ENODEV;
1320 	return 0;
1321 }
1322 EXPORT_SYMBOL_GPL(azx_codec_configure);
1323 
1324 static int stream_direction(struct azx *chip, unsigned char index)
1325 {
1326 	if (index >= chip->capture_index_offset &&
1327 	    index < chip->capture_index_offset + chip->capture_streams)
1328 		return SNDRV_PCM_STREAM_CAPTURE;
1329 	return SNDRV_PCM_STREAM_PLAYBACK;
1330 }
1331 
1332 /* initialize SD streams */
1333 int azx_init_streams(struct azx *chip)
1334 {
1335 	int i;
1336 	int stream_tags[2] = { 0, 0 };
1337 
1338 	/* initialize each stream (aka device):
1339 	 * assign the starting BDL address to each stream (device)
1340 	 * and initialize it
1341 	 */
1342 	for (i = 0; i < chip->num_streams; i++) {
1343 		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1344 		int dir, tag;
1345 
1346 		if (!azx_dev)
1347 			return -ENOMEM;
1348 
1349 		dir = stream_direction(chip, i);
1350 		/* The stream tag must be unique within
1351 		 * each stream direction group;
1352 		 * valid values are 1...15.
1353 		 * Use separate per-direction stream tags if the
1354 		 * AZX_DCAPS_SEPARATE_STREAM_TAG flag is set.
1355 		 */
1356 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1357 			tag = ++stream_tags[dir];
1358 		else
1359 			tag = i + 1;
1360 		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1361 				     i, dir, tag);
1362 	}
1363 
1364 	return 0;
1365 }
1366 EXPORT_SYMBOL_GPL(azx_init_streams);
1367 
1368 void azx_free_streams(struct azx *chip)
1369 {
1370 	struct hdac_bus *bus = azx_bus(chip);
1371 	struct hdac_stream *s;
1372 
1373 	while (!list_empty(&bus->stream_list)) {
1374 		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1375 		list_del(&s->list);
1376 		kfree(stream_to_azx_dev(s));
1377 	}
1378 }
1379 EXPORT_SYMBOL_GPL(azx_free_streams);
1380