xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision 5e29a910)
1 /*
2  *
3  *  Implementation of primary alsa driver code base for Intel HD Audio.
4  *
5  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
6  *
7  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8  *                     PeiSen Hou <pshou@realtek.com.tw>
9  *
10  *  This program is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License as published by the Free
12  *  Software Foundation; either version 2 of the License, or (at your option)
13  *  any later version.
14  *
15  *  This program is distributed in the hope that it will be useful, but WITHOUT
16  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  *  more details.
19  *
20  *
21  */
22 
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_priv.h"
34 #include "hda_controller.h"
35 
36 #define CREATE_TRACE_POINTS
37 #include "hda_intel_trace.h"
38 
39 /* DSP lock helpers */
40 #ifdef CONFIG_SND_HDA_DSP_LOADER
41 #define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
42 #define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
43 #define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
44 #define dsp_is_locked(dev)	((dev)->locked)
45 #else
46 #define dsp_lock_init(dev)	do {} while (0)
47 #define dsp_lock(dev)		do {} while (0)
48 #define dsp_unlock(dev)		do {} while (0)
49 #define dsp_is_locked(dev)	0
50 #endif
51 
52 /*
53  * AZX stream operations.
54  */
55 
56 /* start a stream */
57 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
58 {
59 	/*
60 	 * Before starting the stream, initialize the parameters
61 	 */
62 	azx_dev->insufficient = 1;
63 
64 	/* enable SIE */
65 	azx_writel(chip, INTCTL,
66 		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
67 	/* set DMA start and interrupt mask */
68 	azx_sd_writeb(chip, azx_dev, SD_CTL,
69 		      azx_sd_readb(chip, azx_dev, SD_CTL) |
70 		      SD_CTL_DMA_START | SD_INT_MASK);
71 }
72 
73 /* stop DMA */
74 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 {
76 	azx_sd_writeb(chip, azx_dev, SD_CTL,
77 		      azx_sd_readb(chip, azx_dev, SD_CTL) &
78 		      ~(SD_CTL_DMA_START | SD_INT_MASK));
79 	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
80 }
81 
82 /* stop a stream */
83 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 {
85 	azx_stream_clear(chip, azx_dev);
86 	/* disable SIE */
87 	azx_writel(chip, INTCTL,
88 		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 }
90 EXPORT_SYMBOL_GPL(azx_stream_stop);
91 
92 /* reset stream */
93 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
94 {
95 	unsigned char val;
96 	int timeout;
97 
98 	azx_stream_clear(chip, azx_dev);
99 
100 	azx_sd_writeb(chip, azx_dev, SD_CTL,
101 		      azx_sd_readb(chip, azx_dev, SD_CTL) |
102 		      SD_CTL_STREAM_RESET);
103 	udelay(3);
104 	timeout = 300;
105 	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
106 		 SD_CTL_STREAM_RESET) && --timeout)
107 		;
108 	val &= ~SD_CTL_STREAM_RESET;
109 	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
110 	udelay(3);
111 
112 	timeout = 300;
113 	/* waiting for hardware to report that the stream is out of reset */
114 	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
115 		SD_CTL_STREAM_RESET) && --timeout)
116 		;
117 
118 	/* reset first position - may not be synced with hw at this time */
119 	*azx_dev->posbuf = 0;
120 }
121 
122 /*
123  * set up the SD for streaming
124  */
125 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
126 {
127 	unsigned int val;
128 	/* make sure the run bit is zero for SD */
129 	azx_stream_clear(chip, azx_dev);
130 	/* program the stream_tag */
131 	val = azx_sd_readl(chip, azx_dev, SD_CTL);
132 	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
133 		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
134 	if (!azx_snoop(chip))
135 		val |= SD_CTL_TRAFFIC_PRIO;
136 	azx_sd_writel(chip, azx_dev, SD_CTL, val);
137 
138 	/* program the length of samples in cyclic buffer */
139 	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140 
141 	/* program the stream format */
142 	/* this value needs to be the same as the one programmed on the codec side */
143 	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144 
145 	/* program the stream LVI (last valid index) of the BDL */
146 	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147 
148 	/* program the BDL address */
149 	/* lower BDL address */
150 	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
151 	/* upper BDL address */
152 	azx_sd_writel(chip, azx_dev, SD_BDLPU,
153 		      upper_32_bits(azx_dev->bdl.addr));
154 
155 	/* enable the position buffer */
156 	if (chip->get_position[0] != azx_get_pos_lpib ||
157 	    chip->get_position[1] != azx_get_pos_lpib) {
158 		if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
159 			azx_writel(chip, DPLBASE,
160 				(u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
161 	}
162 
163 	/* set the interrupt enable bits in the descriptor control register */
164 	azx_sd_writel(chip, azx_dev, SD_CTL,
165 		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
166 
167 	return 0;
168 }
169 
170 /* assign a stream for the PCM */
171 static inline struct azx_dev *
172 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
173 {
174 	int dev, i, nums;
175 	struct azx_dev *res = NULL;
176 	/* make a non-zero unique key for the substream */
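	/*
	 * Illustrative key layout: the PCM device number lands in bits 16
	 * and up, the substream number in bits 2 and up, and the stream
	 * direction plus one in the low bits, so the key is never zero.
	 */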
177 	int key = (substream->pcm->device << 16) | (substream->number << 2) |
178 		(substream->stream + 1);
179 
180 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
181 		dev = chip->playback_index_offset;
182 		nums = chip->playback_streams;
183 	} else {
184 		dev = chip->capture_index_offset;
185 		nums = chip->capture_streams;
186 	}
187 	for (i = 0; i < nums; i++, dev++) {
188 		struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 		dsp_lock(azx_dev);
190 		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
191 			if (azx_dev->assigned_key == key) {
192 				azx_dev->opened = 1;
193 				azx_dev->assigned_key = key;
194 				dsp_unlock(azx_dev);
195 				return azx_dev;
196 			}
197 			if (!res ||
198 			    (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
199 				res = azx_dev;
200 		}
201 		dsp_unlock(azx_dev);
202 	}
203 	if (res) {
204 		dsp_lock(res);
205 		res->opened = 1;
206 		res->assigned_key = key;
207 		dsp_unlock(res);
208 	}
209 	return res;
210 }
211 
212 /* release the assigned stream */
213 static inline void azx_release_device(struct azx_dev *azx_dev)
214 {
215 	azx_dev->opened = 0;
216 }
217 
218 static cycle_t azx_cc_read(const struct cyclecounter *cc)
219 {
220 	struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
221 	struct snd_pcm_substream *substream = azx_dev->substream;
222 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
223 	struct azx *chip = apcm->chip;
224 
225 	return azx_readl(chip, WALLCLK);
226 }
227 
228 static void azx_timecounter_init(struct snd_pcm_substream *substream,
229 				bool force, cycle_t last)
230 {
231 	struct azx_dev *azx_dev = get_azx_dev(substream);
232 	struct timecounter *tc = &azx_dev->azx_tc;
233 	struct cyclecounter *cc = &azx_dev->azx_cc;
234 	u64 nsec;
235 
236 	cc->read = azx_cc_read;
237 	cc->mask = CLOCKSOURCE_MASK(32);
238 
239 	/*
240 	 * Converting from 24 MHz to ns means applying a 125/3 factor.
241 	 * To avoid any saturation issues in intermediate operations,
242 	 * the 125 factor is applied first. The division is applied
243 	 * last after reading the timecounter value.
244 	 * Applying the 1/3 factor as part of the multiplication
245 	 * requires at least 20 bits for a decent precision; however,
246 	 * overflows would occur after about 4 hours or less, which is not an option.
247 	 */
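	/*
	 * Worked example (illustrative): one second of wall clock is
	 * 24,000,000 cycles; multiplying by 125 gives 3,000,000,000, and
	 * the final division by 3 at readout yields 1,000,000,000 ns.
	 */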
248 
249 	cc->mult = 125; /* saturation after 195 years */
250 	cc->shift = 0;
251 
252 	nsec = 0; /* audio time is elapsed time since trigger */
253 	timecounter_init(tc, cc, nsec);
254 	if (force)
255 		/*
256 		 * force timecounter to use predefined value,
257 		 * used for synchronized starts
258 		 */
259 		tc->cycle_last = last;
260 }
261 
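/*
 * Adjust a wall-clock-derived timestamp by the codec-side delay reported
 * through the get_delay op: the delay is added for capture streams and
 * subtracted (clamped at zero) for playback streams.
 */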
262 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
263 				u64 nsec)
264 {
265 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
266 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
267 	u64 codec_frames, codec_nsecs;
268 
269 	if (!hinfo->ops.get_delay)
270 		return nsec;
271 
272 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
273 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
274 			      substream->runtime->rate);
275 
276 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
277 		return nsec + codec_nsecs;
278 
279 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
280 }
281 
282 /*
283  * set up a BDL entry
284  */
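/*
 * Each BDL entry is four 32-bit words: buffer address low, buffer address
 * high, length in bytes, and a flags word whose bit 0 is the IOC
 * (interrupt on completion) bit.
 */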
285 static int setup_bdle(struct azx *chip,
286 		      struct snd_dma_buffer *dmab,
287 		      struct azx_dev *azx_dev, u32 **bdlp,
288 		      int ofs, int size, int with_ioc)
289 {
290 	u32 *bdl = *bdlp;
291 
292 	while (size > 0) {
293 		dma_addr_t addr;
294 		int chunk;
295 
296 		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
297 			return -EINVAL;
298 
299 		addr = snd_sgbuf_get_addr(dmab, ofs);
300 		/* program the address field of the BDL entry */
301 		bdl[0] = cpu_to_le32((u32)addr);
302 		bdl[1] = cpu_to_le32(upper_32_bits(addr));
303 		/* program the size field of the BDL entry */
304 		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
305 		/* one BDLE cannot cross 4K boundary on CTHDA chips */
306 		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
307 			u32 remain = 0x1000 - (ofs & 0xfff);
308 			if (chunk > remain)
309 				chunk = remain;
310 		}
311 		bdl[2] = cpu_to_le32(chunk);
312 		/* program the IOC to enable interrupt
313 		 * only when the whole fragment is processed
314 		 */
315 		size -= chunk;
316 		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
317 		bdl += 4;
318 		azx_dev->frags++;
319 		ofs += chunk;
320 	}
321 	*bdlp = bdl;
322 	return ofs;
323 }
324 
325 /*
326  * set up BDL entries
327  */
328 static int azx_setup_periods(struct azx *chip,
329 			     struct snd_pcm_substream *substream,
330 			     struct azx_dev *azx_dev)
331 {
332 	u32 *bdl;
333 	int i, ofs, periods, period_bytes;
334 	int pos_adj = 0;
335 
336 	/* reset BDL address */
337 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
338 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
339 
340 	period_bytes = azx_dev->period_bytes;
341 	periods = azx_dev->bufsize / period_bytes;
342 
343 	/* program the initial BDL entries */
344 	bdl = (u32 *)azx_dev->bdl.area;
345 	ofs = 0;
346 	azx_dev->frags = 0;
347 
348 	if (chip->bdl_pos_adj)
349 		pos_adj = chip->bdl_pos_adj[chip->dev_index];
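	/*
	 * bdl_pos_adj is effectively specified in frames at a 48 kHz
	 * reference; it is converted to the stream rate (rounding up),
	 * re-aligned to the original value and translated to bytes.
	 * Illustrative example: pos_adj = 32 at 44.1 kHz gives
	 * (32 * 44100 + 47999) / 48000 = 30, rounded back up to 32 frames.
	 */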
350 	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
351 		struct snd_pcm_runtime *runtime = substream->runtime;
352 		int pos_align = pos_adj;
353 		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
354 		if (!pos_adj)
355 			pos_adj = pos_align;
356 		else
357 			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
358 				pos_align;
359 		pos_adj = frames_to_bytes(runtime, pos_adj);
360 		if (pos_adj >= period_bytes) {
361 			dev_warn(chip->card->dev, "Too big adjustment %d\n",
362 				 pos_adj);
363 			pos_adj = 0;
364 		} else {
365 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
366 					 azx_dev,
367 					 &bdl, ofs, pos_adj, true);
368 			if (ofs < 0)
369 				goto error;
370 		}
371 	} else
372 		pos_adj = 0;
373 
374 	for (i = 0; i < periods; i++) {
375 		if (i == periods - 1 && pos_adj)
376 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
377 					 azx_dev, &bdl, ofs,
378 					 period_bytes - pos_adj, 0);
379 		else
380 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
381 					 azx_dev, &bdl, ofs,
382 					 period_bytes,
383 					 !azx_dev->no_period_wakeup);
384 		if (ofs < 0)
385 			goto error;
386 	}
387 	return 0;
388 
389  error:
390 	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
391 		azx_dev->bufsize, period_bytes);
392 	return -EINVAL;
393 }
394 
395 /*
396  * PCM ops
397  */
398 
399 static int azx_pcm_close(struct snd_pcm_substream *substream)
400 {
401 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
402 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
403 	struct azx *chip = apcm->chip;
404 	struct azx_dev *azx_dev = get_azx_dev(substream);
405 	unsigned long flags;
406 
407 	mutex_lock(&chip->open_mutex);
408 	spin_lock_irqsave(&chip->reg_lock, flags);
409 	azx_dev->substream = NULL;
410 	azx_dev->running = 0;
411 	spin_unlock_irqrestore(&chip->reg_lock, flags);
412 	azx_release_device(azx_dev);
413 	hinfo->ops.close(hinfo, apcm->codec, substream);
414 	snd_hda_power_down(apcm->codec);
415 	mutex_unlock(&chip->open_mutex);
416 	return 0;
417 }
418 
419 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
420 			     struct snd_pcm_hw_params *hw_params)
421 {
422 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
423 	struct azx *chip = apcm->chip;
424 	int ret;
425 
426 	dsp_lock(get_azx_dev(substream));
427 	if (dsp_is_locked(get_azx_dev(substream))) {
428 		ret = -EBUSY;
429 		goto unlock;
430 	}
431 
432 	ret = chip->ops->substream_alloc_pages(chip, substream,
433 					  params_buffer_bytes(hw_params));
434 unlock:
435 	dsp_unlock(get_azx_dev(substream));
436 	return ret;
437 }
438 
439 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
440 {
441 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
442 	struct azx_dev *azx_dev = get_azx_dev(substream);
443 	struct azx *chip = apcm->chip;
444 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
445 	int err;
446 
447 	/* reset BDL address */
448 	dsp_lock(azx_dev);
449 	if (!dsp_is_locked(azx_dev)) {
450 		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
451 		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
452 		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
453 		azx_dev->bufsize = 0;
454 		azx_dev->period_bytes = 0;
455 		azx_dev->format_val = 0;
456 	}
457 
458 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
459 
460 	err = chip->ops->substream_free_pages(chip, substream);
461 	azx_dev->prepared = 0;
462 	dsp_unlock(azx_dev);
463 	return err;
464 }
465 
466 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
467 {
468 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
469 	struct azx *chip = apcm->chip;
470 	struct azx_dev *azx_dev = get_azx_dev(substream);
471 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
472 	struct snd_pcm_runtime *runtime = substream->runtime;
473 	unsigned int bufsize, period_bytes, format_val, stream_tag;
474 	int err;
475 	struct hda_spdif_out *spdif =
476 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
477 	unsigned short ctls = spdif ? spdif->ctls : 0;
478 
479 	dsp_lock(azx_dev);
480 	if (dsp_is_locked(azx_dev)) {
481 		err = -EBUSY;
482 		goto unlock;
483 	}
484 
485 	azx_stream_reset(chip, azx_dev);
486 	format_val = snd_hda_calc_stream_format(apcm->codec,
487 						runtime->rate,
488 						runtime->channels,
489 						runtime->format,
490 						hinfo->maxbps,
491 						ctls);
492 	if (!format_val) {
493 		dev_err(chip->card->dev,
494 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
495 			runtime->rate, runtime->channels, runtime->format);
496 		err = -EINVAL;
497 		goto unlock;
498 	}
499 
500 	bufsize = snd_pcm_lib_buffer_bytes(substream);
501 	period_bytes = snd_pcm_lib_period_bytes(substream);
502 
503 	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
504 		bufsize, format_val);
505 
506 	if (bufsize != azx_dev->bufsize ||
507 	    period_bytes != azx_dev->period_bytes ||
508 	    format_val != azx_dev->format_val ||
509 	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
510 		azx_dev->bufsize = bufsize;
511 		azx_dev->period_bytes = period_bytes;
512 		azx_dev->format_val = format_val;
513 		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
514 		err = azx_setup_periods(chip, substream, azx_dev);
515 		if (err < 0)
516 			goto unlock;
517 	}
518 
519 	/* when LPIB delay correction gives a small negative value,
520 	 * we ignore it; currently set the threshold statically to
521 	 * 64 frames
522 	 */
523 	if (runtime->period_size > 64)
524 		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
525 	else
526 		azx_dev->delay_negative_threshold = 0;
527 
528 	/* wallclk has a 24 MHz clock source */
529 	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
530 						runtime->rate) * 1000);
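	/* e.g. (illustrative) a 1024-frame period at 48 kHz gives
	 * 1024 * 24000 / 48000 * 1000 = 512000 wall-clock ticks,
	 * i.e. about 21.3 ms at 24 MHz
	 */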
531 	azx_setup_controller(chip, azx_dev);
532 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
533 		azx_dev->fifo_size =
534 			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
535 	else
536 		azx_dev->fifo_size = 0;
537 
538 	stream_tag = azx_dev->stream_tag;
539 	/* CA-IBG chips need the playback stream starting from 1 */
540 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
541 	    stream_tag > chip->capture_streams)
542 		stream_tag -= chip->capture_streams;
543 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
544 				     azx_dev->format_val, substream);
545 
546  unlock:
547 	if (!err)
548 		azx_dev->prepared = 1;
549 	dsp_unlock(azx_dev);
550 	return err;
551 }
552 
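/*
 * Start/stop trigger: set the SSYNC bits for every substream in the group,
 * start or stop each stream's DMA, busy-wait until all FIFOs report ready
 * (on start) or all RUN bits clear (on stop), then clear the SSYNC bits
 * and, on start, (re)initialize the wall-clock timecounters.
 */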
553 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
554 {
555 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
556 	struct azx *chip = apcm->chip;
557 	struct azx_dev *azx_dev;
558 	struct snd_pcm_substream *s;
559 	int rstart = 0, start, nsync = 0, sbits = 0;
560 	int nwait, timeout;
561 
562 	azx_dev = get_azx_dev(substream);
563 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
564 
565 	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
566 		return -EPIPE;
567 
568 	switch (cmd) {
569 	case SNDRV_PCM_TRIGGER_START:
570 		rstart = 1;
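		/* fall through */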
571 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
572 	case SNDRV_PCM_TRIGGER_RESUME:
573 		start = 1;
574 		break;
575 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
576 	case SNDRV_PCM_TRIGGER_SUSPEND:
577 	case SNDRV_PCM_TRIGGER_STOP:
578 		start = 0;
579 		break;
580 	default:
581 		return -EINVAL;
582 	}
583 
584 	snd_pcm_group_for_each_entry(s, substream) {
585 		if (s->pcm->card != substream->pcm->card)
586 			continue;
587 		azx_dev = get_azx_dev(s);
588 		sbits |= 1 << azx_dev->index;
589 		nsync++;
590 		snd_pcm_trigger_done(s, substream);
591 	}
592 
593 	spin_lock(&chip->reg_lock);
594 
595 	/* first, set SYNC bits of corresponding streams */
596 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
597 		azx_writel(chip, OLD_SSYNC,
598 			azx_readl(chip, OLD_SSYNC) | sbits);
599 	else
600 		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
601 
602 	snd_pcm_group_for_each_entry(s, substream) {
603 		if (s->pcm->card != substream->pcm->card)
604 			continue;
605 		azx_dev = get_azx_dev(s);
606 		if (start) {
607 			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
608 			if (!rstart)
609 				azx_dev->start_wallclk -=
610 						azx_dev->period_wallclk;
611 			azx_stream_start(chip, azx_dev);
612 		} else {
613 			azx_stream_stop(chip, azx_dev);
614 		}
615 		azx_dev->running = start;
616 	}
617 	spin_unlock(&chip->reg_lock);
618 	if (start) {
619 		/* wait until all FIFOs get ready */
620 		for (timeout = 5000; timeout; timeout--) {
621 			nwait = 0;
622 			snd_pcm_group_for_each_entry(s, substream) {
623 				if (s->pcm->card != substream->pcm->card)
624 					continue;
625 				azx_dev = get_azx_dev(s);
626 				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
627 				      SD_STS_FIFO_READY))
628 					nwait++;
629 			}
630 			if (!nwait)
631 				break;
632 			cpu_relax();
633 		}
634 	} else {
635 		/* wait until all RUN bits are cleared */
636 		for (timeout = 5000; timeout; timeout--) {
637 			nwait = 0;
638 			snd_pcm_group_for_each_entry(s, substream) {
639 				if (s->pcm->card != substream->pcm->card)
640 					continue;
641 				azx_dev = get_azx_dev(s);
642 				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
643 				    SD_CTL_DMA_START)
644 					nwait++;
645 			}
646 			if (!nwait)
647 				break;
648 			cpu_relax();
649 		}
650 	}
651 	spin_lock(&chip->reg_lock);
652 	/* reset SYNC bits */
653 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
654 		azx_writel(chip, OLD_SSYNC,
655 			azx_readl(chip, OLD_SSYNC) & ~sbits);
656 	else
657 		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
658 	if (start) {
659 		azx_timecounter_init(substream, 0, 0);
660 		snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
661 		substream->runtime->trigger_tstamp_latched = true;
662 
663 		if (nsync > 1) {
664 			cycle_t cycle_last;
665 
666 			/* same start cycle for master and group */
667 			azx_dev = get_azx_dev(substream);
668 			cycle_last = azx_dev->azx_tc.cycle_last;
669 
670 			snd_pcm_group_for_each_entry(s, substream) {
671 				if (s->pcm->card != substream->pcm->card)
672 					continue;
673 				azx_timecounter_init(s, 1, cycle_last);
674 			}
675 		}
676 	}
677 	spin_unlock(&chip->reg_lock);
678 	return 0;
679 }
680 
681 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
682 {
683 	return azx_sd_readl(chip, azx_dev, SD_LPIB);
684 }
685 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
686 
687 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
688 {
689 	return le32_to_cpu(*azx_dev->posbuf);
690 }
691 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
692 
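/*
 * Return the current DMA position in bytes within the ring buffer, using
 * the per-stream get_position callback (the DMA position buffer by default),
 * and update runtime->delay from the controller- and codec-side delay
 * callbacks when they are available.
 */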
693 unsigned int azx_get_position(struct azx *chip,
694 			      struct azx_dev *azx_dev)
695 {
696 	struct snd_pcm_substream *substream = azx_dev->substream;
697 	unsigned int pos;
698 	int stream = substream->stream;
699 	int delay = 0;
700 
701 	if (chip->get_position[stream])
702 		pos = chip->get_position[stream](chip, azx_dev);
703 	else /* use the position buffer as default */
704 		pos = azx_get_pos_posbuf(chip, azx_dev);
705 
706 	if (pos >= azx_dev->bufsize)
707 		pos = 0;
708 
709 	if (substream->runtime) {
710 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
711 		struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
712 
713 		if (chip->get_delay[stream])
714 			delay += chip->get_delay[stream](chip, azx_dev, pos);
715 		if (hinfo->ops.get_delay)
716 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
717 						      substream);
718 		substream->runtime->delay = delay;
719 	}
720 
721 	trace_azx_get_position(chip, azx_dev, pos, delay);
722 	return pos;
723 }
724 EXPORT_SYMBOL_GPL(azx_get_position);
725 
726 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
727 {
728 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
729 	struct azx *chip = apcm->chip;
730 	struct azx_dev *azx_dev = get_azx_dev(substream);
731 	return bytes_to_frames(substream->runtime,
732 			       azx_get_position(chip, azx_dev));
733 }
734 
735 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
736 				struct timespec *ts)
737 {
738 	struct azx_dev *azx_dev = get_azx_dev(substream);
739 	u64 nsec;
740 
741 	nsec = timecounter_read(&azx_dev->azx_tc);
742 	nsec = div_u64(nsec, 3); /* can be optimized */
743 	nsec = azx_adjust_codec_delay(substream, nsec);
744 
745 	*ts = ns_to_timespec(nsec);
746 
747 	return 0;
748 }
749 
750 static struct snd_pcm_hardware azx_pcm_hw = {
751 	.info =			(SNDRV_PCM_INFO_MMAP |
752 				 SNDRV_PCM_INFO_INTERLEAVED |
753 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
754 				 SNDRV_PCM_INFO_MMAP_VALID |
755 				 /* No full-resume yet implemented */
756 				 /* SNDRV_PCM_INFO_RESUME |*/
757 				 SNDRV_PCM_INFO_PAUSE |
758 				 SNDRV_PCM_INFO_SYNC_START |
759 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
760 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
761 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
762 	.rates =		SNDRV_PCM_RATE_48000,
763 	.rate_min =		48000,
764 	.rate_max =		48000,
765 	.channels_min =		2,
766 	.channels_max =		2,
767 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
768 	.period_bytes_min =	128,
769 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
770 	.periods_min =		2,
771 	.periods_max =		AZX_MAX_FRAG,
772 	.fifo_size =		0,
773 };
774 
775 static int azx_pcm_open(struct snd_pcm_substream *substream)
776 {
777 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
778 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
779 	struct azx *chip = apcm->chip;
780 	struct azx_dev *azx_dev;
781 	struct snd_pcm_runtime *runtime = substream->runtime;
782 	unsigned long flags;
783 	int err;
784 	int buff_step;
785 
786 	mutex_lock(&chip->open_mutex);
787 	azx_dev = azx_assign_device(chip, substream);
788 	if (azx_dev == NULL) {
789 		mutex_unlock(&chip->open_mutex);
790 		return -EBUSY;
791 	}
792 	runtime->hw = azx_pcm_hw;
793 	runtime->hw.channels_min = hinfo->channels_min;
794 	runtime->hw.channels_max = hinfo->channels_max;
795 	runtime->hw.formats = hinfo->formats;
796 	runtime->hw.rates = hinfo->rates;
797 	snd_pcm_limit_hw_rates(runtime);
798 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
799 
800 	/* avoid wrap-around with wall-clock */
801 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
802 				     20,
803 				     178000000);
804 
805 	if (chip->align_buffer_size)
806 		/* constrain buffer sizes to be a multiple of 128
807 		   bytes. This is more efficient in terms of memory
808 		   access but isn't required by the HDA spec and
809 		   prevents users from specifying exact period/buffer
810 		   sizes. For example, at 44.1 kHz a period size set
811 		   to 20 ms will be rounded to 19.59 ms. */
812 		buff_step = 128;
813 	else
814 		/* Don't enforce step constraints on buffer sizes; they still
815 		   need to be a multiple of 4 bytes (HDA spec). Tested on Intel
816 		   HDA controllers; may not work on all devices, in which case
817 		   this option needs to be disabled. */
818 		buff_step = 4;
819 
820 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
821 				   buff_step);
822 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
823 				   buff_step);
824 	snd_hda_power_up_d3wait(apcm->codec);
825 	err = hinfo->ops.open(hinfo, apcm->codec, substream);
826 	if (err < 0) {
827 		azx_release_device(azx_dev);
828 		snd_hda_power_down(apcm->codec);
829 		mutex_unlock(&chip->open_mutex);
830 		return err;
831 	}
832 	snd_pcm_limit_hw_rates(runtime);
833 	/* sanity check */
834 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
835 	    snd_BUG_ON(!runtime->hw.channels_max) ||
836 	    snd_BUG_ON(!runtime->hw.formats) ||
837 	    snd_BUG_ON(!runtime->hw.rates)) {
838 		azx_release_device(azx_dev);
839 		hinfo->ops.close(hinfo, apcm->codec, substream);
840 		snd_hda_power_down(apcm->codec);
841 		mutex_unlock(&chip->open_mutex);
842 		return -EINVAL;
843 	}
844 
845 	/* disable WALLCLOCK timestamps for capture streams
846 	   until we figure out how to handle digital inputs */
847 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
848 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
849 
850 	spin_lock_irqsave(&chip->reg_lock, flags);
851 	azx_dev->substream = substream;
852 	azx_dev->running = 0;
853 	spin_unlock_irqrestore(&chip->reg_lock, flags);
854 
855 	runtime->private_data = azx_dev;
856 	snd_pcm_set_sync(substream);
857 	mutex_unlock(&chip->open_mutex);
858 	return 0;
859 }
860 
861 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
862 			struct vm_area_struct *area)
863 {
864 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
865 	struct azx *chip = apcm->chip;
866 	if (chip->ops->pcm_mmap_prepare)
867 		chip->ops->pcm_mmap_prepare(substream, area);
868 	return snd_pcm_lib_default_mmap(substream, area);
869 }
870 
871 static struct snd_pcm_ops azx_pcm_ops = {
872 	.open = azx_pcm_open,
873 	.close = azx_pcm_close,
874 	.ioctl = snd_pcm_lib_ioctl,
875 	.hw_params = azx_pcm_hw_params,
876 	.hw_free = azx_pcm_hw_free,
877 	.prepare = azx_pcm_prepare,
878 	.trigger = azx_pcm_trigger,
879 	.pointer = azx_pcm_pointer,
880 	.wall_clock =  azx_get_wallclock_tstamp,
881 	.mmap = azx_pcm_mmap,
882 	.page = snd_pcm_sgbuf_ops_page,
883 };
884 
885 static void azx_pcm_free(struct snd_pcm *pcm)
886 {
887 	struct azx_pcm *apcm = pcm->private_data;
888 	if (apcm) {
889 		list_del(&apcm->list);
890 		kfree(apcm);
891 	}
892 }
893 
894 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
895 
896 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
897 				 struct hda_pcm *cpcm)
898 {
899 	struct azx *chip = bus->private_data;
900 	struct snd_pcm *pcm;
901 	struct azx_pcm *apcm;
902 	int pcm_dev = cpcm->device;
903 	unsigned int size;
904 	int s, err;
905 
906 	list_for_each_entry(apcm, &chip->pcm_list, list) {
907 		if (apcm->pcm->device == pcm_dev) {
908 			dev_err(chip->card->dev, "PCM %d already exists\n",
909 				pcm_dev);
910 			return -EBUSY;
911 		}
912 	}
913 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
914 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
915 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
916 			  &pcm);
917 	if (err < 0)
918 		return err;
919 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
920 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
921 	if (apcm == NULL)
922 		return -ENOMEM;
923 	apcm->chip = chip;
924 	apcm->pcm = pcm;
925 	apcm->codec = codec;
926 	pcm->private_data = apcm;
927 	pcm->private_free = azx_pcm_free;
928 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
929 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
930 	list_add_tail(&apcm->list, &chip->pcm_list);
931 	cpcm->pcm = pcm;
932 	for (s = 0; s < 2; s++) {
933 		apcm->hinfo[s] = &cpcm->stream[s];
934 		if (cpcm->stream[s].substreams)
935 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
936 	}
937 	/* buffer pre-allocation */
938 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
939 	if (size > MAX_PREALLOC_SIZE)
940 		size = MAX_PREALLOC_SIZE;
941 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
942 					      chip->card->dev,
943 					      size, MAX_PREALLOC_SIZE);
944 	/* link to codec */
945 	for (s = 0; s < 2; s++)
946 		pcm->streams[s].dev.parent = &codec->dev;
947 	return 0;
948 }
949 
950 /*
951  * CORB / RIRB interface
952  */
953 static int azx_alloc_cmd_io(struct azx *chip)
954 {
955 	int err;
956 
957 	/* a single page (at least 4096 bytes) must suffice for both ring buffers */
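	/*
	 * Layout within that page, as programmed in azx_init_cmd_io() below:
	 * the CORB occupies the first 1024 bytes (256 entries of 4 bytes) and
	 * the RIRB starts at offset 2048 (256 entries of 8 bytes), so a
	 * 4096-byte page covers both.
	 */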
958 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
959 					 PAGE_SIZE, &chip->rb);
960 	if (err < 0)
961 		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
962 	return err;
963 }
964 
965 static void azx_init_cmd_io(struct azx *chip)
966 {
967 	int timeout;
968 
969 	spin_lock_irq(&chip->reg_lock);
970 	/* CORB set up */
971 	chip->corb.addr = chip->rb.addr;
972 	chip->corb.buf = (u32 *)chip->rb.area;
973 	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
974 	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
975 
976 	/* set the corb size to 256 entries (ULI requires this to be set explicitly) */
977 	azx_writeb(chip, CORBSIZE, 0x02);
978 	/* set the corb write pointer to 0 */
979 	azx_writew(chip, CORBWP, 0);
980 
981 	/* reset the corb hw read pointer */
982 	azx_writew(chip, CORBRP, AZX_CORBRP_RST);
983 	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
984 		for (timeout = 1000; timeout > 0; timeout--) {
985 			if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
986 				break;
987 			udelay(1);
988 		}
989 		if (timeout <= 0)
990 			dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
991 				azx_readw(chip, CORBRP));
992 
993 		azx_writew(chip, CORBRP, 0);
994 		for (timeout = 1000; timeout > 0; timeout--) {
995 			if (azx_readw(chip, CORBRP) == 0)
996 				break;
997 			udelay(1);
998 		}
999 		if (timeout <= 0)
1000 			dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1001 				azx_readw(chip, CORBRP));
1002 	}
1003 
1004 	/* enable corb dma */
1005 	azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
1006 
1007 	/* RIRB set up */
1008 	chip->rirb.addr = chip->rb.addr + 2048;
1009 	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1010 	chip->rirb.wp = chip->rirb.rp = 0;
1011 	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1012 	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1013 	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1014 
1015 	/* set the rirb size to 256 entries (ULI requires this to be set explicitly) */
1016 	azx_writeb(chip, RIRBSIZE, 0x02);
1017 	/* reset the rirb hw write pointer */
1018 	azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1019 	/* set N=1, get RIRB response interrupt for new entry */
1020 	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1021 		azx_writew(chip, RINTCNT, 0xc0);
1022 	else
1023 		azx_writew(chip, RINTCNT, 1);
1024 	/* enable rirb dma and response irq */
1025 	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1026 	spin_unlock_irq(&chip->reg_lock);
1027 }
1028 
1029 static void azx_free_cmd_io(struct azx *chip)
1030 {
1031 	spin_lock_irq(&chip->reg_lock);
1032 	/* disable ringbuffer DMAs */
1033 	azx_writeb(chip, RIRBCTL, 0);
1034 	azx_writeb(chip, CORBCTL, 0);
1035 	spin_unlock_irq(&chip->reg_lock);
1036 }
1037 
1038 static unsigned int azx_command_addr(u32 cmd)
1039 {
1040 	unsigned int addr = cmd >> 28;
1041 
1042 	if (addr >= AZX_MAX_CODECS) {
1043 		snd_BUG();
1044 		addr = 0;
1045 	}
1046 
1047 	return addr;
1048 }
1049 
1050 /* send a command */
1051 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1052 {
1053 	struct azx *chip = bus->private_data;
1054 	unsigned int addr = azx_command_addr(val);
1055 	unsigned int wp, rp;
1056 
1057 	spin_lock_irq(&chip->reg_lock);
1058 
1059 	/* add command to corb */
1060 	wp = azx_readw(chip, CORBWP);
1061 	if (wp == 0xffff) {
1062 		/* something wrong, controller likely turned to D3 */
1063 		spin_unlock_irq(&chip->reg_lock);
1064 		return -EIO;
1065 	}
1066 	wp++;
1067 	wp %= AZX_MAX_CORB_ENTRIES;
1068 
1069 	rp = azx_readw(chip, CORBRP);
1070 	if (wp == rp) {
1071 		/* oops, it's full */
1072 		spin_unlock_irq(&chip->reg_lock);
1073 		return -EAGAIN;
1074 	}
1075 
1076 	chip->rirb.cmds[addr]++;
1077 	chip->corb.buf[wp] = cpu_to_le32(val);
1078 	azx_writew(chip, CORBWP, wp);
1079 
1080 	spin_unlock_irq(&chip->reg_lock);
1081 
1082 	return 0;
1083 }
1084 
1085 #define AZX_RIRB_EX_UNSOL_EV	(1<<4)
1086 
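/*
 * Each RIRB entry is two 32-bit words: the response itself and an extended
 * word whose low nibble carries the responding codec address and whose
 * bit 4 flags an unsolicited event.
 */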
1087 /* retrieve RIRB entry - called from interrupt handler */
1088 static void azx_update_rirb(struct azx *chip)
1089 {
1090 	unsigned int rp, wp;
1091 	unsigned int addr;
1092 	u32 res, res_ex;
1093 
1094 	wp = azx_readw(chip, RIRBWP);
1095 	if (wp == 0xffff) {
1096 		/* something wrong, controller likely turned to D3 */
1097 		return;
1098 	}
1099 
1100 	if (wp == chip->rirb.wp)
1101 		return;
1102 	chip->rirb.wp = wp;
1103 
1104 	while (chip->rirb.rp != wp) {
1105 		chip->rirb.rp++;
1106 		chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1107 
1108 		rp = chip->rirb.rp << 1; /* an RIRB entry is 8 bytes */
1109 		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1110 		res = le32_to_cpu(chip->rirb.buf[rp]);
1111 		addr = res_ex & 0xf;
1112 		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1113 			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1114 				res, res_ex,
1115 				chip->rirb.rp, wp);
1116 			snd_BUG();
1117 		} else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1118 			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1119 		else if (chip->rirb.cmds[addr]) {
1120 			chip->rirb.res[addr] = res;
1121 			smp_wmb();
1122 			chip->rirb.cmds[addr]--;
1123 		} else if (printk_ratelimit()) {
1124 			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1125 				res, res_ex,
1126 				chip->last_cmd[addr]);
1127 		}
1128 	}
1129 }
1130 
1131 /* receive a response */
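/*
 * On timeout the handling below escalates step by step: poll the RIRB a
 * couple of times, then switch to permanent polling mode, then disable MSI,
 * and finally fall back to single immediate-command mode with CORB/RIRB
 * released (unless a bus reset is attempted first).
 */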
1132 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1133 					  unsigned int addr)
1134 {
1135 	struct azx *chip = bus->private_data;
1136 	unsigned long timeout;
1137 	unsigned long loopcounter;
1138 	int do_poll = 0;
1139 
1140  again:
1141 	timeout = jiffies + msecs_to_jiffies(1000);
1142 
1143 	for (loopcounter = 0;; loopcounter++) {
1144 		if (chip->polling_mode || do_poll) {
1145 			spin_lock_irq(&chip->reg_lock);
1146 			azx_update_rirb(chip);
1147 			spin_unlock_irq(&chip->reg_lock);
1148 		}
1149 		if (!chip->rirb.cmds[addr]) {
1150 			smp_rmb();
1151 			bus->rirb_error = 0;
1152 
1153 			if (!do_poll)
1154 				chip->poll_count = 0;
1155 			return chip->rirb.res[addr]; /* the last value */
1156 		}
1157 		if (time_after(jiffies, timeout))
1158 			break;
1159 		if (bus->needs_damn_long_delay || loopcounter > 3000)
1160 			msleep(2); /* temporary workaround */
1161 		else {
1162 			udelay(10);
1163 			cond_resched();
1164 		}
1165 	}
1166 
1167 	if (!bus->no_response_fallback)
1168 		return -1;
1169 
1170 	if (!chip->polling_mode && chip->poll_count < 2) {
1171 		dev_dbg(chip->card->dev,
1172 			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1173 			chip->last_cmd[addr]);
1174 		do_poll = 1;
1175 		chip->poll_count++;
1176 		goto again;
1177 	}
1178 
1179 
1180 	if (!chip->polling_mode) {
1181 		dev_warn(chip->card->dev,
1182 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1183 			 chip->last_cmd[addr]);
1184 		chip->polling_mode = 1;
1185 		goto again;
1186 	}
1187 
1188 	if (chip->msi) {
1189 		dev_warn(chip->card->dev,
1190 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1191 			 chip->last_cmd[addr]);
1192 		if (chip->ops->disable_msi_reset_irq &&
1193 		    chip->ops->disable_msi_reset_irq(chip) < 0) {
1194 			bus->rirb_error = 1;
1195 			return -1;
1196 		}
1197 		goto again;
1198 	}
1199 
1200 	if (chip->probing) {
1201 		/* If this critical timeout happens during the codec probing
1202 		 * phase, this is likely an access to a non-existing codec
1203 		 * slot.  Better to return an error and reset the system.
1204 		 */
1205 		return -1;
1206 	}
1207 
1208 	/* a fatal communication error; need either to reset or to fallback
1209 	 * to the single_cmd mode
1210 	 */
1211 	bus->rirb_error = 1;
1212 	if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1213 		bus->response_reset = 1;
1214 		return -1; /* give a chance to retry */
1215 	}
1216 
1217 	dev_err(chip->card->dev,
1218 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1219 		chip->last_cmd[addr]);
1220 	chip->single_cmd = 1;
1221 	bus->response_reset = 0;
1222 	/* release CORB/RIRB */
1223 	azx_free_cmd_io(chip);
1224 	/* disable unsolicited responses */
1225 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1226 	return -1;
1227 }
1228 
1229 /*
1230  * Use the single immediate command instead of CORB/RIRB for simplicity
1231  *
1232  * Note: according to Intel, this is not the preferred use.  The command was
1233  *       intended for the BIOS only, and may get confused with unsolicited
1234  *       responses.  So, we shouldn't use it for normal operation from the
1235  *       driver.
1236  *       The code is left here, however, for debugging/testing purposes.
1237  */
1238 
1239 /* receive a response */
1240 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1241 {
1242 	int timeout = 50;
1243 
1244 	while (timeout--) {
1245 		/* check the IRV (immediate result valid) bit */
1246 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1247 			/* reuse rirb.res as the response return value */
1248 			chip->rirb.res[addr] = azx_readl(chip, IR);
1249 			return 0;
1250 		}
1251 		udelay(1);
1252 	}
1253 	if (printk_ratelimit())
1254 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1255 			azx_readw(chip, IRS));
1256 	chip->rirb.res[addr] = -1;
1257 	return -EIO;
1258 }
1259 
1260 /* send a command */
1261 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1262 {
1263 	struct azx *chip = bus->private_data;
1264 	unsigned int addr = azx_command_addr(val);
1265 	int timeout = 50;
1266 
1267 	bus->rirb_error = 0;
1268 	while (timeout--) {
1269 		/* check ICB busy bit */
1270 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1271 			/* Clear IRV valid bit */
1272 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1273 				   AZX_IRS_VALID);
1274 			azx_writel(chip, IC, val);
1275 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1276 				   AZX_IRS_BUSY);
1277 			return azx_single_wait_for_response(chip, addr);
1278 		}
1279 		udelay(1);
1280 	}
1281 	if (printk_ratelimit())
1282 		dev_dbg(chip->card->dev,
1283 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
1284 			azx_readw(chip, IRS), val);
1285 	return -EIO;
1286 }
1287 
1288 /* receive a response */
1289 static unsigned int azx_single_get_response(struct hda_bus *bus,
1290 					    unsigned int addr)
1291 {
1292 	struct azx *chip = bus->private_data;
1293 	return chip->rirb.res[addr];
1294 }
1295 
1296 /*
1297  * The below are the main callbacks from hda_codec.
1298  *
1299  * They are just the skeleton to call sub-callbacks according to the
1300  * current setting of chip->single_cmd.
1301  */
1302 
1303 /* send a command */
1304 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1305 {
1306 	struct azx *chip = bus->private_data;
1307 
1308 	if (chip->disabled)
1309 		return 0;
1310 	chip->last_cmd[azx_command_addr(val)] = val;
1311 	if (chip->single_cmd)
1312 		return azx_single_send_cmd(bus, val);
1313 	else
1314 		return azx_corb_send_cmd(bus, val);
1315 }
1316 
1317 /* get a response */
1318 static unsigned int azx_get_response(struct hda_bus *bus,
1319 				     unsigned int addr)
1320 {
1321 	struct azx *chip = bus->private_data;
1322 	if (chip->disabled)
1323 		return 0;
1324 	if (chip->single_cmd)
1325 		return azx_single_get_response(bus, addr);
1326 	else
1327 		return azx_rirb_get_response(bus, addr);
1328 }
1329 
1330 #ifdef CONFIG_SND_HDA_DSP_LOADER
1331 /*
1332  * DSP loading code (e.g. for CA0132)
1333  */
1334 
1335 /* use the first stream for loading DSP */
1336 static struct azx_dev *
1337 azx_get_dsp_loader_dev(struct azx *chip)
1338 {
1339 	return &chip->azx_dev[chip->playback_index_offset];
1340 }
1341 
1342 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1343 				unsigned int byte_size,
1344 				struct snd_dma_buffer *bufp)
1345 {
1346 	u32 *bdl;
1347 	struct azx *chip = bus->private_data;
1348 	struct azx_dev *azx_dev;
1349 	int err;
1350 
1351 	azx_dev = azx_get_dsp_loader_dev(chip);
1352 
1353 	dsp_lock(azx_dev);
1354 	spin_lock_irq(&chip->reg_lock);
1355 	if (azx_dev->running || azx_dev->locked) {
1356 		spin_unlock_irq(&chip->reg_lock);
1357 		err = -EBUSY;
1358 		goto unlock;
1359 	}
1360 	azx_dev->prepared = 0;
1361 	chip->saved_azx_dev = *azx_dev;
1362 	azx_dev->locked = 1;
1363 	spin_unlock_irq(&chip->reg_lock);
1364 
1365 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1366 					 byte_size, bufp);
1367 	if (err < 0)
1368 		goto err_alloc;
1369 
1370 	azx_dev->bufsize = byte_size;
1371 	azx_dev->period_bytes = byte_size;
1372 	azx_dev->format_val = format;
1373 
1374 	azx_stream_reset(chip, azx_dev);
1375 
1376 	/* reset BDL address */
1377 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1378 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1379 
1380 	azx_dev->frags = 0;
1381 	bdl = (u32 *)azx_dev->bdl.area;
1382 	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1383 	if (err < 0)
1384 		goto error;
1385 
1386 	azx_setup_controller(chip, azx_dev);
1387 	dsp_unlock(azx_dev);
1388 	return azx_dev->stream_tag;
1389 
1390  error:
1391 	chip->ops->dma_free_pages(chip, bufp);
1392  err_alloc:
1393 	spin_lock_irq(&chip->reg_lock);
1394 	if (azx_dev->opened)
1395 		*azx_dev = chip->saved_azx_dev;
1396 	azx_dev->locked = 0;
1397 	spin_unlock_irq(&chip->reg_lock);
1398  unlock:
1399 	dsp_unlock(azx_dev);
1400 	return err;
1401 }
1402 
1403 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1404 {
1405 	struct azx *chip = bus->private_data;
1406 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1407 
1408 	if (start)
1409 		azx_stream_start(chip, azx_dev);
1410 	else
1411 		azx_stream_stop(chip, azx_dev);
1412 	azx_dev->running = start;
1413 }
1414 
1415 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1416 				 struct snd_dma_buffer *dmab)
1417 {
1418 	struct azx *chip = bus->private_data;
1419 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1420 
1421 	if (!dmab->area || !azx_dev->locked)
1422 		return;
1423 
1424 	dsp_lock(azx_dev);
1425 	/* reset BDL address */
1426 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1427 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1428 	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1429 	azx_dev->bufsize = 0;
1430 	azx_dev->period_bytes = 0;
1431 	azx_dev->format_val = 0;
1432 
1433 	chip->ops->dma_free_pages(chip, dmab);
1434 	dmab->area = NULL;
1435 
1436 	spin_lock_irq(&chip->reg_lock);
1437 	if (azx_dev->opened)
1438 		*azx_dev = chip->saved_azx_dev;
1439 	azx_dev->locked = 0;
1440 	spin_unlock_irq(&chip->reg_lock);
1441 	dsp_unlock(azx_dev);
1442 }
1443 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1444 
1445 int azx_alloc_stream_pages(struct azx *chip)
1446 {
1447 	int i, err;
1448 	struct snd_card *card = chip->card;
1449 
1450 	for (i = 0; i < chip->num_streams; i++) {
1451 		dsp_lock_init(&chip->azx_dev[i]);
1452 		/* allocate memory for the BDL for each stream */
1453 		err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1454 						 BDL_SIZE,
1455 						 &chip->azx_dev[i].bdl);
1456 		if (err < 0) {
1457 			dev_err(card->dev, "cannot allocate BDL\n");
1458 			return -ENOMEM;
1459 		}
1460 	}
1461 	/* allocate memory for the position buffer */
1462 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1463 					 chip->num_streams * 8, &chip->posbuf);
1464 	if (err < 0) {
1465 		dev_err(card->dev, "cannot allocate posbuf\n");
1466 		return -ENOMEM;
1467 	}
1468 
1469 	/* allocate CORB/RIRB */
1470 	err = azx_alloc_cmd_io(chip);
1471 	if (err < 0)
1472 		return err;
1473 	return 0;
1474 }
1475 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1476 
1477 void azx_free_stream_pages(struct azx *chip)
1478 {
1479 	int i;
1480 	if (chip->azx_dev) {
1481 		for (i = 0; i < chip->num_streams; i++)
1482 			if (chip->azx_dev[i].bdl.area)
1483 				chip->ops->dma_free_pages(
1484 					chip, &chip->azx_dev[i].bdl);
1485 	}
1486 	if (chip->rb.area)
1487 		chip->ops->dma_free_pages(chip, &chip->rb);
1488 	if (chip->posbuf.area)
1489 		chip->ops->dma_free_pages(chip, &chip->posbuf);
1490 }
1491 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1492 
1493 /*
1494  * Low-level interface
1495  */
1496 
1497 /* enter link reset */
1498 void azx_enter_link_reset(struct azx *chip)
1499 {
1500 	unsigned long timeout;
1501 
1502 	/* reset controller */
1503 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1504 
1505 	timeout = jiffies + msecs_to_jiffies(100);
1506 	while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1507 			time_before(jiffies, timeout))
1508 		usleep_range(500, 1000);
1509 }
1510 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1511 
1512 /* exit link reset */
1513 static void azx_exit_link_reset(struct azx *chip)
1514 {
1515 	unsigned long timeout;
1516 
1517 	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1518 
1519 	timeout = jiffies + msecs_to_jiffies(100);
1520 	while (!azx_readb(chip, GCTL) &&
1521 			time_before(jiffies, timeout))
1522 		usleep_range(500, 1000);
1523 }
1524 
1525 /* reset codec link */
1526 static int azx_reset(struct azx *chip, bool full_reset)
1527 {
1528 	if (!full_reset)
1529 		goto __skip;
1530 
1531 	/* clear STATESTS */
1532 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1533 
1534 	/* reset controller */
1535 	azx_enter_link_reset(chip);
1536 
1537 	/* delay for >= 100us for codec PLL to settle per spec
1538 	 * Rev 0.9 section 5.5.1
1539 	 */
1540 	usleep_range(500, 1000);
1541 
1542 	/* Bring controller out of reset */
1543 	azx_exit_link_reset(chip);
1544 
1545 	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
1546 	usleep_range(1000, 1200);
1547 
1548       __skip:
1549 	/* check to see if controller is ready */
1550 	if (!azx_readb(chip, GCTL)) {
1551 		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1552 		return -EBUSY;
1553 	}
1554 
1555 	/* Accept unsolicited responses */
1556 	if (!chip->single_cmd)
1557 		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1558 			   AZX_GCTL_UNSOL);
1559 
1560 	/* detect codecs */
1561 	if (!chip->codec_mask) {
1562 		chip->codec_mask = azx_readw(chip, STATESTS);
1563 		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1564 			chip->codec_mask);
1565 	}
1566 
1567 	return 0;
1568 }
1569 
1570 /* enable interrupts */
1571 static void azx_int_enable(struct azx *chip)
1572 {
1573 	/* enable controller CIE and GIE */
1574 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1575 		   AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1576 }
1577 
1578 /* disable interrupts */
1579 static void azx_int_disable(struct azx *chip)
1580 {
1581 	int i;
1582 
1583 	/* disable interrupts in stream descriptor */
1584 	for (i = 0; i < chip->num_streams; i++) {
1585 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1586 		azx_sd_writeb(chip, azx_dev, SD_CTL,
1587 			      azx_sd_readb(chip, azx_dev, SD_CTL) &
1588 					~SD_INT_MASK);
1589 	}
1590 
1591 	/* disable SIE for all streams */
1592 	azx_writeb(chip, INTCTL, 0);
1593 
1594 	/* disable controller CIE and GIE */
1595 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1596 		   ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1597 }
1598 
1599 /* clear interrupts */
1600 static void azx_int_clear(struct azx *chip)
1601 {
1602 	int i;
1603 
1604 	/* clear stream status */
1605 	for (i = 0; i < chip->num_streams; i++) {
1606 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1607 		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1608 	}
1609 
1610 	/* clear STATESTS */
1611 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1612 
1613 	/* clear rirb status */
1614 	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1615 
1616 	/* clear int status */
1617 	azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1618 }
1619 
1620 /*
1621  * reset and start the controller registers
1622  */
1623 void azx_init_chip(struct azx *chip, bool full_reset)
1624 {
1625 	if (chip->initialized)
1626 		return;
1627 
1628 	/* reset controller */
1629 	azx_reset(chip, full_reset);
1630 
1631 	/* initialize interrupts */
1632 	azx_int_clear(chip);
1633 	azx_int_enable(chip);
1634 
1635 	/* initialize the codec command I/O */
1636 	if (!chip->single_cmd)
1637 		azx_init_cmd_io(chip);
1638 
1639 	/* program the position buffer */
1640 	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1641 	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1642 
1643 	chip->initialized = 1;
1644 }
1645 EXPORT_SYMBOL_GPL(azx_init_chip);
1646 
1647 void azx_stop_chip(struct azx *chip)
1648 {
1649 	if (!chip->initialized)
1650 		return;
1651 
1652 	/* disable interrupts */
1653 	azx_int_disable(chip);
1654 	azx_int_clear(chip);
1655 
1656 	/* disable CORB/RIRB */
1657 	azx_free_cmd_io(chip);
1658 
1659 	/* disable position buffer */
1660 	azx_writel(chip, DPLBASE, 0);
1661 	azx_writel(chip, DPUBASE, 0);
1662 
1663 	chip->initialized = 0;
1664 }
1665 EXPORT_SYMBOL_GPL(azx_stop_chip);
1666 
1667 /*
1668  * interrupt handler
1669  */
1670 irqreturn_t azx_interrupt(int irq, void *dev_id)
1671 {
1672 	struct azx *chip = dev_id;
1673 	struct azx_dev *azx_dev;
1674 	u32 status;
1675 	u8 sd_status;
1676 	int i;
1677 
1678 #ifdef CONFIG_PM
1679 	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1680 		if (!pm_runtime_active(chip->card->dev))
1681 			return IRQ_NONE;
1682 #endif
1683 
1684 	spin_lock(&chip->reg_lock);
1685 
1686 	if (chip->disabled) {
1687 		spin_unlock(&chip->reg_lock);
1688 		return IRQ_NONE;
1689 	}
1690 
1691 	status = azx_readl(chip, INTSTS);
1692 	if (status == 0 || status == 0xffffffff) {
1693 		spin_unlock(&chip->reg_lock);
1694 		return IRQ_NONE;
1695 	}
1696 
1697 	for (i = 0; i < chip->num_streams; i++) {
1698 		azx_dev = &chip->azx_dev[i];
1699 		if (status & azx_dev->sd_int_sta_mask) {
1700 			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1701 			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1702 			if (!azx_dev->substream || !azx_dev->running ||
1703 			    !(sd_status & SD_INT_COMPLETE))
1704 				continue;
1705 			/* check whether this IRQ is really acceptable */
1706 			if (!chip->ops->position_check ||
1707 			    chip->ops->position_check(chip, azx_dev)) {
1708 				spin_unlock(&chip->reg_lock);
1709 				snd_pcm_period_elapsed(azx_dev->substream);
1710 				spin_lock(&chip->reg_lock);
1711 			}
1712 		}
1713 	}
1714 
1715 	/* clear rirb int */
1716 	status = azx_readb(chip, RIRBSTS);
1717 	if (status & RIRB_INT_MASK) {
1718 		if (status & RIRB_INT_RESPONSE) {
1719 			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1720 				udelay(80);
1721 			azx_update_rirb(chip);
1722 		}
1723 		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1724 	}
1725 
1726 	spin_unlock(&chip->reg_lock);
1727 
1728 	return IRQ_HANDLED;
1729 }
1730 EXPORT_SYMBOL_GPL(azx_interrupt);
1731 
1732 /*
1733  * Codec interface
1734  */
1735 
1736 /*
1737  * Probe the given codec address
1738  */
1739 static int probe_codec(struct azx *chip, int addr)
1740 {
1741 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1742 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1743 	unsigned int res;
1744 
1745 	mutex_lock(&chip->bus->cmd_mutex);
1746 	chip->probing = 1;
1747 	azx_send_cmd(chip->bus, cmd);
1748 	res = azx_get_response(chip->bus, addr);
1749 	chip->probing = 0;
1750 	mutex_unlock(&chip->bus->cmd_mutex);
1751 	if (res == -1)
1752 		return -EIO;
1753 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1754 	return 0;
1755 }
1756 
1757 static void azx_bus_reset(struct hda_bus *bus)
1758 {
1759 	struct azx *chip = bus->private_data;
1760 
1761 	bus->in_reset = 1;
1762 	azx_stop_chip(chip);
1763 	azx_init_chip(chip, true);
1764 #ifdef CONFIG_PM
1765 	if (chip->initialized) {
1766 		struct azx_pcm *p;
1767 		list_for_each_entry(p, &chip->pcm_list, list)
1768 			snd_pcm_suspend_all(p->pcm);
1769 		snd_hda_suspend(chip->bus);
1770 		snd_hda_resume(chip->bus);
1771 	}
1772 #endif
1773 	bus->in_reset = 0;
1774 }
1775 
1776 #ifdef CONFIG_PM
1777 /* power-up/down the controller */
1778 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1779 {
1780 	struct azx *chip = bus->private_data;
1781 
1782 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1783 		return;
1784 
1785 	if (power_up)
1786 		pm_runtime_get_sync(chip->card->dev);
1787 	else
1788 		pm_runtime_put_sync(chip->card->dev);
1789 }
1790 #endif
1791 
1792 static int get_jackpoll_interval(struct azx *chip)
1793 {
1794 	int i;
1795 	unsigned int j;
1796 
1797 	if (!chip->jackpoll_ms)
1798 		return 0;
1799 
1800 	i = chip->jackpoll_ms[chip->dev_index];
1801 	if (i == 0)
1802 		return 0;
1803 	if (i < 50 || i > 60000)
1804 		j = 0;
1805 	else
1806 		j = msecs_to_jiffies(i);
1807 	if (j == 0)
1808 		dev_warn(chip->card->dev,
1809 			 "jackpoll_ms value out of range: %d\n", i);
1810 	return j;
1811 }
1812 
1813 /* Codec initialization */
1814 int azx_codec_create(struct azx *chip, const char *model,
1815 		     unsigned int max_slots,
1816 		     int *power_save_to)
1817 {
1818 	struct hda_bus_template bus_temp;
1819 	int c, codecs, err;
1820 
1821 	memset(&bus_temp, 0, sizeof(bus_temp));
1822 	bus_temp.private_data = chip;
1823 	bus_temp.modelname = model;
1824 	bus_temp.pci = chip->pci;
1825 	bus_temp.ops.command = azx_send_cmd;
1826 	bus_temp.ops.get_response = azx_get_response;
1827 	bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1828 	bus_temp.ops.bus_reset = azx_bus_reset;
1829 #ifdef CONFIG_PM
1830 	bus_temp.power_save = power_save_to;
1831 	bus_temp.ops.pm_notify = azx_power_notify;
1832 #endif
1833 #ifdef CONFIG_SND_HDA_DSP_LOADER
1834 	bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1835 	bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1836 	bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1837 #endif
1838 
1839 	err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1840 	if (err < 0)
1841 		return err;
1842 
1843 	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1844 		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1845 		chip->bus->needs_damn_long_delay = 1;
1846 	}
1847 
1848 	codecs = 0;
1849 	if (!max_slots)
1850 		max_slots = AZX_DEFAULT_CODECS;
1851 
1852 	/* First try to probe all given codec slots */
1853 	for (c = 0; c < max_slots; c++) {
1854 		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1855 			if (probe_codec(chip, c) < 0) {
1856 				/* Some BIOSen give you wrong codec addresses
1857 				 * that don't exist
1858 				 */
1859 				dev_warn(chip->card->dev,
1860 					 "Codec #%d probe error; disabling it...\n", c);
1861 				chip->codec_mask &= ~(1 << c);
1862 				/* Worse, accessing a non-existing
1863 				 * codec often screws up the controller chip
1864 				 * and disturbs further communication.
1865 				 * Thus if an error occurs during probing,
1866 				 * better to reset the controller chip to
1867 				 * get back to the sanity state.
1868 				 */
1869 				azx_stop_chip(chip);
1870 				azx_init_chip(chip, true);
1871 			}
1872 		}
1873 	}
1874 
1875 	/* AMD chipsets often cause communication stalls upon certain
1876 	 * sequences such as pin detection.  It seems that forcing the synced
1877 	 * access works around the stall.  Grrr...
1878 	 */
1879 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1880 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1881 		chip->bus->sync_write = 1;
1882 		chip->bus->allow_bus_reset = 1;
1883 	}
1884 
1885 	/* Then create codec instances */
1886 	for (c = 0; c < max_slots; c++) {
1887 		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1888 			struct hda_codec *codec;
1889 			err = snd_hda_codec_new(chip->bus, c, &codec);
1890 			if (err < 0)
1891 				continue;
1892 			codec->jackpoll_interval = get_jackpoll_interval(chip);
1893 			codec->beep_mode = chip->beep_mode;
1894 			codecs++;
1895 		}
1896 	}
1897 	if (!codecs) {
1898 		dev_err(chip->card->dev, "no codecs initialized\n");
1899 		return -ENXIO;
1900 	}
1901 	return 0;
1902 }
1903 EXPORT_SYMBOL_GPL(azx_codec_create);
1904 
1905 /* configure each codec instance */
1906 int azx_codec_configure(struct azx *chip)
1907 {
1908 	struct hda_codec *codec;
1909 	list_for_each_entry(codec, &chip->bus->codec_list, list) {
1910 		snd_hda_codec_configure(codec);
1911 	}
1912 	return 0;
1913 }
1914 EXPORT_SYMBOL_GPL(azx_codec_configure);
1915 
1916 /* mixer creation - all stuff is implemented in hda module */
1917 int azx_mixer_create(struct azx *chip)
1918 {
1919 	return snd_hda_build_controls(chip->bus);
1920 }
1921 EXPORT_SYMBOL_GPL(azx_mixer_create);
1922 
1923 
1924 static bool is_input_stream(struct azx *chip, unsigned char index)
1925 {
1926 	return (index >= chip->capture_index_offset &&
1927 		index < chip->capture_index_offset + chip->capture_streams);
1928 }
1929 
1930 /* initialize SD streams */
1931 int azx_init_stream(struct azx *chip)
1932 {
1933 	int i;
1934 	int in_stream_tag = 0;
1935 	int out_stream_tag = 0;
1936 
1937 	/* initialize each stream (aka device):
1938 	 * assign the starting BDL address to each stream (device)
1939 	 * and initialize it
1940 	 */
1941 	for (i = 0; i < chip->num_streams; i++) {
1942 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1943 		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1944 		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1945 		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1946 		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1947 		azx_dev->sd_int_sta_mask = 1 << i;
1948 		azx_dev->index = i;
1949 
1950 		/* The stream tag must be unique within each
1951 		 * stream direction group;
1952 		 * valid values are 1...15.
1953 		 * Use separate per-direction stream tags if the flag
1954 		 * AZX_DCAPS_SEPARATE_STREAM_TAG is set.
1955 		 */
1956 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1957 			azx_dev->stream_tag =
1958 				is_input_stream(chip, i) ?
1959 				++in_stream_tag :
1960 				++out_stream_tag;
1961 		else
1962 			azx_dev->stream_tag = i + 1;
1963 	}
1964 
1965 	return 0;
1966 }
1967 EXPORT_SYMBOL_GPL(azx_init_stream);
1968 
1969 /*
1970  * reboot notifier for hang-up problem at power-down
1971  */
1972 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1973 {
1974 	struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1975 	snd_hda_bus_reboot_notify(chip->bus);
1976 	azx_stop_chip(chip);
1977 	return NOTIFY_OK;
1978 }
1979 
1980 void azx_notifier_register(struct azx *chip)
1981 {
1982 	chip->reboot_notifier.notifier_call = azx_halt;
1983 	register_reboot_notifier(&chip->reboot_notifier);
1984 }
1985 EXPORT_SYMBOL_GPL(azx_notifier_register);
1986 
1987 void azx_notifier_unregister(struct azx *chip)
1988 {
1989 	if (chip->reboot_notifier.notifier_call)
1990 		unregister_reboot_notifier(&chip->reboot_notifier);
1991 }
1992 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1993 
1994 MODULE_LICENSE("GPL");
1995 MODULE_DESCRIPTION("Common HDA driver functions");
1996