xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision afb46f79)
1 /*
2  *
3  *  Implementation of primary alsa driver code base for Intel HD Audio.
4  *
5  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
6  *
7  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8  *                     PeiSen Hou <pshou@realtek.com.tw>
9  *
10  *  This program is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License as published by the Free
12  *  Software Foundation; either version 2 of the License, or (at your option)
13  *  any later version.
14  *
15  *  This program is distributed in the hope that it will be useful, but WITHOUT
16  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  *  more details.
19  *
20  *
21  */
22 
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include "hda_priv.h"
33 #include "hda_controller.h"
34 
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
37 
/* DSP lock helpers
 *
 * With CONFIG_SND_HDA_DSP_LOADER, a stream descriptor can be claimed
 * exclusively for DSP firmware download; these helpers serialize that
 * against normal PCM use of the same descriptor.  Without the loader
 * they compile away to no-ops.
 */
#ifdef CONFIG_SND_HDA_DSP_LOADER
#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev)	((dev)->locked)
#else
#define dsp_lock_init(dev)	do {} while (0)
#define dsp_lock(dev)		do {} while (0)
#define dsp_unlock(dev)		do {} while (0)
#define dsp_is_locked(dev)	0
#endif
50 
51 /*
52  * AZX stream operations.
53  */
54 
/* start a stream: enable its stream interrupt and kick the DMA engine */
static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
{
	/*
	 * Before stream start, initialize parameter
	 * ("insufficient" is consumed by azx_via_get_position() to detect
	 * that the capture FIFO has not filled yet after a start)
	 */
	azx_dev->insufficient = 1;

	/* enable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
	/* set DMA start and interrupt mask */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_DMA_START | SD_INT_MASK);
}
71 
/* stop DMA: clear the RUN bit and the per-stream interrupt enables,
 * then ack any pending stream status bits
 */
static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) &
		      ~(SD_CTL_DMA_START | SD_INT_MASK));
	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
}
80 
/* stop a stream: stop DMA, then disable its bit in the global
 * interrupt control register
 */
void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_stream_clear(chip, azx_dev);
	/* disable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
}
EXPORT_SYMBOL_GPL(azx_stream_stop);
90 
/* reset stream: toggle the SRST bit and wait for the hardware to
 * acknowledge entering and then leaving the reset state
 */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	/* make sure DMA is stopped before asserting reset */
	azx_stream_clear(chip, azx_dev);

	/* assert the stream reset bit */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_STREAM_RESET);
	udelay(3);
	timeout = 300;
	/* poll until the controller reports the reset bit as set,
	 * bounded by the timeout counter
	 */
	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		 SD_CTL_STREAM_RESET) && --timeout)
		;
	/* deassert reset using the last value read back */
	val &= ~SD_CTL_STREAM_RESET;
	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
	udelay(3);

	timeout = 300;
	/* waiting for hardware to report that the stream is out of reset */
	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		SD_CTL_STREAM_RESET) && --timeout)
		;

	/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}
120 
/*
 * set up the SD for streaming: program stream tag, buffer length,
 * format, BDL address and interrupt enables.  Must be called with the
 * stream's RUN bit cleared (done here via azx_stream_clear()).
 */
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned int val;
	/* make sure the run bit is zero for SD */
	azx_stream_clear(chip, azx_dev);
	/* program the stream_tag */
	val = azx_sd_readl(chip, azx_dev, SD_CTL);
	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
	/* without snooping, raise the traffic priority of this stream */
	if (!azx_snoop(chip))
		val |= SD_CTL_TRAFFIC_PRIO;
	azx_sd_writel(chip, azx_dev, SD_CTL, val);

	/* program the length of samples in cyclic buffer */
	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);

	/* program the stream format */
	/* this value needs to be the same as the one programmed */
	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);

	/* program the stream LVI (last valid index) of the BDL */
	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);

	/* program the BDL address */
	/* lower BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
	/* upper BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPU,
		      upper_32_bits(azx_dev->bdl.addr));

	/* enable the position buffer (skipped when both directions use
	 * LPIB-only position reporting)
	 */
	if (chip->position_fix[0] != POS_FIX_LPIB ||
	    chip->position_fix[1] != POS_FIX_LPIB) {
		if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
			azx_writel(chip, DPLBASE,
				(u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
	}

	/* set the interrupt enable bits in the descriptor control register */
	azx_sd_writel(chip, azx_dev, SD_CTL,
		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);

	return 0;
}
168 
/* assign a stream for the PCM
 *
 * Picks a free stream descriptor from the playback or capture range.
 * A substream that was opened before gets its previous descriptor back
 * (matched via assigned_key); otherwise the first unused descriptor is
 * taken.  Returns NULL when every descriptor is busy.
 */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	int dev, i, nums;
	struct azx_dev *res = NULL;
	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dev = chip->playback_index_offset;
		nums = chip->playback_streams;
	} else {
		dev = chip->capture_index_offset;
		nums = chip->capture_streams;
	}
	for (i = 0; i < nums; i++, dev++) {
		struct azx_dev *azx_dev = &chip->azx_dev[dev];
		dsp_lock(azx_dev);
		/* skip descriptors in use or claimed by the DSP loader */
		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
			if (azx_dev->assigned_key == key) {
				/* same substream as before: reuse it */
				azx_dev->opened = 1;
				azx_dev->assigned_key = key;
				dsp_unlock(azx_dev);
				return azx_dev;
			}
			/* remember the first free descriptor as fallback */
			if (!res)
				res = azx_dev;
		}
		dsp_unlock(azx_dev);
	}
	if (res) {
		dsp_lock(res);
		res->opened = 1;
		res->assigned_key = key;
		dsp_unlock(res);
	}
	return res;
}
209 
/* release the assigned stream; the descriptor becomes available to
 * azx_assign_device() again (assigned_key is kept so a reopened
 * substream can re-match its old descriptor)
 */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	azx_dev->opened = 0;
}
215 
216 static cycle_t azx_cc_read(const struct cyclecounter *cc)
217 {
218 	struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
219 	struct snd_pcm_substream *substream = azx_dev->substream;
220 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
221 	struct azx *chip = apcm->chip;
222 
223 	return azx_readl(chip, WALLCLK);
224 }
225 
226 static void azx_timecounter_init(struct snd_pcm_substream *substream,
227 				bool force, cycle_t last)
228 {
229 	struct azx_dev *azx_dev = get_azx_dev(substream);
230 	struct timecounter *tc = &azx_dev->azx_tc;
231 	struct cyclecounter *cc = &azx_dev->azx_cc;
232 	u64 nsec;
233 
234 	cc->read = azx_cc_read;
235 	cc->mask = CLOCKSOURCE_MASK(32);
236 
237 	/*
238 	 * Converting from 24 MHz to ns means applying a 125/3 factor.
239 	 * To avoid any saturation issues in intermediate operations,
240 	 * the 125 factor is applied first. The division is applied
241 	 * last after reading the timecounter value.
242 	 * Applying the 1/3 factor as part of the multiplication
243 	 * requires at least 20 bits for a decent precision, however
244 	 * overflows occur after about 4 hours or less, not a option.
245 	 */
246 
247 	cc->mult = 125; /* saturation after 195 years */
248 	cc->shift = 0;
249 
250 	nsec = 0; /* audio time is elapsed time since trigger */
251 	timecounter_init(tc, cc, nsec);
252 	if (force)
253 		/*
254 		 * force timecounter to use predefined value,
255 		 * used for synchronized starts
256 		 */
257 		tc->cycle_last = last;
258 }
259 
260 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
261 				u64 nsec)
262 {
263 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
264 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
265 	u64 codec_frames, codec_nsecs;
266 
267 	if (!hinfo->ops.get_delay)
268 		return nsec;
269 
270 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
271 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
272 			      substream->runtime->rate);
273 
274 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
275 		return nsec + codec_nsecs;
276 
277 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
278 }
279 
/*
 * set up a BDL entry
 *
 * Fills BDL entries for @size bytes of @dmab starting at offset @ofs,
 * splitting at scatter-gather chunk (and, on some chips, 4K) boundaries.
 * *bdlp is advanced past the written entries.  Returns the new buffer
 * offset, or -EINVAL if the BDL entry limit is exceeded.
 */
static int setup_bdle(struct azx *chip,
		      struct snd_dma_buffer *dmab,
		      struct azx_dev *azx_dev, u32 **bdlp,
		      int ofs, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_sgbuf_get_addr(dmab, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
		/* one BDLE cannot cross 4K boundary on CTHDA chips */
		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
			u32 remain = 0x1000 - (ofs & 0xfff);
			if (chunk > remain)
				chunk = remain;
		}
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
		bdl += 4;	/* each BDL entry is four 32-bit words */
		azx_dev->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}
322 
323 /*
324  * set up BDL entries
325  */
326 static int azx_setup_periods(struct azx *chip,
327 			     struct snd_pcm_substream *substream,
328 			     struct azx_dev *azx_dev)
329 {
330 	u32 *bdl;
331 	int i, ofs, periods, period_bytes;
332 	int pos_adj = 0;
333 
334 	/* reset BDL address */
335 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
336 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
337 
338 	period_bytes = azx_dev->period_bytes;
339 	periods = azx_dev->bufsize / period_bytes;
340 
341 	/* program the initial BDL entries */
342 	bdl = (u32 *)azx_dev->bdl.area;
343 	ofs = 0;
344 	azx_dev->frags = 0;
345 
346 	if (chip->bdl_pos_adj)
347 		pos_adj = chip->bdl_pos_adj[chip->dev_index];
348 	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
349 		struct snd_pcm_runtime *runtime = substream->runtime;
350 		int pos_align = pos_adj;
351 		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
352 		if (!pos_adj)
353 			pos_adj = pos_align;
354 		else
355 			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
356 				pos_align;
357 		pos_adj = frames_to_bytes(runtime, pos_adj);
358 		if (pos_adj >= period_bytes) {
359 			dev_warn(chip->card->dev,"Too big adjustment %d\n",
360 				 pos_adj);
361 			pos_adj = 0;
362 		} else {
363 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
364 					 azx_dev,
365 					 &bdl, ofs, pos_adj, true);
366 			if (ofs < 0)
367 				goto error;
368 		}
369 	} else
370 		pos_adj = 0;
371 
372 	for (i = 0; i < periods; i++) {
373 		if (i == periods - 1 && pos_adj)
374 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
375 					 azx_dev, &bdl, ofs,
376 					 period_bytes - pos_adj, 0);
377 		else
378 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
379 					 azx_dev, &bdl, ofs,
380 					 period_bytes,
381 					 !azx_dev->no_period_wakeup);
382 		if (ofs < 0)
383 			goto error;
384 	}
385 	return 0;
386 
387  error:
388 	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
389 		azx_dev->bufsize, period_bytes);
390 	return -EINVAL;
391 }
392 
393 /*
394  * PCM ops
395  */
/* PCM close callback: detach the stream descriptor under the register
 * spinlock, release it, call the codec close op and drop the power
 * reference taken at open time
 */
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	/* clear the back-pointer so the irq handler no longer touches us */
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = NULL;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	azx_release_device(azx_dev);
	hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
416 
417 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
418 			     struct snd_pcm_hw_params *hw_params)
419 {
420 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
421 	struct azx *chip = apcm->chip;
422 	int ret;
423 
424 	dsp_lock(get_azx_dev(substream));
425 	if (dsp_is_locked(get_azx_dev(substream))) {
426 		ret = -EBUSY;
427 		goto unlock;
428 	}
429 
430 	ret = chip->ops->substream_alloc_pages(chip, substream,
431 					  params_buffer_bytes(hw_params));
432 unlock:
433 	dsp_unlock(get_azx_dev(substream));
434 	return ret;
435 }
/* PCM hw_free callback: reset the stream registers (unless the DSP
 * loader owns the descriptor), clean up the codec side and free the
 * DMA buffer pages
 */
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx *chip = apcm->chip;
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev)) {
		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
		/* forget cached setup so the next prepare reprograms it */
		azx_dev->bufsize = 0;
		azx_dev->period_bytes = 0;
		azx_dev->format_val = 0;
	}

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = chip->ops->substream_free_pages(chip, substream);
	azx_dev->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}
/* PCM prepare callback: reset the stream, compute the HDA stream
 * format, (re)program BDL and stream registers when the configuration
 * changed, and finally set up the codec with the stream tag/format.
 * Fails with -EBUSY while the DSP loader holds the descriptor.
 */
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	azx_stream_reset(chip, azx_dev);
	format_val = snd_hda_calc_stream_format(runtime->rate,
						runtime->channels,
						runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);

	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
		bufsize, format_val);

	/* reprogram the BDL only when something actually changed */
	if (bufsize != azx_dev->bufsize ||
	    period_bytes != azx_dev->period_bytes ||
	    format_val != azx_dev->format_val ||
	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
		azx_dev->bufsize = bufsize;
		azx_dev->period_bytes = period_bytes;
		azx_dev->format_val = format_val;
		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
		err = azx_setup_periods(chip, substream, azx_dev);
		if (err < 0)
			goto unlock;
	}

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime->period_size > 64)
		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24Mhz clock source */
	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
						runtime->rate) * 1000);
	azx_setup_controller(chip, azx_dev);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		azx_dev->fifo_size =
			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
	else
		azx_dev->fifo_size = 0;

	stream_tag = azx_dev->stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				     azx_dev->format_val, substream);

 unlock:
	if (!err)
		azx_dev->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
/* PCM trigger callback: start/stop all substreams linked into the same
 * group synchronously using the SSYNC register, then wait for the
 * FIFOs (start) or RUN bits (stop) and finally (re)initialize the
 * wall-clock timecounters for a synchronized start
 */
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	int rstart = 0, start, nsync = 0, sbits = 0;
	int nwait, timeout;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rstart = 1;
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}

	/* collect the SSYNC bits of all linked substreams on this card */
	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->index;
		nsync++;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&chip->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) | sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
			/* resume/pause-release: pretend the stream started
			 * one period earlier
			 */
			if (!rstart)
				azx_dev->start_wallclk -=
						azx_dev->period_wallclk;
			azx_stream_start(chip, azx_dev);
		} else {
			azx_stream_stop(chip, azx_dev);
		}
		azx_dev->running = start;
	}
	spin_unlock(&chip->reg_lock);
	if (start) {
		/* wait until all FIFOs get ready */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
				      SD_STS_FIFO_READY))
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	} else {
		/* wait until all RUN bits are cleared */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
				    SD_CTL_DMA_START)
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	}
	spin_lock(&chip->reg_lock);
	/* reset SYNC bits */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) & ~sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
	if (start) {
		azx_timecounter_init(substream, 0, 0);
		if (nsync > 1) {
			cycle_t cycle_last;

			/* same start cycle for master and group */
			azx_dev = get_azx_dev(substream);
			cycle_last = azx_dev->azx_tc.cycle_last;

			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_timecounter_init(s, 1, cycle_last);
			}
		}
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
674 
/* get the current DMA position with correction on VIA chips
 *
 * Playback can use the link position (LPIB) directly; capture combines
 * the position buffer with the link position and the capture FIFO size
 * to reconstruct a reliable DMA position.
 */
static unsigned int azx_via_get_position(struct azx *chip,
					 struct azx_dev *azx_dev)
{
	unsigned int link_pos, mini_pos, bound_pos;
	unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
	unsigned int fifo_size;

	link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
	if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Playback, no problem using link position */
		return link_pos;
	}

	/* Capture */
	/* For new chipset,
	 * use mod to get the DMA position just like old chipset
	 */
	mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
	mod_dma_pos %= azx_dev->period_bytes;

	/* azx_dev->fifo_size can't get FIFO size of in stream.
	 * Get from base address + offset.
	 */
	fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);

	if (azx_dev->insufficient) {
		/* Link position is never greater than FIFO size while the
		 * FIFO is still filling after stream start
		 */
		if (link_pos <= fifo_size)
			return 0;

		azx_dev->insufficient = 0;
	}

	/* mini_pos = link position minus the FIFO still in flight,
	 * wrapped within the ring buffer
	 */
	if (link_pos <= fifo_size)
		mini_pos = azx_dev->bufsize + link_pos - fifo_size;
	else
		mini_pos = link_pos - fifo_size;

	/* Find nearest previous period boundary */
	mod_mini_pos = mini_pos % azx_dev->period_bytes;
	mod_link_pos = link_pos % azx_dev->period_bytes;
	if (mod_link_pos >= fifo_size)
		bound_pos = link_pos - mod_link_pos;
	else if (mod_dma_pos >= mod_mini_pos)
		bound_pos = mini_pos - mod_mini_pos;
	else {
		bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
		if (bound_pos >= azx_dev->bufsize)
			bound_pos = 0;
	}

	/* Calculate real DMA position we want */
	return bound_pos + mod_dma_pos;
}
/* get the current DMA position of a stream in bytes
 *
 * The method depends on chip->position_fix: LPIB register, the
 * VIA-specific combination, or the DMA position buffer (with an
 * optional one-shot sanity check that falls back to LPIB).  Also
 * updates runtime->delay from the LPIB/posbuf difference plus the
 * codec-reported delay, when applicable.
 */
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev,
			      bool with_check)
{
	struct snd_pcm_substream *substream = azx_dev->substream;
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	unsigned int pos;
	int stream = substream->stream;
	struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
	int delay = 0;

	switch (chip->position_fix[stream]) {
	case POS_FIX_LPIB:
		/* read LPIB */
		pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		break;
	case POS_FIX_VIACOMBO:
		pos = azx_via_get_position(chip, azx_dev);
		break;
	default:
		/* use the position buffer */
		pos = le32_to_cpu(*azx_dev->posbuf);
		if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
			/* 0 or all-ones means the posbuf isn't updating;
			 * switch this direction to LPIB permanently
			 */
			if (!pos || pos == (u32)-1) {
				dev_info(chip->card->dev,
					 "Invalid position buffer, using LPIB read method instead.\n");
				chip->position_fix[stream] = POS_FIX_LPIB;
				pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
			} else
				chip->position_fix[stream] = POS_FIX_POSBUF;
		}
		break;
	}

	if (pos >= azx_dev->bufsize)
		pos = 0;

	/* calculate runtime delay from LPIB */
	if (substream->runtime &&
	    chip->position_fix[stream] == POS_FIX_POSBUF &&
	    (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
		unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
			delay = pos - lpib_pos;
		else
			delay = lpib_pos - pos;
		if (delay < 0) {
			/* small negatives are ignored; larger ones are
			 * treated as a ring-buffer wrap
			 */
			if (delay >= azx_dev->delay_negative_threshold)
				delay = 0;
			else
				delay += azx_dev->bufsize;
		}
		if (delay >= azx_dev->period_bytes) {
			dev_info(chip->card->dev,
				 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
				 delay, azx_dev->period_bytes);
			delay = 0;
			chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
		}
		delay = bytes_to_frames(substream->runtime, delay);
	}

	if (substream->runtime) {
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
804 
805 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
806 {
807 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
808 	struct azx *chip = apcm->chip;
809 	struct azx_dev *azx_dev = get_azx_dev(substream);
810 	return bytes_to_frames(substream->runtime,
811 			       azx_get_position(chip, azx_dev, false));
812 }
813 
814 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
815 				struct timespec *ts)
816 {
817 	struct azx_dev *azx_dev = get_azx_dev(substream);
818 	u64 nsec;
819 
820 	nsec = timecounter_read(&azx_dev->azx_tc);
821 	nsec = div_u64(nsec, 3); /* can be optimized */
822 	nsec = azx_adjust_codec_delay(substream, nsec);
823 
824 	*ts = ns_to_timespec(nsec);
825 
826 	return 0;
827 }
/* default hardware capabilities; channel/format/rate fields are
 * overridden per stream in azx_pcm_open() from the codec's hinfo
 */
static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
/* PCM open callback: assign a stream descriptor, fill in the hardware
 * capabilities from the codec's hinfo, apply buffer constraints, power
 * up the codec and call its open op.  All failure paths undo the
 * descriptor assignment and power reference.
 */
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		mutex_unlock(&chip->open_mutex);
		return -EBUSY;
	}
	/* start from the defaults and refine with the codec capabilities */
	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	/* wait until the codec is fully powered before the open op */
	snd_hda_power_up_d3wait(apcm->codec);
	err = hinfo->ops.open(hinfo, apcm->codec, substream);
	if (err < 0) {
		azx_release_device(azx_dev);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return err;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		hinfo->ops.close(hinfo, apcm->codec, substream);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return -EINVAL;
	}

	/* disable WALLCLOCK timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;

	/* publish the substream to the irq handler under the reg lock */
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = substream;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
939 
940 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
941 			struct vm_area_struct *area)
942 {
943 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
944 	struct azx *chip = apcm->chip;
945 	if (chip->ops->pcm_mmap_prepare)
946 		chip->ops->pcm_mmap_prepare(substream, area);
947 	return snd_pcm_lib_default_mmap(substream, area);
948 }
/* PCM operation table shared by all azx streams */
static struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.wall_clock =  azx_get_wallclock_tstamp,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};
963 
964 static void azx_pcm_free(struct snd_pcm *pcm)
965 {
966 	struct azx_pcm *apcm = pcm->private_data;
967 	if (apcm) {
968 		list_del(&apcm->list);
969 		kfree(apcm);
970 	}
971 }
/* cap for per-PCM buffer preallocation */
#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)

/* create a PCM device for a codec PCM definition: allocate the
 * snd_pcm, attach the azx_pcm glue, hook up ops for each direction
 * that has substreams, and preallocate SG buffers
 */
static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
				 struct hda_pcm *cpcm)
{
	struct azx *chip = bus->private_data;
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;

	/* refuse a duplicate PCM device number */
	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL)
		return -ENOMEM;
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;	/* frees apcm with the pcm */
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	/* set ops only for directions that actually have substreams */
	for (s = 0; s < 2; s++) {
		apcm->hinfo[s] = &cpcm->stream[s];
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	/* link to codec */
	pcm->dev = &codec->dev;
	return 0;
}
1027 
1028 /*
1029  * CORB / RIRB interface
1030  */
1031 static int azx_alloc_cmd_io(struct azx *chip)
1032 {
1033 	int err;
1034 
1035 	/* single page (at least 4096 bytes) must suffice for both ringbuffes */
1036 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1037 					 PAGE_SIZE, &chip->rb);
1038 	if (err < 0)
1039 		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
1040 	return err;
1041 }
1042 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
1043 
1044 static void azx_init_cmd_io(struct azx *chip)
1045 {
1046 	int timeout;
1047 
1048 	spin_lock_irq(&chip->reg_lock);
1049 	/* CORB set up */
1050 	chip->corb.addr = chip->rb.addr;
1051 	chip->corb.buf = (u32 *)chip->rb.area;
1052 	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1053 	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1054 
1055 	/* set the corb size to 256 entries (ULI requires explicitly) */
1056 	azx_writeb(chip, CORBSIZE, 0x02);
1057 	/* set the corb write pointer to 0 */
1058 	azx_writew(chip, CORBWP, 0);
1059 
1060 	/* reset the corb hw read pointer */
1061 	azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
1062 	for (timeout = 1000; timeout > 0; timeout--) {
1063 		if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
1064 			break;
1065 		udelay(1);
1066 	}
1067 	if (timeout <= 0)
1068 		dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1069 			azx_readw(chip, CORBRP));
1070 
1071 	azx_writew(chip, CORBRP, 0);
1072 	for (timeout = 1000; timeout > 0; timeout--) {
1073 		if (azx_readw(chip, CORBRP) == 0)
1074 			break;
1075 		udelay(1);
1076 	}
1077 	if (timeout <= 0)
1078 		dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1079 			azx_readw(chip, CORBRP));
1080 
1081 	/* enable corb dma */
1082 	azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
1083 
1084 	/* RIRB set up */
1085 	chip->rirb.addr = chip->rb.addr + 2048;
1086 	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1087 	chip->rirb.wp = chip->rirb.rp = 0;
1088 	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1089 	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1090 	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1091 
1092 	/* set the rirb size to 256 entries (ULI requires explicitly) */
1093 	azx_writeb(chip, RIRBSIZE, 0x02);
1094 	/* reset the rirb hw write pointer */
1095 	azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1096 	/* set N=1, get RIRB response interrupt for new entry */
1097 	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1098 		azx_writew(chip, RINTCNT, 0xc0);
1099 	else
1100 		azx_writew(chip, RINTCNT, 1);
1101 	/* enable rirb dma and response irq */
1102 	azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1103 	spin_unlock_irq(&chip->reg_lock);
1104 }
1105 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1106 
1107 static void azx_free_cmd_io(struct azx *chip)
1108 {
1109 	spin_lock_irq(&chip->reg_lock);
1110 	/* disable ringbuffer DMAs */
1111 	azx_writeb(chip, RIRBCTL, 0);
1112 	azx_writeb(chip, CORBCTL, 0);
1113 	spin_unlock_irq(&chip->reg_lock);
1114 }
1115 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1116 
1117 static unsigned int azx_command_addr(u32 cmd)
1118 {
1119 	unsigned int addr = cmd >> 28;
1120 
1121 	if (addr >= AZX_MAX_CODECS) {
1122 		snd_BUG();
1123 		addr = 0;
1124 	}
1125 
1126 	return addr;
1127 }
1128 
/* send a command via the CORB ring buffer
 *
 * Advances the CORB write pointer by one entry, stores the verb there and
 * publishes it to the hardware by updating CORBWP.  The per-codec pending
 * counter (rirb.cmds[addr]) is incremented so azx_update_rirb() can match
 * the response later.
 *
 * Returns 0 on success, -EIO when the controller appears powered down
 * (register reads back all-ones), -EAGAIN when the ring is full.
 */
static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	unsigned int wp, rp;

	spin_lock_irq(&chip->reg_lock);

	/* add command to corb */
	wp = azx_readw(chip, CORBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		spin_unlock_irq(&chip->reg_lock);
		return -EIO;
	}
	wp++;
	wp %= ICH6_MAX_CORB_ENTRIES;

	rp = azx_readw(chip, CORBRP);
	if (wp == rp) {
		/* oops, it's full */
		spin_unlock_irq(&chip->reg_lock);
		return -EAGAIN;
	}

	chip->rirb.cmds[addr]++;
	chip->corb.buf[wp] = cpu_to_le32(val);
	azx_writew(chip, CORBWP, wp);

	spin_unlock_irq(&chip->reg_lock);

	return 0;
}
1163 
/* RIRB response_ex flag: entry is an unsolicited event */
#define ICH6_RIRB_EX_UNSOL_EV	(1<<4)
1165 
/* retrieve RIRB entry - called from interrupt handler
 *
 * Consumes all RIRB entries between the cached read pointer and the
 * hardware write pointer.  Unsolicited events are queued to the codec
 * layer; solicited responses are stored in rirb.res[addr] and the
 * pending-command counter decremented (the smp_wmb() pairs with the
 * smp_rmb() in azx_rirb_get_response()).  Caller holds chip->reg_lock.
 */
static void azx_update_rirb(struct azx *chip)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = azx_readw(chip, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	if (wp == chip->rirb.wp)
		return;
	chip->rirb.wp = wp;

	while (chip->rirb.rp != wp) {
		chip->rirb.rp++;
		chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;

		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
		res = le32_to_cpu(chip->rirb.buf[rp]);
		/* low nibble of response_ex carries the codec address */
		addr = res_ex & 0xf;
		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex,
				chip->rirb.rp, wp);
			snd_BUG();
		}
		else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
		else if (chip->rirb.cmds[addr]) {
			chip->rirb.res[addr] = res;
			smp_wmb();
			chip->rirb.cmds[addr]--;
		} else if (printk_ratelimit()) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex,
				chip->last_cmd[addr]);
		}
	}
}
1210 
1211 /* receive a response */
1212 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1213 					  unsigned int addr)
1214 {
1215 	struct azx *chip = bus->private_data;
1216 	unsigned long timeout;
1217 	unsigned long loopcounter;
1218 	int do_poll = 0;
1219 
1220  again:
1221 	timeout = jiffies + msecs_to_jiffies(1000);
1222 
1223 	for (loopcounter = 0;; loopcounter++) {
1224 		if (chip->polling_mode || do_poll) {
1225 			spin_lock_irq(&chip->reg_lock);
1226 			azx_update_rirb(chip);
1227 			spin_unlock_irq(&chip->reg_lock);
1228 		}
1229 		if (!chip->rirb.cmds[addr]) {
1230 			smp_rmb();
1231 			bus->rirb_error = 0;
1232 
1233 			if (!do_poll)
1234 				chip->poll_count = 0;
1235 			return chip->rirb.res[addr]; /* the last value */
1236 		}
1237 		if (time_after(jiffies, timeout))
1238 			break;
1239 		if (bus->needs_damn_long_delay || loopcounter > 3000)
1240 			msleep(2); /* temporary workaround */
1241 		else {
1242 			udelay(10);
1243 			cond_resched();
1244 		}
1245 	}
1246 
1247 	if (!bus->no_response_fallback)
1248 		return -1;
1249 
1250 	if (!chip->polling_mode && chip->poll_count < 2) {
1251 		dev_dbg(chip->card->dev,
1252 			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1253 			chip->last_cmd[addr]);
1254 		do_poll = 1;
1255 		chip->poll_count++;
1256 		goto again;
1257 	}
1258 
1259 
1260 	if (!chip->polling_mode) {
1261 		dev_warn(chip->card->dev,
1262 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1263 			 chip->last_cmd[addr]);
1264 		chip->polling_mode = 1;
1265 		goto again;
1266 	}
1267 
1268 	if (chip->msi) {
1269 		dev_warn(chip->card->dev,
1270 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1271 			 chip->last_cmd[addr]);
1272 		if (chip->ops->disable_msi_reset_irq(chip) &&
1273 		    chip->ops->disable_msi_reset_irq(chip) < 0) {
1274 			bus->rirb_error = 1;
1275 			return -1;
1276 		}
1277 		goto again;
1278 	}
1279 
1280 	if (chip->probing) {
1281 		/* If this critical timeout happens during the codec probing
1282 		 * phase, this is likely an access to a non-existing codec
1283 		 * slot.  Better to return an error and reset the system.
1284 		 */
1285 		return -1;
1286 	}
1287 
1288 	/* a fatal communication error; need either to reset or to fallback
1289 	 * to the single_cmd mode
1290 	 */
1291 	bus->rirb_error = 1;
1292 	if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1293 		bus->response_reset = 1;
1294 		return -1; /* give a chance to retry */
1295 	}
1296 
1297 	dev_err(chip->card->dev,
1298 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1299 		chip->last_cmd[addr]);
1300 	chip->single_cmd = 1;
1301 	bus->response_reset = 0;
1302 	/* release CORB/RIRB */
1303 	azx_free_cmd_io(chip);
1304 	/* disable unsolicited responses */
1305 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1306 	return -1;
1307 }
1308 
1309 /*
1310  * Use the single immediate command instead of CORB/RIRB for simplicity
1311  *
1312  * Note: according to Intel, this is not preferred use.  The command was
1313  *       intended for the BIOS only, and may get confused with unsolicited
1314  *       responses.  So, we shouldn't use it for normal operation from the
1315  *       driver.
 *       I left the code, however, for debugging/testing purposes.
1317  */
1318 
1319 /* receive a response */
1320 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1321 {
1322 	int timeout = 50;
1323 
1324 	while (timeout--) {
1325 		/* check IRV busy bit */
1326 		if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1327 			/* reuse rirb.res as the response return value */
1328 			chip->rirb.res[addr] = azx_readl(chip, IR);
1329 			return 0;
1330 		}
1331 		udelay(1);
1332 	}
1333 	if (printk_ratelimit())
1334 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1335 			azx_readw(chip, IRS));
1336 	chip->rirb.res[addr] = -1;
1337 	return -EIO;
1338 }
1339 
1340 /* send a command */
1341 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1342 {
1343 	struct azx *chip = bus->private_data;
1344 	unsigned int addr = azx_command_addr(val);
1345 	int timeout = 50;
1346 
1347 	bus->rirb_error = 0;
1348 	while (timeout--) {
1349 		/* check ICB busy bit */
1350 		if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1351 			/* Clear IRV valid bit */
1352 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1353 				   ICH6_IRS_VALID);
1354 			azx_writel(chip, IC, val);
1355 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1356 				   ICH6_IRS_BUSY);
1357 			return azx_single_wait_for_response(chip, addr);
1358 		}
1359 		udelay(1);
1360 	}
1361 	if (printk_ratelimit())
1362 		dev_dbg(chip->card->dev,
1363 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
1364 			azx_readw(chip, IRS), val);
1365 	return -EIO;
1366 }
1367 
1368 /* receive a response */
1369 static unsigned int azx_single_get_response(struct hda_bus *bus,
1370 					    unsigned int addr)
1371 {
1372 	struct azx *chip = bus->private_data;
1373 	return chip->rirb.res[addr];
1374 }
1375 
1376 /*
1377  * The below are the main callbacks from hda_codec.
1378  *
1379  * They are just the skeleton to call sub-callbacks according to the
1380  * current setting of chip->single_cmd.
1381  */
1382 
1383 /* send a command */
1384 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1385 {
1386 	struct azx *chip = bus->private_data;
1387 
1388 	if (chip->disabled)
1389 		return 0;
1390 	chip->last_cmd[azx_command_addr(val)] = val;
1391 	if (chip->single_cmd)
1392 		return azx_single_send_cmd(bus, val);
1393 	else
1394 		return azx_corb_send_cmd(bus, val);
1395 }
1396 EXPORT_SYMBOL_GPL(azx_send_cmd);
1397 
1398 /* get a response */
1399 static unsigned int azx_get_response(struct hda_bus *bus,
1400 				     unsigned int addr)
1401 {
1402 	struct azx *chip = bus->private_data;
1403 	if (chip->disabled)
1404 		return 0;
1405 	if (chip->single_cmd)
1406 		return azx_single_get_response(bus, addr);
1407 	else
1408 		return azx_rirb_get_response(bus, addr);
1409 }
1410 EXPORT_SYMBOL_GPL(azx_get_response);
1411 
1412 #ifdef CONFIG_SND_HDA_DSP_LOADER
1413 /*
1414  * DSP loading code (e.g. for CA0132)
1415  */
1416 
1417 /* use the first stream for loading DSP */
1418 static struct azx_dev *
1419 azx_get_dsp_loader_dev(struct azx *chip)
1420 {
1421 	return &chip->azx_dev[chip->playback_index_offset];
1422 }
1423 
/*
 * Prepare the DSP-loader stream: claim the first playback stream, save
 * its current state, allocate a DMA buffer of @byte_size bytes and set
 * up a single-fragment BDL with the given @format.
 *
 * On success returns the stream tag to use for loading (stream stays
 * locked until azx_load_dsp_cleanup()); on failure restores the saved
 * stream state and returns a negative error (-EBUSY when the stream is
 * running or already claimed).
 */
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
				unsigned int byte_size,
				struct snd_dma_buffer *bufp)
{
	u32 *bdl;
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);

	dsp_lock(azx_dev);
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->running || azx_dev->locked) {
		spin_unlock_irq(&chip->reg_lock);
		err = -EBUSY;
		goto unlock;
	}
	/* snapshot the stream so the PCM side can be restored afterwards */
	azx_dev->prepared = 0;
	chip->saved_azx_dev = *azx_dev;
	azx_dev->locked = 1;
	spin_unlock_irq(&chip->reg_lock);

	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
					 byte_size, bufp);
	if (err < 0)
		goto err_alloc;

	azx_dev->bufsize = byte_size;
	azx_dev->period_bytes = byte_size;
	azx_dev->format_val = format;

	azx_stream_reset(chip, azx_dev);

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	/* one BDL entry covering the whole firmware image, no IOC */
	azx_dev->frags = 0;
	bdl = (u32 *)azx_dev->bdl.area;
	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
	if (err < 0)
		goto error;

	azx_setup_controller(chip, azx_dev);
	dsp_unlock(azx_dev);
	return azx_dev->stream_tag;

 error:
	chip->ops->dma_free_pages(chip, bufp);
 err_alloc:
	spin_lock_irq(&chip->reg_lock);
	/* restore the snapshot only if the PCM side has the stream open */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
 unlock:
	dsp_unlock(azx_dev);
	return err;
}
1484 
1485 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1486 {
1487 	struct azx *chip = bus->private_data;
1488 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1489 
1490 	if (start)
1491 		azx_stream_start(chip, azx_dev);
1492 	else
1493 		azx_stream_stop(chip, azx_dev);
1494 	azx_dev->running = start;
1495 }
1496 
/*
 * Undo azx_load_dsp_prepare(): stop the loader stream, free the
 * firmware DMA buffer and restore the saved stream state so the PCM
 * side can use the stream again.
 *
 * NOTE(review): azx_dev->locked is tested before dsp_lock() is taken;
 * presumably callers serialize prepare/cleanup themselves — confirm.
 */
static void azx_load_dsp_cleanup(struct hda_bus *bus,
				 struct snd_dma_buffer *dmab)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	/* nothing to do if no buffer was allocated or we never claimed it */
	if (!dmab->area || !azx_dev->locked)
		return;

	dsp_lock(azx_dev);
	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;

	chip->ops->dma_free_pages(chip, dmab);
	dmab->area = NULL;

	spin_lock_irq(&chip->reg_lock);
	/* restore the snapshot only if the PCM side has the stream open */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
	dsp_unlock(azx_dev);
}
1525 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1526 
1527 int azx_alloc_stream_pages(struct azx *chip)
1528 {
1529 	int i, err;
1530 	struct snd_card *card = chip->card;
1531 
1532 	for (i = 0; i < chip->num_streams; i++) {
1533 		dsp_lock_init(&chip->azx_dev[i]);
1534 		/* allocate memory for the BDL for each stream */
1535 		err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1536 						 BDL_SIZE,
1537 						 &chip->azx_dev[i].bdl);
1538 		if (err < 0) {
1539 			dev_err(card->dev, "cannot allocate BDL\n");
1540 			return -ENOMEM;
1541 		}
1542 	}
1543 	/* allocate memory for the position buffer */
1544 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1545 					 chip->num_streams * 8, &chip->posbuf);
1546 	if (err < 0) {
1547 		dev_err(card->dev, "cannot allocate posbuf\n");
1548 		return -ENOMEM;
1549 	}
1550 
1551 	/* allocate CORB/RIRB */
1552 	err = azx_alloc_cmd_io(chip);
1553 	if (err < 0)
1554 		return err;
1555 	return 0;
1556 }
1557 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1558 
1559 void azx_free_stream_pages(struct azx *chip)
1560 {
1561 	int i;
1562 	if (chip->azx_dev) {
1563 		for (i = 0; i < chip->num_streams; i++)
1564 			if (chip->azx_dev[i].bdl.area)
1565 				chip->ops->dma_free_pages(
1566 					chip, &chip->azx_dev[i].bdl);
1567 	}
1568 	if (chip->rb.area)
1569 		chip->ops->dma_free_pages(chip, &chip->rb);
1570 	if (chip->posbuf.area)
1571 		chip->ops->dma_free_pages(chip, &chip->posbuf);
1572 }
1573 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1574 
1575 /*
1576  * Lowlevel interface
1577  */
1578 
1579 /* enter link reset */
1580 void azx_enter_link_reset(struct azx *chip)
1581 {
1582 	unsigned long timeout;
1583 
1584 	/* reset controller */
1585 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
1586 
1587 	timeout = jiffies + msecs_to_jiffies(100);
1588 	while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1589 			time_before(jiffies, timeout))
1590 		usleep_range(500, 1000);
1591 }
1592 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1593 
1594 /* exit link reset */
1595 static void azx_exit_link_reset(struct azx *chip)
1596 {
1597 	unsigned long timeout;
1598 
1599 	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1600 
1601 	timeout = jiffies + msecs_to_jiffies(100);
1602 	while (!azx_readb(chip, GCTL) &&
1603 			time_before(jiffies, timeout))
1604 		usleep_range(500, 1000);
1605 }
1606 
/* reset codec link
 *
 * Performs a full link reset cycle (when @full_reset) with the delays
 * required by the HDA spec, verifies the controller is awake, re-enables
 * unsolicited responses (CORB/RIRB mode only) and latches the codec
 * presence mask from STATESTS on the first call.
 *
 * Returns 0 on success, -EBUSY if the controller never left reset.
 */
static int azx_reset(struct azx *chip, bool full_reset)
{
	if (!full_reset)
		goto __skip;

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* reset controller */
	azx_enter_link_reset(chip);

	/* delay for >= 100us for codec PLL to settle per spec
	 * Rev 0.9 section 5.5.1
	 */
	usleep_range(500, 1000);

	/* Bring controller out of reset */
	azx_exit_link_reset(chip);

	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
	usleep_range(1000, 1200);

      __skip:
	/* check to see if controller is ready */
	if (!azx_readb(chip, GCTL)) {
		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
		return -EBUSY;
	}

	/* Accept unsolicited responses */
	if (!chip->single_cmd)
		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
			   ICH6_GCTL_UNSOL);

	/* detect codecs */
	if (!chip->codec_mask) {
		chip->codec_mask = azx_readw(chip, STATESTS);
		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
			chip->codec_mask);
	}

	return 0;
}
1651 
1652 /* enable interrupts */
1653 static void azx_int_enable(struct azx *chip)
1654 {
1655 	/* enable controller CIE and GIE */
1656 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1657 		   ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1658 }
1659 
1660 /* disable interrupts */
1661 static void azx_int_disable(struct azx *chip)
1662 {
1663 	int i;
1664 
1665 	/* disable interrupts in stream descriptor */
1666 	for (i = 0; i < chip->num_streams; i++) {
1667 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1668 		azx_sd_writeb(chip, azx_dev, SD_CTL,
1669 			      azx_sd_readb(chip, azx_dev, SD_CTL) &
1670 					~SD_INT_MASK);
1671 	}
1672 
1673 	/* disable SIE for all streams */
1674 	azx_writeb(chip, INTCTL, 0);
1675 
1676 	/* disable controller CIE and GIE */
1677 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1678 		   ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1679 }
1680 
1681 /* clear interrupts */
1682 static void azx_int_clear(struct azx *chip)
1683 {
1684 	int i;
1685 
1686 	/* clear stream status */
1687 	for (i = 0; i < chip->num_streams; i++) {
1688 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1689 		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1690 	}
1691 
1692 	/* clear STATESTS */
1693 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1694 
1695 	/* clear rirb status */
1696 	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1697 
1698 	/* clear int status */
1699 	azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1700 }
1701 
1702 /*
1703  * reset and start the controller registers
1704  */
1705 void azx_init_chip(struct azx *chip, bool full_reset)
1706 {
1707 	if (chip->initialized)
1708 		return;
1709 
1710 	/* reset controller */
1711 	azx_reset(chip, full_reset);
1712 
1713 	/* initialize interrupts */
1714 	azx_int_clear(chip);
1715 	azx_int_enable(chip);
1716 
1717 	/* initialize the codec command I/O */
1718 	if (!chip->single_cmd)
1719 		azx_init_cmd_io(chip);
1720 
1721 	/* program the position buffer */
1722 	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1723 	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1724 
1725 	chip->initialized = 1;
1726 }
1727 EXPORT_SYMBOL_GPL(azx_init_chip);
1728 
/*
 * Counterpart of azx_init_chip(): mask and acknowledge interrupts, stop
 * the CORB/RIRB DMA engines and clear the position-buffer registers.
 * A no-op when the chip is not marked initialized.
 */
void azx_stop_chip(struct azx *chip)
{
	if (!chip->initialized)
		return;

	/* disable interrupts */
	azx_int_disable(chip);
	azx_int_clear(chip);

	/* disable CORB/RIRB */
	azx_free_cmd_io(chip);

	/* disable position buffer */
	azx_writel(chip, DPLBASE, 0);
	azx_writel(chip, DPUBASE, 0);

	chip->initialized = 0;
}
EXPORT_SYMBOL_GPL(azx_stop_chip);
1748 
1749 /*
1750  * interrupt handler
1751  */
1752 irqreturn_t azx_interrupt(int irq, void *dev_id)
1753 {
1754 	struct azx *chip = dev_id;
1755 	struct azx_dev *azx_dev;
1756 	u32 status;
1757 	u8 sd_status;
1758 	int i;
1759 
1760 #ifdef CONFIG_PM_RUNTIME
1761 	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1762 		if (!pm_runtime_active(chip->card->dev))
1763 			return IRQ_NONE;
1764 #endif
1765 
1766 	spin_lock(&chip->reg_lock);
1767 
1768 	if (chip->disabled) {
1769 		spin_unlock(&chip->reg_lock);
1770 		return IRQ_NONE;
1771 	}
1772 
1773 	status = azx_readl(chip, INTSTS);
1774 	if (status == 0 || status == 0xffffffff) {
1775 		spin_unlock(&chip->reg_lock);
1776 		return IRQ_NONE;
1777 	}
1778 
1779 	for (i = 0; i < chip->num_streams; i++) {
1780 		azx_dev = &chip->azx_dev[i];
1781 		if (status & azx_dev->sd_int_sta_mask) {
1782 			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1783 			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1784 			if (!azx_dev->substream || !azx_dev->running ||
1785 			    !(sd_status & SD_INT_COMPLETE))
1786 				continue;
1787 			/* check whether this IRQ is really acceptable */
1788 			if (!chip->ops->position_check ||
1789 			    chip->ops->position_check(chip, azx_dev)) {
1790 				spin_unlock(&chip->reg_lock);
1791 				snd_pcm_period_elapsed(azx_dev->substream);
1792 				spin_lock(&chip->reg_lock);
1793 			}
1794 		}
1795 	}
1796 
1797 	/* clear rirb int */
1798 	status = azx_readb(chip, RIRBSTS);
1799 	if (status & RIRB_INT_MASK) {
1800 		if (status & RIRB_INT_RESPONSE) {
1801 			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1802 				udelay(80);
1803 			azx_update_rirb(chip);
1804 		}
1805 		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1806 	}
1807 
1808 	spin_unlock(&chip->reg_lock);
1809 
1810 	return IRQ_HANDLED;
1811 }
1812 EXPORT_SYMBOL_GPL(azx_interrupt);
1813 
1814 /*
1815  * Codec initerface
1816  */
1817 
1818 /*
1819  * Probe the given codec address
1820  */
1821 static int probe_codec(struct azx *chip, int addr)
1822 {
1823 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1824 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1825 	unsigned int res;
1826 
1827 	mutex_lock(&chip->bus->cmd_mutex);
1828 	chip->probing = 1;
1829 	azx_send_cmd(chip->bus, cmd);
1830 	res = azx_get_response(chip->bus, addr);
1831 	chip->probing = 0;
1832 	mutex_unlock(&chip->bus->cmd_mutex);
1833 	if (res == -1)
1834 		return -EIO;
1835 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1836 	return 0;
1837 }
1838 
/*
 * bus_reset callback: restart the controller and, when it came back up,
 * run a suspend/resume cycle through the codec layer so that all codec
 * state is re-initialized.  bus->in_reset guards against re-entry from
 * the response-timeout path.
 */
static void azx_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus->private_data;

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
#ifdef CONFIG_PM
	if (chip->initialized) {
		struct azx_pcm *p;
		/* suspend all PCM streams before cycling the codecs */
		list_for_each_entry(p, &chip->pcm_list, list)
			snd_pcm_suspend_all(p->pcm);
		snd_hda_suspend(chip->bus);
		snd_hda_resume(chip->bus);
	}
#endif
	bus->in_reset = 0;
}
1857 
1858 #ifdef CONFIG_PM
1859 /* power-up/down the controller */
1860 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1861 {
1862 	struct azx *chip = bus->private_data;
1863 
1864 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1865 		return;
1866 
1867 	if (power_up)
1868 		pm_runtime_get_sync(chip->card->dev);
1869 	else
1870 		pm_runtime_put_sync(chip->card->dev);
1871 }
1872 #endif
1873 
1874 static int get_jackpoll_interval(struct azx *chip)
1875 {
1876 	int i;
1877 	unsigned int j;
1878 
1879 	if (!chip->jackpoll_ms)
1880 		return 0;
1881 
1882 	i = chip->jackpoll_ms[chip->dev_index];
1883 	if (i == 0)
1884 		return 0;
1885 	if (i < 50 || i > 60000)
1886 		j = 0;
1887 	else
1888 		j = msecs_to_jiffies(i);
1889 	if (j == 0)
1890 		dev_warn(chip->card->dev,
1891 			 "jackpoll_ms value out of range: %d\n", i);
1892 	return j;
1893 }
1894 
/* Codec initialization
 *
 * Creates the HDA bus with the azx callbacks, probes all codec slots up
 * to @max_slots (limited by codec_mask/codec_probe_mask), and creates a
 * codec instance for each slot that answered.  A failed probe disables
 * the slot and resets the controller to recover the link.
 *
 * Returns 0 on success, a snd_hda_bus_new() error, or -ENXIO when no
 * codec could be initialized.
 */
int azx_codec_create(struct azx *chip, const char *model,
		     unsigned int max_slots,
		     int *power_save_to)
{
	struct hda_bus_template bus_temp;
	int c, codecs, err;

	memset(&bus_temp, 0, sizeof(bus_temp));
	bus_temp.private_data = chip;
	bus_temp.modelname = model;
	bus_temp.pci = chip->pci;
	bus_temp.ops.command = azx_send_cmd;
	bus_temp.ops.get_response = azx_get_response;
	bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
	bus_temp.ops.bus_reset = azx_bus_reset;
#ifdef CONFIG_PM
	bus_temp.power_save = power_save_to;
	bus_temp.ops.pm_notify = azx_power_notify;
#endif
#ifdef CONFIG_SND_HDA_DSP_LOADER
	bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
	bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
	bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
#endif

	err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
	if (err < 0)
		return err;

	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
		chip->bus->needs_damn_long_delay = 1;
	}

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				chip->codec_mask &= ~(1 << c);
				/* More badly, accessing to a non-existing
				 * codec often screws up the controller chip,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller chip to
				 * get back to the sanity state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* AMD chipsets often cause the communication stalls upon certain
	 * sequence like the pin-detection.  It seems that forcing the synced
	 * access works around the stall.  Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		chip->bus->sync_write = 1;
		chip->bus->allow_bus_reset = 1;
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(chip->bus, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = get_jackpoll_interval(chip);
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_create);
1986 
1987 /* configure each codec instance */
1988 int azx_codec_configure(struct azx *chip)
1989 {
1990 	struct hda_codec *codec;
1991 	list_for_each_entry(codec, &chip->bus->codec_list, list) {
1992 		snd_hda_codec_configure(codec);
1993 	}
1994 	return 0;
1995 }
1996 EXPORT_SYMBOL_GPL(azx_codec_configure);
1997 
/* mixer creation - all stuff is implemented in hda module;
 * builds the ALSA controls for every codec on the bus */
int azx_mixer_create(struct azx *chip)
{
	return snd_hda_build_controls(chip->bus);
}
EXPORT_SYMBOL_GPL(azx_mixer_create);
2004 
2005 
2006 /* initialize SD streams */
2007 int azx_init_stream(struct azx *chip)
2008 {
2009 	int i;
2010 
2011 	/* initialize each stream (aka device)
2012 	 * assign the starting bdl address to each stream (device)
2013 	 * and initialize
2014 	 */
2015 	for (i = 0; i < chip->num_streams; i++) {
2016 		struct azx_dev *azx_dev = &chip->azx_dev[i];
2017 		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
2018 		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
2019 		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
2020 		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
2021 		azx_dev->sd_int_sta_mask = 1 << i;
2022 		/* stream tag: must be non-zero and unique */
2023 		azx_dev->index = i;
2024 		azx_dev->stream_tag = i + 1;
2025 	}
2026 
2027 	return 0;
2028 }
2029 EXPORT_SYMBOL_GPL(azx_init_stream);
2030 
2031 MODULE_LICENSE("GPL");
2032 MODULE_DESCRIPTION("Common HDA driver funcitons");
2033