xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision 62e7ca52)
1 /*
2  *
3  *  Implementation of primary alsa driver code base for Intel HD Audio.
4  *
5  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
6  *
7  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8  *                     PeiSen Hou <pshou@realtek.com.tw>
9  *
10  *  This program is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License as published by the Free
12  *  Software Foundation; either version 2 of the License, or (at your option)
13  *  any later version.
14  *
15  *  This program is distributed in the hope that it will be useful, but WITHOUT
16  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  *  more details.
19  *
20  *
21  */
22 
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include "hda_priv.h"
33 #include "hda_controller.h"
34 
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
37 
/*
 * DSP lock helpers.
 *
 * With CONFIG_SND_HDA_DSP_LOADER each azx_dev carries a mutex that
 * serializes DSP firmware loading against normal PCM use of the same
 * stream; dsp_is_locked() reports whether the stream is currently
 * claimed by the loader.  Without the option the helpers compile away
 * to nothing.
 */
#ifdef CONFIG_SND_HDA_DSP_LOADER
#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev)	((dev)->locked)
#else
#define dsp_lock_init(dev)	do {} while (0)
#define dsp_lock(dev)		do {} while (0)
#define dsp_unlock(dev)		do {} while (0)
#define dsp_is_locked(dev)	0
#endif
50 
51 /*
52  * AZX stream operations.
53  */
54 
/* start a stream: mark the FIFO as not yet filled, enable the stream's
 * interrupt (SIE) in the global interrupt control register, then set
 * the DMA RUN bit together with the per-stream interrupt enables
 */
static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
{
	/*
	 * Before stream start, initialize parameter
	 */
	azx_dev->insufficient = 1;	/* consumed by azx_via_get_position() */

	/* enable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
	/* set DMA start and interrupt mask */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_DMA_START | SD_INT_MASK);
}
71 
/* stop DMA: clear the RUN bit and the per-stream interrupt enables in
 * SD_CTL, then write back the status bits to acknowledge any pending
 * stream interrupt
 */
static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) &
		      ~(SD_CTL_DMA_START | SD_INT_MASK));
	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
}
80 
/* stop a stream: stop the DMA engine, then disable the stream's
 * interrupt (SIE) in the global interrupt control register
 */
void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_stream_clear(chip, azx_dev);
	/* disable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
}
EXPORT_SYMBOL_GPL(azx_stream_stop);
90 
/* reset stream: pulse the per-stream reset bit in SD_CTL, busy-waiting
 * (bounded to 300 reads each way) for the hardware to first latch and
 * then release the bit; finally zero the cached DMA position.
 * NOTE(review): a timeout expiry is silently ignored here.
 */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	azx_stream_clear(chip, azx_dev);

	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_STREAM_RESET);
	udelay(3);
	timeout = 300;
	/* wait until the hardware reports that reset is asserted */
	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		 SD_CTL_STREAM_RESET) && --timeout)
		;
	val &= ~SD_CTL_STREAM_RESET;
	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
	udelay(3);

	timeout = 300;
	/* waiting for hardware to report that the stream is out of reset */
	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		SD_CTL_STREAM_RESET) && --timeout)
		;

	/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}
120 
121 /*
122  * set up the SD for streaming
123  */
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
125 {
126 	unsigned int val;
127 	/* make sure the run bit is zero for SD */
128 	azx_stream_clear(chip, azx_dev);
129 	/* program the stream_tag */
130 	val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
133 	if (!azx_snoop(chip))
134 		val |= SD_CTL_TRAFFIC_PRIO;
135 	azx_sd_writel(chip, azx_dev, SD_CTL, val);
136 
137 	/* program the length of samples in cyclic buffer */
138 	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
139 
140 	/* program the stream format */
141 	/* this value needs to be the same as the one programmed */
142 	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
143 
144 	/* program the stream LVI (last valid index) of the BDL */
145 	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
146 
147 	/* program the BDL address */
148 	/* lower BDL address */
149 	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 	/* upper BDL address */
151 	azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 		      upper_32_bits(azx_dev->bdl.addr));
153 
154 	/* enable the position buffer */
155 	if (chip->position_fix[0] != POS_FIX_LPIB ||
156 	    chip->position_fix[1] != POS_FIX_LPIB) {
157 		if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
158 			azx_writel(chip, DPLBASE,
159 				(u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
160 	}
161 
162 	/* set the interrupt enable bits in the descriptor control register */
163 	azx_sd_writel(chip, azx_dev, SD_CTL,
164 		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
165 
166 	return 0;
167 }
168 
/* assign a stream for the PCM
 *
 * Picks a free stream descriptor from the direction-specific range,
 * preferring one whose assigned_key matches this substream's non-zero
 * key so the same hardware stream keeps serving the same PCM across
 * open/close cycles.  Streams currently claimed by the DSP loader are
 * skipped.  With AZX_DCAPS_REVERSE_ASSIGN the last free candidate wins
 * instead of the first.  Returns NULL if no stream is available.
 */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	int dev, i, nums;
	struct azx_dev *res = NULL;
	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dev = chip->playback_index_offset;
		nums = chip->playback_streams;
	} else {
		dev = chip->capture_index_offset;
		nums = chip->capture_streams;
	}
	for (i = 0; i < nums; i++, dev++) {
		struct azx_dev *azx_dev = &chip->azx_dev[dev];
		dsp_lock(azx_dev);
		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
			if (azx_dev->assigned_key == key) {
				azx_dev->opened = 1;
				azx_dev->assigned_key = key;
				dsp_unlock(azx_dev);
				return azx_dev;
			}
			if (!res ||
			    (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
				res = azx_dev;
		}
		dsp_unlock(azx_dev);
	}
	if (res) {
		dsp_lock(res);
		res->opened = 1;
		res->assigned_key = key;
		dsp_unlock(res);
	}
	return res;
}
210 
/* release the assigned stream: only clears the opened flag; the
 * assigned_key is kept so azx_assign_device() can re-pair the same
 * substream with this stream on the next open
 */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	azx_dev->opened = 0;
}
216 
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
218 {
219 	struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 	struct snd_pcm_substream *substream = azx_dev->substream;
221 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 	struct azx *chip = apcm->chip;
223 
224 	return azx_readl(chip, WALLCLK);
225 }
/*
 * Initialize the per-stream timecounter on top of the 24MHz WALLCLK
 * cyclecounter.  When @force is set, @last seeds cycle_last so that
 * all streams of a synchronized group share one start cycle.
 */
static void azx_timecounter_init(struct snd_pcm_substream *substream,
				bool force, cycle_t last)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct timecounter *tc = &azx_dev->azx_tc;
	struct cyclecounter *cc = &azx_dev->azx_cc;
	u64 nsec;

	cc->read = azx_cc_read;
	cc->mask = CLOCKSOURCE_MASK(32);

	/*
	 * Converting from 24 MHz to ns means applying a 125/3 factor.
	 * To avoid any saturation issues in intermediate operations,
	 * the 125 factor is applied first. The division is applied
	 * last after reading the timecounter value.
	 * Applying the 1/3 factor as part of the multiplication
	 * requires at least 20 bits for a decent precision, however
	 * overflows occur after about 4 hours or less, not a option.
	 */

	cc->mult = 125; /* saturation after 195 years */
	cc->shift = 0;

	nsec = 0; /* audio time is elapsed time since trigger */
	timecounter_init(tc, cc, nsec);
	if (force)
		/*
		 * force timecounter to use predefined value,
		 * used for synchronized starts
		 */
		tc->cycle_last = last;
}
260 
261 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
262 				u64 nsec)
263 {
264 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
266 	u64 codec_frames, codec_nsecs;
267 
268 	if (!hinfo->ops.get_delay)
269 		return nsec;
270 
271 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
272 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
273 			      substream->runtime->rate);
274 
275 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
276 		return nsec + codec_nsecs;
277 
278 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
279 }
280 
281 /*
282  * set up a BDL entry
283  */
284 static int setup_bdle(struct azx *chip,
285 		      struct snd_dma_buffer *dmab,
286 		      struct azx_dev *azx_dev, u32 **bdlp,
287 		      int ofs, int size, int with_ioc)
288 {
289 	u32 *bdl = *bdlp;
290 
291 	while (size > 0) {
292 		dma_addr_t addr;
293 		int chunk;
294 
295 		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
296 			return -EINVAL;
297 
298 		addr = snd_sgbuf_get_addr(dmab, ofs);
299 		/* program the address field of the BDL entry */
300 		bdl[0] = cpu_to_le32((u32)addr);
301 		bdl[1] = cpu_to_le32(upper_32_bits(addr));
302 		/* program the size field of the BDL entry */
303 		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
304 		/* one BDLE cannot cross 4K boundary on CTHDA chips */
305 		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
306 			u32 remain = 0x1000 - (ofs & 0xfff);
307 			if (chunk > remain)
308 				chunk = remain;
309 		}
310 		bdl[2] = cpu_to_le32(chunk);
311 		/* program the IOC to enable interrupt
312 		 * only when the whole fragment is processed
313 		 */
314 		size -= chunk;
315 		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
316 		bdl += 4;
317 		azx_dev->frags++;
318 		ofs += chunk;
319 	}
320 	*bdlp = bdl;
321 	return ofs;
322 }
323 
324 /*
325  * set up BDL entries
326  */
327 static int azx_setup_periods(struct azx *chip,
328 			     struct snd_pcm_substream *substream,
329 			     struct azx_dev *azx_dev)
330 {
331 	u32 *bdl;
332 	int i, ofs, periods, period_bytes;
333 	int pos_adj = 0;
334 
335 	/* reset BDL address */
336 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
337 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
338 
339 	period_bytes = azx_dev->period_bytes;
340 	periods = azx_dev->bufsize / period_bytes;
341 
342 	/* program the initial BDL entries */
343 	bdl = (u32 *)azx_dev->bdl.area;
344 	ofs = 0;
345 	azx_dev->frags = 0;
346 
347 	if (chip->bdl_pos_adj)
348 		pos_adj = chip->bdl_pos_adj[chip->dev_index];
349 	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
350 		struct snd_pcm_runtime *runtime = substream->runtime;
351 		int pos_align = pos_adj;
352 		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
353 		if (!pos_adj)
354 			pos_adj = pos_align;
355 		else
356 			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
357 				pos_align;
358 		pos_adj = frames_to_bytes(runtime, pos_adj);
359 		if (pos_adj >= period_bytes) {
360 			dev_warn(chip->card->dev,"Too big adjustment %d\n",
361 				 pos_adj);
362 			pos_adj = 0;
363 		} else {
364 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
365 					 azx_dev,
366 					 &bdl, ofs, pos_adj, true);
367 			if (ofs < 0)
368 				goto error;
369 		}
370 	} else
371 		pos_adj = 0;
372 
373 	for (i = 0; i < periods; i++) {
374 		if (i == periods - 1 && pos_adj)
375 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
376 					 azx_dev, &bdl, ofs,
377 					 period_bytes - pos_adj, 0);
378 		else
379 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
380 					 azx_dev, &bdl, ofs,
381 					 period_bytes,
382 					 !azx_dev->no_period_wakeup);
383 		if (ofs < 0)
384 			goto error;
385 	}
386 	return 0;
387 
388  error:
389 	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
390 		azx_dev->bufsize, period_bytes);
391 	return -EINVAL;
392 }
393 
394 /*
395  * PCM ops
396  */
397 
/* PCM close callback: detach the substream from the stream descriptor
 * under the register lock, release the stream for reuse, close the
 * codec side and drop the codec power reference taken at open time
 */
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = NULL;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	azx_release_device(azx_dev);
	hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
417 
418 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
419 			     struct snd_pcm_hw_params *hw_params)
420 {
421 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
422 	struct azx *chip = apcm->chip;
423 	int ret;
424 
425 	dsp_lock(get_azx_dev(substream));
426 	if (dsp_is_locked(get_azx_dev(substream))) {
427 		ret = -EBUSY;
428 		goto unlock;
429 	}
430 
431 	ret = chip->ops->substream_alloc_pages(chip, substream,
432 					  params_buffer_bytes(hw_params));
433 unlock:
434 	dsp_unlock(get_azx_dev(substream));
435 	return ret;
436 }
437 
/* hw_free callback: clear the SD registers and the cached stream setup
 * (skipped while the DSP loader owns the stream), clean up the codec
 * side and release the buffer pages; the stream must be re-prepared
 * afterwards (prepared flag is cleared)
 */
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx *chip = apcm->chip;
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev)) {
		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
		azx_dev->bufsize = 0;
		azx_dev->period_bytes = 0;
		azx_dev->format_val = 0;
	}

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = chip->ops->substream_free_pages(chip, substream);
	azx_dev->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}
464 
/* prepare callback: reset the stream, compute and program the stream
 * format, (re)build the BDL when the setup changed, program the SD
 * registers and let the codec prepare its side.  Fails with -EBUSY
 * while the DSP loader owns the stream.  Sets the prepared flag that
 * azx_pcm_trigger() requires.
 */
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	azx_stream_reset(chip, azx_dev);
	format_val = snd_hda_calc_stream_format(runtime->rate,
						runtime->channels,
						runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);

	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
		bufsize, format_val);

	/* rebuild the BDL only when the stream setup actually changed */
	if (bufsize != azx_dev->bufsize ||
	    period_bytes != azx_dev->period_bytes ||
	    format_val != azx_dev->format_val ||
	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
		azx_dev->bufsize = bufsize;
		azx_dev->period_bytes = period_bytes;
		azx_dev->format_val = format_val;
		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
		err = azx_setup_periods(chip, substream, azx_dev);
		if (err < 0)
			goto unlock;
	}

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime->period_size > 64)
		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24Mhz clock source */
	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
						runtime->rate) * 1000);
	azx_setup_controller(chip, azx_dev);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		azx_dev->fifo_size =
			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
	else
		azx_dev->fifo_size = 0;

	stream_tag = azx_dev->stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				     azx_dev->format_val, substream);

 unlock:
	if (!err)
		azx_dev->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
550 
/* trigger callback: start or stop this stream together with all linked
 * substreams on the same card.  SYNC bits are set around the start/stop
 * so grouped streams run in lockstep; afterwards the code busy-waits
 * (bounded) for the FIFOs to become ready (start) or the RUN bits to
 * clear (stop), then re-seeds the wall-clock timecounters.
 */
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	int rstart = 0, start, nsync = 0, sbits = 0;
	int nwait, timeout;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rstart = 1;	/* real (re)start, not a resume/release */
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}

	/* collect the linked substreams that live on this card */
	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->index;
		nsync++;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&chip->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) | sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
			if (!rstart)
				/* resuming: pretend we started a period ago */
				azx_dev->start_wallclk -=
						azx_dev->period_wallclk;
			azx_stream_start(chip, azx_dev);
		} else {
			azx_stream_stop(chip, azx_dev);
		}
		azx_dev->running = start;
	}
	spin_unlock(&chip->reg_lock);
	if (start) {
		/* wait until all FIFOs get ready */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
				      SD_STS_FIFO_READY))
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	} else {
		/* wait until all RUN bits are cleared */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
				    SD_CTL_DMA_START)
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	}
	spin_lock(&chip->reg_lock);
	/* reset SYNC bits */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) & ~sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
	if (start) {
		azx_timecounter_init(substream, 0, 0);
		if (nsync > 1) {
			cycle_t cycle_last;

			/* same start cycle for master and group */
			azx_dev = get_azx_dev(substream);
			cycle_last = azx_dev->azx_tc.cycle_last;

			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_timecounter_init(s, 1, cycle_last);
			}
		}
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
675 
/* get the current DMA position with correction on VIA chips
 *
 * Playback can use the link position (LPIB) directly.  For capture the
 * position buffer value is reconciled against the link position minus
 * the capture FIFO size to pick the period boundary the DMA pointer
 * actually belongs to.
 */
static unsigned int azx_via_get_position(struct azx *chip,
					 struct azx_dev *azx_dev)
{
	unsigned int link_pos, mini_pos, bound_pos;
	unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
	unsigned int fifo_size;

	link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
	if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Playback, no problem using link position */
		return link_pos;
	}

	/* Capture */
	/* For new chipset,
	 * use mod to get the DMA position just like old chipset
	 */
	mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
	mod_dma_pos %= azx_dev->period_bytes;

	/* azx_dev->fifo_size can't get FIFO size of in stream.
	 * Get from base address + offset.
	 */
	fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);

	if (azx_dev->insufficient) {
		/* Link position never greater than FIFO size */
		if (link_pos <= fifo_size)
			return 0;

		azx_dev->insufficient = 0;
	}

	if (link_pos <= fifo_size)
		mini_pos = azx_dev->bufsize + link_pos - fifo_size;
	else
		mini_pos = link_pos - fifo_size;

	/* Find nearest previous boundary */
	mod_mini_pos = mini_pos % azx_dev->period_bytes;
	mod_link_pos = link_pos % azx_dev->period_bytes;
	if (mod_link_pos >= fifo_size)
		bound_pos = link_pos - mod_link_pos;
	else if (mod_dma_pos >= mod_mini_pos)
		bound_pos = mini_pos - mod_mini_pos;
	else {
		bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
		if (bound_pos >= azx_dev->bufsize)
			bound_pos = 0;
	}

	/* Calculate real DMA position we want */
	return bound_pos + mod_dma_pos;
}
731 
/*
 * Return the current DMA position in bytes, using the method selected
 * by position_fix for this direction: the LPIB register, the VIA
 * workaround, or the DMA position buffer (with automatic fallback to
 * LPIB when the buffer reads 0 or ~0 and @with_check is set).  Also
 * updates runtime->delay from the LPIB/posbuf difference (when
 * AZX_DCAPS_COUNT_LPIB_DELAY is set) plus the codec-reported delay.
 */
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev,
			      bool with_check)
{
	struct snd_pcm_substream *substream = azx_dev->substream;
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	unsigned int pos;
	int stream = substream->stream;
	struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
	int delay = 0;

	switch (chip->position_fix[stream]) {
	case POS_FIX_LPIB:
		/* read LPIB */
		pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		break;
	case POS_FIX_VIACOMBO:
		pos = azx_via_get_position(chip, azx_dev);
		break;
	default:
		/* use the position buffer */
		pos = le32_to_cpu(*azx_dev->posbuf);
		if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
			if (!pos || pos == (u32)-1) {
				dev_info(chip->card->dev,
					 "Invalid position buffer, using LPIB read method instead.\n");
				chip->position_fix[stream] = POS_FIX_LPIB;
				pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
			} else
				chip->position_fix[stream] = POS_FIX_POSBUF;
		}
		break;
	}

	if (pos >= azx_dev->bufsize)
		pos = 0;

	/* calculate runtime delay from LPIB */
	if (substream->runtime &&
	    chip->position_fix[stream] == POS_FIX_POSBUF &&
	    (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
		unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
			delay = pos - lpib_pos;
		else
			delay = lpib_pos - pos;
		if (delay < 0) {
			/* small negatives are measurement jitter; large
			 * ones mean the pointer wrapped the ring buffer
			 */
			if (delay >= azx_dev->delay_negative_threshold)
				delay = 0;
			else
				delay += azx_dev->bufsize;
		}
		if (delay >= azx_dev->period_bytes) {
			dev_info(chip->card->dev,
				 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
				 delay, azx_dev->period_bytes);
			delay = 0;
			chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
		}
		delay = bytes_to_frames(substream->runtime, delay);
	}

	if (substream->runtime) {
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
805 
806 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
807 {
808 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
809 	struct azx *chip = apcm->chip;
810 	struct azx_dev *azx_dev = get_azx_dev(substream);
811 	return bytes_to_frames(substream->runtime,
812 			       azx_get_position(chip, azx_dev, false));
813 }
814 
815 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
816 				struct timespec *ts)
817 {
818 	struct azx_dev *azx_dev = get_azx_dev(substream);
819 	u64 nsec;
820 
821 	nsec = timecounter_read(&azx_dev->azx_tc);
822 	nsec = div_u64(nsec, 3); /* can be optimized */
823 	nsec = azx_adjust_codec_delay(substream, nsec);
824 
825 	*ts = ns_to_timespec(nsec);
826 
827 	return 0;
828 }
829 
/* baseline hardware capability template; the channel/format/rate
 * fields are overwritten per codec stream in azx_pcm_open()
 */
static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
854 
/* PCM open callback: claim a hardware stream, build the runtime
 * hardware description from the codec stream capabilities, apply the
 * buffer-size constraints, power up the codec and open its stream.
 * All error paths undo the stream assignment and the power reference.
 */
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		mutex_unlock(&chip->open_mutex);
		return -EBUSY;
	}
	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	/* 178s is just under the wrap period of the 32-bit 24MHz counter */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up_d3wait(apcm->codec);
	err = hinfo->ops.open(hinfo, apcm->codec, substream);
	if (err < 0) {
		azx_release_device(azx_dev);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return err;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		hinfo->ops.close(hinfo, apcm->codec, substream);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return -EINVAL;
	}

	/* disable WALLCLOCK timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;

	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = substream;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
940 
/* mmap callback: give the controller backend a chance to adjust the
 * vma (via pcm_mmap_prepare) before falling back to the default ALSA
 * buffer mapping
 */
static int azx_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *area)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	if (chip->ops->pcm_mmap_prepare)
		chip->ops->pcm_mmap_prepare(substream, area);
	return snd_pcm_lib_default_mmap(substream, area);
}
950 
/* PCM operation table shared by all azx streams */
static struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.wall_clock =  azx_get_wallclock_tstamp,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};
964 
965 static void azx_pcm_free(struct snd_pcm *pcm)
966 {
967 	struct azx_pcm *apcm = pcm->private_data;
968 	if (apcm) {
969 		list_del(&apcm->list);
970 		kfree(apcm);
971 	}
972 }
973 
/* upper bound for the pre-allocated PCM buffer: 32MB */
#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
975 
/*
 * Create and register an ALSA PCM device for the given codec PCM:
 * rejects duplicate device numbers, wires up the azx_pcm wrapper as
 * private data, installs the stream ops per direction and pre-allocates
 * scatter-gather buffers.
 */
static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
				 struct hda_pcm *cpcm)
{
	struct azx *chip = bus->private_data;
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL)
		/* NOTE(review): the freshly created pcm is left attached to
		 * the card here; presumably it is released together with the
		 * card - confirm against snd_pcm_new() ownership rules
		 */
		return -ENOMEM;
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		apcm->hinfo[s] = &cpcm->stream[s];
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	/* link to codec */
	pcm->dev = &codec->dev;
	return 0;
}
1028 
1029 /*
1030  * CORB / RIRB interface
1031  */
1032 static int azx_alloc_cmd_io(struct azx *chip)
1033 {
1034 	int err;
1035 
1036 	/* single page (at least 4096 bytes) must suffice for both ringbuffes */
1037 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1038 					 PAGE_SIZE, &chip->rb);
1039 	if (err < 0)
1040 		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
1041 	return err;
1042 }
1043 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
1044 
1045 static void azx_init_cmd_io(struct azx *chip)
1046 {
1047 	int timeout;
1048 
1049 	spin_lock_irq(&chip->reg_lock);
1050 	/* CORB set up */
1051 	chip->corb.addr = chip->rb.addr;
1052 	chip->corb.buf = (u32 *)chip->rb.area;
1053 	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1054 	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1055 
1056 	/* set the corb size to 256 entries (ULI requires explicitly) */
1057 	azx_writeb(chip, CORBSIZE, 0x02);
1058 	/* set the corb write pointer to 0 */
1059 	azx_writew(chip, CORBWP, 0);
1060 
1061 	/* reset the corb hw read pointer */
1062 	azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
1063 	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
1064 		for (timeout = 1000; timeout > 0; timeout--) {
1065 			if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
1066 				break;
1067 			udelay(1);
1068 		}
1069 		if (timeout <= 0)
1070 			dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1071 				azx_readw(chip, CORBRP));
1072 
1073 		azx_writew(chip, CORBRP, 0);
1074 		for (timeout = 1000; timeout > 0; timeout--) {
1075 			if (azx_readw(chip, CORBRP) == 0)
1076 				break;
1077 			udelay(1);
1078 		}
1079 		if (timeout <= 0)
1080 			dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1081 				azx_readw(chip, CORBRP));
1082 	}
1083 
1084 	/* enable corb dma */
1085 	azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
1086 
1087 	/* RIRB set up */
1088 	chip->rirb.addr = chip->rb.addr + 2048;
1089 	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1090 	chip->rirb.wp = chip->rirb.rp = 0;
1091 	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1092 	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1093 	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1094 
1095 	/* set the rirb size to 256 entries (ULI requires explicitly) */
1096 	azx_writeb(chip, RIRBSIZE, 0x02);
1097 	/* reset the rirb hw write pointer */
1098 	azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1099 	/* set N=1, get RIRB response interrupt for new entry */
1100 	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1101 		azx_writew(chip, RINTCNT, 0xc0);
1102 	else
1103 		azx_writew(chip, RINTCNT, 1);
1104 	/* enable rirb dma and response irq */
1105 	azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1106 	spin_unlock_irq(&chip->reg_lock);
1107 }
1108 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1109 
1110 static void azx_free_cmd_io(struct azx *chip)
1111 {
1112 	spin_lock_irq(&chip->reg_lock);
1113 	/* disable ringbuffer DMAs */
1114 	azx_writeb(chip, RIRBCTL, 0);
1115 	azx_writeb(chip, CORBCTL, 0);
1116 	spin_unlock_irq(&chip->reg_lock);
1117 }
1118 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1119 
1120 static unsigned int azx_command_addr(u32 cmd)
1121 {
1122 	unsigned int addr = cmd >> 28;
1123 
1124 	if (addr >= AZX_MAX_CODECS) {
1125 		snd_BUG();
1126 		addr = 0;
1127 	}
1128 
1129 	return addr;
1130 }
1131 
1132 /* send a command */
1133 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1134 {
1135 	struct azx *chip = bus->private_data;
1136 	unsigned int addr = azx_command_addr(val);
1137 	unsigned int wp, rp;
1138 
1139 	spin_lock_irq(&chip->reg_lock);
1140 
1141 	/* add command to corb */
1142 	wp = azx_readw(chip, CORBWP);
1143 	if (wp == 0xffff) {
1144 		/* something wrong, controller likely turned to D3 */
1145 		spin_unlock_irq(&chip->reg_lock);
1146 		return -EIO;
1147 	}
1148 	wp++;
1149 	wp %= ICH6_MAX_CORB_ENTRIES;
1150 
1151 	rp = azx_readw(chip, CORBRP);
1152 	if (wp == rp) {
1153 		/* oops, it's full */
1154 		spin_unlock_irq(&chip->reg_lock);
1155 		return -EAGAIN;
1156 	}
1157 
1158 	chip->rirb.cmds[addr]++;
1159 	chip->corb.buf[wp] = cpu_to_le32(val);
1160 	azx_writew(chip, CORBWP, wp);
1161 
1162 	spin_unlock_irq(&chip->reg_lock);
1163 
1164 	return 0;
1165 }
1166 
1167 #define ICH6_RIRB_EX_UNSOL_EV	(1<<4)
1168 
/* retrieve RIRB entry - called from interrupt handler */
static void azx_update_rirb(struct azx *chip)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = azx_readw(chip, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	/* nothing new since we last looked */
	if (wp == chip->rirb.wp)
		return;
	chip->rirb.wp = wp;

	/* consume every entry between our read pointer and the hw write ptr */
	while (chip->rirb.rp != wp) {
		chip->rirb.rp++;
		chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;

		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
		res = le32_to_cpu(chip->rirb.buf[rp]);
		addr = res_ex & 0xf;	/* codec address in the extended word */
		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
			/* response claims to come from a codec we don't know */
			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex,
				chip->rirb.rp, wp);
			snd_BUG();
		}
		else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
			/* unsolicited event: hand it off to the codec layer */
			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
		else if (chip->rirb.cmds[addr]) {
			/* solicited response: publish the value, then credit
			 * the counter that azx_rirb_get_response() waits on
			 */
			chip->rirb.res[addr] = res;
			smp_wmb();
			chip->rirb.cmds[addr]--;
		} else if (printk_ratelimit()) {
			/* response with no outstanding command */
			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex,
				chip->last_cmd[addr]);
		}
	}
}
1213 
1214 /* receive a response */
1215 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1216 					  unsigned int addr)
1217 {
1218 	struct azx *chip = bus->private_data;
1219 	unsigned long timeout;
1220 	unsigned long loopcounter;
1221 	int do_poll = 0;
1222 
1223  again:
1224 	timeout = jiffies + msecs_to_jiffies(1000);
1225 
1226 	for (loopcounter = 0;; loopcounter++) {
1227 		if (chip->polling_mode || do_poll) {
1228 			spin_lock_irq(&chip->reg_lock);
1229 			azx_update_rirb(chip);
1230 			spin_unlock_irq(&chip->reg_lock);
1231 		}
1232 		if (!chip->rirb.cmds[addr]) {
1233 			smp_rmb();
1234 			bus->rirb_error = 0;
1235 
1236 			if (!do_poll)
1237 				chip->poll_count = 0;
1238 			return chip->rirb.res[addr]; /* the last value */
1239 		}
1240 		if (time_after(jiffies, timeout))
1241 			break;
1242 		if (bus->needs_damn_long_delay || loopcounter > 3000)
1243 			msleep(2); /* temporary workaround */
1244 		else {
1245 			udelay(10);
1246 			cond_resched();
1247 		}
1248 	}
1249 
1250 	if (!bus->no_response_fallback)
1251 		return -1;
1252 
1253 	if (!chip->polling_mode && chip->poll_count < 2) {
1254 		dev_dbg(chip->card->dev,
1255 			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1256 			chip->last_cmd[addr]);
1257 		do_poll = 1;
1258 		chip->poll_count++;
1259 		goto again;
1260 	}
1261 
1262 
1263 	if (!chip->polling_mode) {
1264 		dev_warn(chip->card->dev,
1265 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1266 			 chip->last_cmd[addr]);
1267 		chip->polling_mode = 1;
1268 		goto again;
1269 	}
1270 
1271 	if (chip->msi) {
1272 		dev_warn(chip->card->dev,
1273 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1274 			 chip->last_cmd[addr]);
1275 		if (chip->ops->disable_msi_reset_irq(chip) &&
1276 		    chip->ops->disable_msi_reset_irq(chip) < 0) {
1277 			bus->rirb_error = 1;
1278 			return -1;
1279 		}
1280 		goto again;
1281 	}
1282 
1283 	if (chip->probing) {
1284 		/* If this critical timeout happens during the codec probing
1285 		 * phase, this is likely an access to a non-existing codec
1286 		 * slot.  Better to return an error and reset the system.
1287 		 */
1288 		return -1;
1289 	}
1290 
1291 	/* a fatal communication error; need either to reset or to fallback
1292 	 * to the single_cmd mode
1293 	 */
1294 	bus->rirb_error = 1;
1295 	if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1296 		bus->response_reset = 1;
1297 		return -1; /* give a chance to retry */
1298 	}
1299 
1300 	dev_err(chip->card->dev,
1301 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1302 		chip->last_cmd[addr]);
1303 	chip->single_cmd = 1;
1304 	bus->response_reset = 0;
1305 	/* release CORB/RIRB */
1306 	azx_free_cmd_io(chip);
1307 	/* disable unsolicited responses */
1308 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1309 	return -1;
1310 }
1311 
1312 /*
1313  * Use the single immediate command instead of CORB/RIRB for simplicity
1314  *
1315  * Note: according to Intel, this is not preferred use.  The command was
1316  *       intended for the BIOS only, and may get confused with unsolicited
1317  *       responses.  So, we shouldn't use it for normal operation from the
1318  *       driver.
1319  *       I left the codes, however, for debugging/testing purposes.
1320  */
1321 
1322 /* receive a response */
1323 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1324 {
1325 	int timeout = 50;
1326 
1327 	while (timeout--) {
1328 		/* check IRV busy bit */
1329 		if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1330 			/* reuse rirb.res as the response return value */
1331 			chip->rirb.res[addr] = azx_readl(chip, IR);
1332 			return 0;
1333 		}
1334 		udelay(1);
1335 	}
1336 	if (printk_ratelimit())
1337 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1338 			azx_readw(chip, IRS));
1339 	chip->rirb.res[addr] = -1;
1340 	return -EIO;
1341 }
1342 
1343 /* send a command */
1344 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1345 {
1346 	struct azx *chip = bus->private_data;
1347 	unsigned int addr = azx_command_addr(val);
1348 	int timeout = 50;
1349 
1350 	bus->rirb_error = 0;
1351 	while (timeout--) {
1352 		/* check ICB busy bit */
1353 		if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1354 			/* Clear IRV valid bit */
1355 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1356 				   ICH6_IRS_VALID);
1357 			azx_writel(chip, IC, val);
1358 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1359 				   ICH6_IRS_BUSY);
1360 			return azx_single_wait_for_response(chip, addr);
1361 		}
1362 		udelay(1);
1363 	}
1364 	if (printk_ratelimit())
1365 		dev_dbg(chip->card->dev,
1366 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
1367 			azx_readw(chip, IRS), val);
1368 	return -EIO;
1369 }
1370 
1371 /* receive a response */
1372 static unsigned int azx_single_get_response(struct hda_bus *bus,
1373 					    unsigned int addr)
1374 {
1375 	struct azx *chip = bus->private_data;
1376 	return chip->rirb.res[addr];
1377 }
1378 
1379 /*
1380  * The below are the main callbacks from hda_codec.
1381  *
1382  * They are just the skeleton to call sub-callbacks according to the
1383  * current setting of chip->single_cmd.
1384  */
1385 
1386 /* send a command */
1387 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1388 {
1389 	struct azx *chip = bus->private_data;
1390 
1391 	if (chip->disabled)
1392 		return 0;
1393 	chip->last_cmd[azx_command_addr(val)] = val;
1394 	if (chip->single_cmd)
1395 		return azx_single_send_cmd(bus, val);
1396 	else
1397 		return azx_corb_send_cmd(bus, val);
1398 }
1399 EXPORT_SYMBOL_GPL(azx_send_cmd);
1400 
1401 /* get a response */
1402 static unsigned int azx_get_response(struct hda_bus *bus,
1403 				     unsigned int addr)
1404 {
1405 	struct azx *chip = bus->private_data;
1406 	if (chip->disabled)
1407 		return 0;
1408 	if (chip->single_cmd)
1409 		return azx_single_get_response(bus, addr);
1410 	else
1411 		return azx_rirb_get_response(bus, addr);
1412 }
1413 EXPORT_SYMBOL_GPL(azx_get_response);
1414 
1415 #ifdef CONFIG_SND_HDA_DSP_LOADER
1416 /*
1417  * DSP loading code (e.g. for CA0132)
1418  */
1419 
1420 /* use the first stream for loading DSP */
1421 static struct azx_dev *
1422 azx_get_dsp_loader_dev(struct azx *chip)
1423 {
1424 	return &chip->azx_dev[chip->playback_index_offset];
1425 }
1426 
/*
 * Prepare the DSP loader stream: claim it, save its state, allocate
 * the transfer buffer and program a single-fragment BDL covering
 * byte_size bytes.  Returns the stream tag on success or a negative
 * error code (-EBUSY when the stream is running or already claimed).
 */
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
				unsigned int byte_size,
				struct snd_dma_buffer *bufp)
{
	u32 *bdl;
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);

	dsp_lock(azx_dev);
	spin_lock_irq(&chip->reg_lock);
	/* the stream must be idle and not already claimed by a loader */
	if (azx_dev->running || azx_dev->locked) {
		spin_unlock_irq(&chip->reg_lock);
		err = -EBUSY;
		goto unlock;
	}
	azx_dev->prepared = 0;
	/* keep a copy so the PCM-side state can be restored afterwards */
	chip->saved_azx_dev = *azx_dev;
	azx_dev->locked = 1;
	spin_unlock_irq(&chip->reg_lock);

	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
					 byte_size, bufp);
	if (err < 0)
		goto err_alloc;

	/* one period spanning the whole firmware image */
	azx_dev->bufsize = byte_size;
	azx_dev->period_bytes = byte_size;
	azx_dev->format_val = format;

	azx_stream_reset(chip, azx_dev);

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	azx_dev->frags = 0;
	bdl = (u32 *)azx_dev->bdl.area;
	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
	if (err < 0)
		goto error;

	azx_setup_controller(chip, azx_dev);
	dsp_unlock(azx_dev);
	return azx_dev->stream_tag;

 error:
	chip->ops->dma_free_pages(chip, bufp);
 err_alloc:
	spin_lock_irq(&chip->reg_lock);
	/* restore the saved stream state on failure */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
 unlock:
	dsp_unlock(azx_dev);
	return err;
}
1487 
1488 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1489 {
1490 	struct azx *chip = bus->private_data;
1491 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1492 
1493 	if (start)
1494 		azx_stream_start(chip, azx_dev);
1495 	else
1496 		azx_stream_stop(chip, azx_dev);
1497 	azx_dev->running = start;
1498 }
1499 
/*
 * Undo azx_load_dsp_prepare(): stop the stream descriptor, free the
 * DSP transfer buffer and restore the saved stream state.  A no-op
 * when the buffer was never allocated or the stream isn't locked.
 */
static void azx_load_dsp_cleanup(struct hda_bus *bus,
				 struct snd_dma_buffer *dmab)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (!dmab->area || !azx_dev->locked)
		return;

	dsp_lock(azx_dev);
	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;

	chip->ops->dma_free_pages(chip, dmab);
	dmab->area = NULL;

	spin_lock_irq(&chip->reg_lock);
	/* hand the stream back to the PCM side if it's still open */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
	dsp_unlock(azx_dev);
}
1528 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1529 
1530 int azx_alloc_stream_pages(struct azx *chip)
1531 {
1532 	int i, err;
1533 	struct snd_card *card = chip->card;
1534 
1535 	for (i = 0; i < chip->num_streams; i++) {
1536 		dsp_lock_init(&chip->azx_dev[i]);
1537 		/* allocate memory for the BDL for each stream */
1538 		err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1539 						 BDL_SIZE,
1540 						 &chip->azx_dev[i].bdl);
1541 		if (err < 0) {
1542 			dev_err(card->dev, "cannot allocate BDL\n");
1543 			return -ENOMEM;
1544 		}
1545 	}
1546 	/* allocate memory for the position buffer */
1547 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1548 					 chip->num_streams * 8, &chip->posbuf);
1549 	if (err < 0) {
1550 		dev_err(card->dev, "cannot allocate posbuf\n");
1551 		return -ENOMEM;
1552 	}
1553 
1554 	/* allocate CORB/RIRB */
1555 	err = azx_alloc_cmd_io(chip);
1556 	if (err < 0)
1557 		return err;
1558 	return 0;
1559 }
1560 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1561 
1562 void azx_free_stream_pages(struct azx *chip)
1563 {
1564 	int i;
1565 	if (chip->azx_dev) {
1566 		for (i = 0; i < chip->num_streams; i++)
1567 			if (chip->azx_dev[i].bdl.area)
1568 				chip->ops->dma_free_pages(
1569 					chip, &chip->azx_dev[i].bdl);
1570 	}
1571 	if (chip->rb.area)
1572 		chip->ops->dma_free_pages(chip, &chip->rb);
1573 	if (chip->posbuf.area)
1574 		chip->ops->dma_free_pages(chip, &chip->posbuf);
1575 }
1576 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1577 
1578 /*
1579  * Lowlevel interface
1580  */
1581 
1582 /* enter link reset */
1583 void azx_enter_link_reset(struct azx *chip)
1584 {
1585 	unsigned long timeout;
1586 
1587 	/* reset controller */
1588 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
1589 
1590 	timeout = jiffies + msecs_to_jiffies(100);
1591 	while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1592 			time_before(jiffies, timeout))
1593 		usleep_range(500, 1000);
1594 }
1595 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1596 
1597 /* exit link reset */
1598 static void azx_exit_link_reset(struct azx *chip)
1599 {
1600 	unsigned long timeout;
1601 
1602 	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1603 
1604 	timeout = jiffies + msecs_to_jiffies(100);
1605 	while (!azx_readb(chip, GCTL) &&
1606 			time_before(jiffies, timeout))
1607 		usleep_range(500, 1000);
1608 }
1609 
1610 /* reset codec link */
1611 static int azx_reset(struct azx *chip, bool full_reset)
1612 {
1613 	if (!full_reset)
1614 		goto __skip;
1615 
1616 	/* clear STATESTS */
1617 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1618 
1619 	/* reset controller */
1620 	azx_enter_link_reset(chip);
1621 
1622 	/* delay for >= 100us for codec PLL to settle per spec
1623 	 * Rev 0.9 section 5.5.1
1624 	 */
1625 	usleep_range(500, 1000);
1626 
1627 	/* Bring controller out of reset */
1628 	azx_exit_link_reset(chip);
1629 
1630 	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
1631 	usleep_range(1000, 1200);
1632 
1633       __skip:
1634 	/* check to see if controller is ready */
1635 	if (!azx_readb(chip, GCTL)) {
1636 		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1637 		return -EBUSY;
1638 	}
1639 
1640 	/* Accept unsolicited responses */
1641 	if (!chip->single_cmd)
1642 		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1643 			   ICH6_GCTL_UNSOL);
1644 
1645 	/* detect codecs */
1646 	if (!chip->codec_mask) {
1647 		chip->codec_mask = azx_readw(chip, STATESTS);
1648 		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1649 			chip->codec_mask);
1650 	}
1651 
1652 	return 0;
1653 }
1654 
1655 /* enable interrupts */
1656 static void azx_int_enable(struct azx *chip)
1657 {
1658 	/* enable controller CIE and GIE */
1659 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1660 		   ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1661 }
1662 
1663 /* disable interrupts */
1664 static void azx_int_disable(struct azx *chip)
1665 {
1666 	int i;
1667 
1668 	/* disable interrupts in stream descriptor */
1669 	for (i = 0; i < chip->num_streams; i++) {
1670 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1671 		azx_sd_writeb(chip, azx_dev, SD_CTL,
1672 			      azx_sd_readb(chip, azx_dev, SD_CTL) &
1673 					~SD_INT_MASK);
1674 	}
1675 
1676 	/* disable SIE for all streams */
1677 	azx_writeb(chip, INTCTL, 0);
1678 
1679 	/* disable controller CIE and GIE */
1680 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1681 		   ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1682 }
1683 
1684 /* clear interrupts */
1685 static void azx_int_clear(struct azx *chip)
1686 {
1687 	int i;
1688 
1689 	/* clear stream status */
1690 	for (i = 0; i < chip->num_streams; i++) {
1691 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1692 		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1693 	}
1694 
1695 	/* clear STATESTS */
1696 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1697 
1698 	/* clear rirb status */
1699 	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1700 
1701 	/* clear int status */
1702 	azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1703 }
1704 
1705 /*
1706  * reset and start the controller registers
1707  */
1708 void azx_init_chip(struct azx *chip, bool full_reset)
1709 {
1710 	if (chip->initialized)
1711 		return;
1712 
1713 	/* reset controller */
1714 	azx_reset(chip, full_reset);
1715 
1716 	/* initialize interrupts */
1717 	azx_int_clear(chip);
1718 	azx_int_enable(chip);
1719 
1720 	/* initialize the codec command I/O */
1721 	if (!chip->single_cmd)
1722 		azx_init_cmd_io(chip);
1723 
1724 	/* program the position buffer */
1725 	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1726 	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1727 
1728 	chip->initialized = 1;
1729 }
1730 EXPORT_SYMBOL_GPL(azx_init_chip);
1731 
1732 void azx_stop_chip(struct azx *chip)
1733 {
1734 	if (!chip->initialized)
1735 		return;
1736 
1737 	/* disable interrupts */
1738 	azx_int_disable(chip);
1739 	azx_int_clear(chip);
1740 
1741 	/* disable CORB/RIRB */
1742 	azx_free_cmd_io(chip);
1743 
1744 	/* disable position buffer */
1745 	azx_writel(chip, DPLBASE, 0);
1746 	azx_writel(chip, DPUBASE, 0);
1747 
1748 	chip->initialized = 0;
1749 }
1750 EXPORT_SYMBOL_GPL(azx_stop_chip);
1751 
1752 /*
1753  * interrupt handler
1754  */
1755 irqreturn_t azx_interrupt(int irq, void *dev_id)
1756 {
1757 	struct azx *chip = dev_id;
1758 	struct azx_dev *azx_dev;
1759 	u32 status;
1760 	u8 sd_status;
1761 	int i;
1762 
1763 #ifdef CONFIG_PM_RUNTIME
1764 	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1765 		if (!pm_runtime_active(chip->card->dev))
1766 			return IRQ_NONE;
1767 #endif
1768 
1769 	spin_lock(&chip->reg_lock);
1770 
1771 	if (chip->disabled) {
1772 		spin_unlock(&chip->reg_lock);
1773 		return IRQ_NONE;
1774 	}
1775 
1776 	status = azx_readl(chip, INTSTS);
1777 	if (status == 0 || status == 0xffffffff) {
1778 		spin_unlock(&chip->reg_lock);
1779 		return IRQ_NONE;
1780 	}
1781 
1782 	for (i = 0; i < chip->num_streams; i++) {
1783 		azx_dev = &chip->azx_dev[i];
1784 		if (status & azx_dev->sd_int_sta_mask) {
1785 			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1786 			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1787 			if (!azx_dev->substream || !azx_dev->running ||
1788 			    !(sd_status & SD_INT_COMPLETE))
1789 				continue;
1790 			/* check whether this IRQ is really acceptable */
1791 			if (!chip->ops->position_check ||
1792 			    chip->ops->position_check(chip, azx_dev)) {
1793 				spin_unlock(&chip->reg_lock);
1794 				snd_pcm_period_elapsed(azx_dev->substream);
1795 				spin_lock(&chip->reg_lock);
1796 			}
1797 		}
1798 	}
1799 
1800 	/* clear rirb int */
1801 	status = azx_readb(chip, RIRBSTS);
1802 	if (status & RIRB_INT_MASK) {
1803 		if (status & RIRB_INT_RESPONSE) {
1804 			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1805 				udelay(80);
1806 			azx_update_rirb(chip);
1807 		}
1808 		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1809 	}
1810 
1811 	spin_unlock(&chip->reg_lock);
1812 
1813 	return IRQ_HANDLED;
1814 }
1815 EXPORT_SYMBOL_GPL(azx_interrupt);
1816 
1817 /*
1818  * Codec initerface
1819  */
1820 
1821 /*
1822  * Probe the given codec address
1823  */
1824 static int probe_codec(struct azx *chip, int addr)
1825 {
1826 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1827 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1828 	unsigned int res;
1829 
1830 	mutex_lock(&chip->bus->cmd_mutex);
1831 	chip->probing = 1;
1832 	azx_send_cmd(chip->bus, cmd);
1833 	res = azx_get_response(chip->bus, addr);
1834 	chip->probing = 0;
1835 	mutex_unlock(&chip->bus->cmd_mutex);
1836 	if (res == -1)
1837 		return -EIO;
1838 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1839 	return 0;
1840 }
1841 
1842 static void azx_bus_reset(struct hda_bus *bus)
1843 {
1844 	struct azx *chip = bus->private_data;
1845 
1846 	bus->in_reset = 1;
1847 	azx_stop_chip(chip);
1848 	azx_init_chip(chip, true);
1849 #ifdef CONFIG_PM
1850 	if (chip->initialized) {
1851 		struct azx_pcm *p;
1852 		list_for_each_entry(p, &chip->pcm_list, list)
1853 			snd_pcm_suspend_all(p->pcm);
1854 		snd_hda_suspend(chip->bus);
1855 		snd_hda_resume(chip->bus);
1856 	}
1857 #endif
1858 	bus->in_reset = 0;
1859 }
1860 
1861 #ifdef CONFIG_PM
1862 /* power-up/down the controller */
1863 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1864 {
1865 	struct azx *chip = bus->private_data;
1866 
1867 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1868 		return;
1869 
1870 	if (power_up)
1871 		pm_runtime_get_sync(chip->card->dev);
1872 	else
1873 		pm_runtime_put_sync(chip->card->dev);
1874 }
1875 #endif
1876 
1877 static int get_jackpoll_interval(struct azx *chip)
1878 {
1879 	int i;
1880 	unsigned int j;
1881 
1882 	if (!chip->jackpoll_ms)
1883 		return 0;
1884 
1885 	i = chip->jackpoll_ms[chip->dev_index];
1886 	if (i == 0)
1887 		return 0;
1888 	if (i < 50 || i > 60000)
1889 		j = 0;
1890 	else
1891 		j = msecs_to_jiffies(i);
1892 	if (j == 0)
1893 		dev_warn(chip->card->dev,
1894 			 "jackpoll_ms value out of range: %d\n", i);
1895 	return j;
1896 }
1897 
/*
 * Codec initialization: create the hda_bus with our controller ops,
 * probe all slots flagged in codec_mask (restricted by
 * codec_probe_mask), and instantiate a hda_codec per responding slot.
 * Returns 0, a bus-creation error, or -ENXIO when no codec came up.
 */
int azx_codec_create(struct azx *chip, const char *model,
		     unsigned int max_slots,
		     int *power_save_to)
{
	struct hda_bus_template bus_temp;
	int c, codecs, err;

	memset(&bus_temp, 0, sizeof(bus_temp));
	bus_temp.private_data = chip;
	bus_temp.modelname = model;
	bus_temp.pci = chip->pci;
	bus_temp.ops.command = azx_send_cmd;
	bus_temp.ops.get_response = azx_get_response;
	bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
	bus_temp.ops.bus_reset = azx_bus_reset;
#ifdef CONFIG_PM
	bus_temp.power_save = power_save_to;
	bus_temp.ops.pm_notify = azx_power_notify;
#endif
#ifdef CONFIG_SND_HDA_DSP_LOADER
	bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
	bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
	bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
#endif

	err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
	if (err < 0)
		return err;

	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
		chip->bus->needs_damn_long_delay = 1;
	}

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				chip->codec_mask &= ~(1 << c);
				/* More badly, accessing to a non-existing
				 * codec often screws up the controller chip,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller chip to
				 * get back to the sanity state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* AMD chipsets often cause the communication stalls upon certain
	 * sequence like the pin-detection.  It seems that forcing the synced
	 * access works around the stall.  Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		chip->bus->sync_write = 1;
		chip->bus->allow_bus_reset = 1;
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(chip->bus, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = get_jackpoll_interval(chip);
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_create);
1989 
1990 /* configure each codec instance */
1991 int azx_codec_configure(struct azx *chip)
1992 {
1993 	struct hda_codec *codec;
1994 	list_for_each_entry(codec, &chip->bus->codec_list, list) {
1995 		snd_hda_codec_configure(codec);
1996 	}
1997 	return 0;
1998 }
1999 EXPORT_SYMBOL_GPL(azx_codec_configure);
2000 
/* mixer creation - all stuff is implemented in hda module;
 * this merely triggers control building for the codecs on the bus
 */
int azx_mixer_create(struct azx *chip)
{
	return snd_hda_build_controls(chip->bus);
}
EXPORT_SYMBOL_GPL(azx_mixer_create);
2007 
2008 
2009 /* initialize SD streams */
2010 int azx_init_stream(struct azx *chip)
2011 {
2012 	int i;
2013 
2014 	/* initialize each stream (aka device)
2015 	 * assign the starting bdl address to each stream (device)
2016 	 * and initialize
2017 	 */
2018 	for (i = 0; i < chip->num_streams; i++) {
2019 		struct azx_dev *azx_dev = &chip->azx_dev[i];
2020 		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
2021 		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
2022 		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
2023 		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
2024 		azx_dev->sd_int_sta_mask = 1 << i;
2025 		/* stream tag: must be non-zero and unique */
2026 		azx_dev->index = i;
2027 		azx_dev->stream_tag = i + 1;
2028 	}
2029 
2030 	return 0;
2031 }
2032 EXPORT_SYMBOL_GPL(azx_init_stream);
2033 
2034 MODULE_LICENSE("GPL");
2035 MODULE_DESCRIPTION("Common HDA driver funcitons");
2036