xref: /openbmc/linux/sound/pci/hda/hda_controller.c (revision df3305156f989339529b3d6744b898d498fb1f7b)
1 /*
2  *
3  *  Implementation of primary alsa driver code base for Intel HD Audio.
4  *
5  *  Copyright(c) 2004 Intel Corporation. All rights reserved.
6  *
7  *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8  *                     PeiSen Hou <pshou@realtek.com.tw>
9  *
10  *  This program is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License as published by the Free
12  *  Software Foundation; either version 2 of the License, or (at your option)
13  *  any later version.
14  *
15  *  This program is distributed in the hope that it will be useful, but WITHOUT
16  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  *  more details.
19  *
20  *
21  */
22 
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_priv.h"
34 #include "hda_controller.h"
35 
36 #define CREATE_TRACE_POINTS
37 #include "hda_intel_trace.h"
38 
39 /* DSP lock helpers */
40 #ifdef CONFIG_SND_HDA_DSP_LOADER
41 #define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
42 #define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
43 #define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
44 #define dsp_is_locked(dev)	((dev)->locked)
45 #else
46 #define dsp_lock_init(dev)	do {} while (0)
47 #define dsp_lock(dev)		do {} while (0)
48 #define dsp_unlock(dev)		do {} while (0)
49 #define dsp_is_locked(dev)	0
50 #endif
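/*
 * When CONFIG_SND_HDA_DSP_LOADER is not set, these helpers compile away to
 * no-ops, so the stream assignment and PCM paths pay no locking cost for
 * the DSP loader support.
 */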
51 
52 /*
53  * AZX stream operations.
54  */
55 
56 /* start a stream */
57 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
58 {
59 	/*
60 	 * Before starting the stream, initialize the parameters
61 	 */
62 	azx_dev->insufficient = 1;
63 
64 	/* enable SIE */
65 	azx_writel(chip, INTCTL,
66 		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
67 	/* set DMA start and interrupt mask */
68 	azx_sd_writeb(chip, azx_dev, SD_CTL,
69 		      azx_sd_readb(chip, azx_dev, SD_CTL) |
70 		      SD_CTL_DMA_START | SD_INT_MASK);
71 }
72 
73 /* stop DMA */
74 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 {
76 	azx_sd_writeb(chip, azx_dev, SD_CTL,
77 		      azx_sd_readb(chip, azx_dev, SD_CTL) &
78 		      ~(SD_CTL_DMA_START | SD_INT_MASK));
79 	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
80 }
81 
82 /* stop a stream */
83 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 {
85 	azx_stream_clear(chip, azx_dev);
86 	/* disable SIE */
87 	azx_writel(chip, INTCTL,
88 		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 }
90 EXPORT_SYMBOL_GPL(azx_stream_stop);
91 
92 /* reset stream */
93 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
94 {
95 	unsigned char val;
96 	int timeout;
97 
98 	azx_stream_clear(chip, azx_dev);
99 
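	/*
	 * Stream reset handshake, per the HD Audio spec: assert the stream
	 * reset bit in SD_CTL, wait for the hardware to read it back, then
	 * clear the bit and wait until the hardware reports the stream is
	 * out of reset.  The 300-iteration loops below are simple bounded
	 * busy-waits for those read-backs.
	 */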
100 	azx_sd_writeb(chip, azx_dev, SD_CTL,
101 		      azx_sd_readb(chip, azx_dev, SD_CTL) |
102 		      SD_CTL_STREAM_RESET);
103 	udelay(3);
104 	timeout = 300;
105 	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
106 		 SD_CTL_STREAM_RESET) && --timeout)
107 		;
108 	val &= ~SD_CTL_STREAM_RESET;
109 	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
110 	udelay(3);
111 
112 	timeout = 300;
113 	/* waiting for hardware to report that the stream is out of reset */
114 	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
115 		SD_CTL_STREAM_RESET) && --timeout)
116 		;
117 
118 	/* reset first position - may not be synced with hw at this time */
119 	*azx_dev->posbuf = 0;
120 }
121 
122 /*
123  * set up the SD for streaming
124  */
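/*
 * This programs the per-stream descriptor registers: the stream tag in
 * SD_CTL, the cyclic buffer length (SD_CBL), the stream format (SD_FORMAT),
 * the last valid BDL index (SD_LVI) and the 64-bit BDL base address
 * (SD_BDLPL/SD_BDLPU); it then enables the DMA position buffer and the
 * per-stream interrupt bits.
 */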
125 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
126 {
127 	unsigned int val;
128 	/* make sure the run bit is zero for SD */
129 	azx_stream_clear(chip, azx_dev);
130 	/* program the stream_tag */
131 	val = azx_sd_readl(chip, azx_dev, SD_CTL);
132 	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
133 		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
134 	if (!azx_snoop(chip))
135 		val |= SD_CTL_TRAFFIC_PRIO;
136 	azx_sd_writel(chip, azx_dev, SD_CTL, val);
137 
138 	/* program the length of samples in cyclic buffer */
139 	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140 
141 	/* program the stream format */
142 	/* this value must match the stream format programmed to the codec converter */
143 	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144 
145 	/* program the stream LVI (last valid index) of the BDL */
146 	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147 
148 	/* program the BDL address */
149 	/* lower BDL address */
150 	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
151 	/* upper BDL address */
152 	azx_sd_writel(chip, azx_dev, SD_BDLPU,
153 		      upper_32_bits(azx_dev->bdl.addr));
154 
155 	/* enable the position buffer */
156 	if (chip->get_position[0] != azx_get_pos_lpib ||
157 	    chip->get_position[1] != azx_get_pos_lpib) {
158 		if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
159 			azx_writel(chip, DPLBASE,
160 				(u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
161 	}
162 
163 	/* set the interrupt enable bits in the descriptor control register */
164 	azx_sd_writel(chip, azx_dev, SD_CTL,
165 		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
166 
167 	return 0;
168 }
169 
170 /* assign a stream for the PCM */
171 static inline struct azx_dev *
172 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
173 {
174 	int dev, i, nums;
175 	struct azx_dev *res = NULL;
176 	/* make a non-zero unique key for the substream */
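	/*
	 * Key layout (informational): PCM device number in bits 31:16,
	 * substream number shifted left by 2, and stream direction + 1 in
	 * the low bits; e.g. playback substream 0 of PCM device 1 gets the
	 * key 0x10001.  A stream that previously carried the same key is
	 * preferred when re-opening.
	 */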
177 	int key = (substream->pcm->device << 16) | (substream->number << 2) |
178 		(substream->stream + 1);
179 
180 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
181 		dev = chip->playback_index_offset;
182 		nums = chip->playback_streams;
183 	} else {
184 		dev = chip->capture_index_offset;
185 		nums = chip->capture_streams;
186 	}
187 	for (i = 0; i < nums; i++, dev++) {
188 		struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 		dsp_lock(azx_dev);
190 		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
191 			if (azx_dev->assigned_key == key) {
192 				azx_dev->opened = 1;
193 				azx_dev->assigned_key = key;
194 				dsp_unlock(azx_dev);
195 				return azx_dev;
196 			}
197 			if (!res ||
198 			    (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
199 				res = azx_dev;
200 		}
201 		dsp_unlock(azx_dev);
202 	}
203 	if (res) {
204 		dsp_lock(res);
205 		res->opened = 1;
206 		res->assigned_key = key;
207 		dsp_unlock(res);
208 	}
209 	return res;
210 }
211 
212 /* release the assigned stream */
213 static inline void azx_release_device(struct azx_dev *azx_dev)
214 {
215 	azx_dev->opened = 0;
216 }
217 
218 static cycle_t azx_cc_read(const struct cyclecounter *cc)
219 {
220 	struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
221 	struct snd_pcm_substream *substream = azx_dev->substream;
222 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
223 	struct azx *chip = apcm->chip;
224 
225 	return azx_readl(chip, WALLCLK);
226 }
227 
228 static void azx_timecounter_init(struct snd_pcm_substream *substream,
229 				bool force, cycle_t last)
230 {
231 	struct azx_dev *azx_dev = get_azx_dev(substream);
232 	struct timecounter *tc = &azx_dev->azx_tc;
233 	struct cyclecounter *cc = &azx_dev->azx_cc;
234 	u64 nsec;
235 
236 	cc->read = azx_cc_read;
237 	cc->mask = CLOCKSOURCE_MASK(32);
238 
239 	/*
240 	 * Converting from 24 MHz to ns means applying a 125/3 factor.
241 	 * To avoid any saturation issues in intermediate operations,
242 	 * the 125 factor is applied first. The division is applied
243 	 * last after reading the timecounter value.
244 	 * Applying the 1/3 factor as part of the multiplication
245 	 * requires at least 20 bits for decent precision; however,
246 	 * overflows then occur after about 4 hours or less, which is not an option.
247 	 */
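	/*
	 * Worked example (informational): WALLCLK ticks at 24 MHz, so
	 * t_ns = ticks * 125 / 3.  One second is 24,000,000 ticks;
	 * multiplied by 125 that is 3,000,000,000, divided by 3 it is
	 * 1,000,000,000 ns.  The division by 3 happens later, after
	 * timecounter_read() (see azx_get_wallclock_tstamp()).
	 */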
248 
249 	cc->mult = 125; /* saturation after 195 years */
250 	cc->shift = 0;
251 
252 	nsec = 0; /* audio time is elapsed time since trigger */
253 	timecounter_init(tc, cc, nsec);
254 	if (force)
255 		/*
256 		 * force timecounter to use predefined value,
257 		 * used for synchronized starts
258 		 */
259 		tc->cycle_last = last;
260 }
261 
262 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
263 				u64 nsec)
264 {
265 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
266 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
267 	u64 codec_frames, codec_nsecs;
268 
269 	if (!hinfo->ops.get_delay)
270 		return nsec;
271 
272 	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
273 	codec_nsecs = div_u64(codec_frames * 1000000000LL,
274 			      substream->runtime->rate);
275 
276 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
277 		return nsec + codec_nsecs;
278 
279 	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
280 }
281 
282 /*
283  * set up a BDL entry
284  */
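/*
 * Each BDL entry (BDLE) is four 32-bit little-endian words: buffer address
 * low, buffer address high, length in bytes, and a flags word whose bit 0
 * is IOC (interrupt on completion).  setup_bdle() fills as many entries as
 * needed to cover [ofs, ofs + size), splitting at SG chunk boundaries and,
 * on AZX_DCAPS_4K_BDLE_BOUNDARY chips, at 4 KiB boundaries.
 */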
285 static int setup_bdle(struct azx *chip,
286 		      struct snd_dma_buffer *dmab,
287 		      struct azx_dev *azx_dev, u32 **bdlp,
288 		      int ofs, int size, int with_ioc)
289 {
290 	u32 *bdl = *bdlp;
291 
292 	while (size > 0) {
293 		dma_addr_t addr;
294 		int chunk;
295 
296 		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
297 			return -EINVAL;
298 
299 		addr = snd_sgbuf_get_addr(dmab, ofs);
300 		/* program the address field of the BDL entry */
301 		bdl[0] = cpu_to_le32((u32)addr);
302 		bdl[1] = cpu_to_le32(upper_32_bits(addr));
303 		/* program the size field of the BDL entry */
304 		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
305 		/* one BDLE cannot cross 4K boundary on CTHDA chips */
306 		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
307 			u32 remain = 0x1000 - (ofs & 0xfff);
308 			if (chunk > remain)
309 				chunk = remain;
310 		}
311 		bdl[2] = cpu_to_le32(chunk);
312 		/* program the IOC to enable interrupt
313 		 * only when the whole fragment is processed
314 		 */
315 		size -= chunk;
316 		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
317 		bdl += 4;
318 		azx_dev->frags++;
319 		ofs += chunk;
320 	}
321 	*bdlp = bdl;
322 	return ofs;
323 }
324 
325 /*
326  * set up BDL entries
327  */
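/*
 * The cyclic buffer is normally described by one BDLE per period.  When
 * bdl_pos_adj is set, a small extra BDLE is placed in front (and its size
 * subtracted from the last period) so that period interrupts fire slightly
 * after the nominal period boundary, apparently to compensate for delayed
 * DMA position updates on some controllers.
 */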
328 static int azx_setup_periods(struct azx *chip,
329 			     struct snd_pcm_substream *substream,
330 			     struct azx_dev *azx_dev)
331 {
332 	u32 *bdl;
333 	int i, ofs, periods, period_bytes;
334 	int pos_adj = 0;
335 
336 	/* reset BDL address */
337 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
338 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
339 
340 	period_bytes = azx_dev->period_bytes;
341 	periods = azx_dev->bufsize / period_bytes;
342 
343 	/* program the initial BDL entries */
344 	bdl = (u32 *)azx_dev->bdl.area;
345 	ofs = 0;
346 	azx_dev->frags = 0;
347 
348 	if (chip->bdl_pos_adj)
349 		pos_adj = chip->bdl_pos_adj[chip->dev_index];
350 	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
351 		struct snd_pcm_runtime *runtime = substream->runtime;
352 		int pos_align = pos_adj;
353 		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
354 		if (!pos_adj)
355 			pos_adj = pos_align;
356 		else
357 			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
358 				pos_align;
359 		pos_adj = frames_to_bytes(runtime, pos_adj);
360 		if (pos_adj >= period_bytes) {
361 		dev_warn(chip->card->dev, "Too big adjustment %d\n",
362 				 pos_adj);
363 			pos_adj = 0;
364 		} else {
365 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
366 					 azx_dev,
367 					 &bdl, ofs, pos_adj, true);
368 			if (ofs < 0)
369 				goto error;
370 		}
371 	} else
372 		pos_adj = 0;
373 
374 	for (i = 0; i < periods; i++) {
375 		if (i == periods - 1 && pos_adj)
376 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
377 					 azx_dev, &bdl, ofs,
378 					 period_bytes - pos_adj, 0);
379 		else
380 			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
381 					 azx_dev, &bdl, ofs,
382 					 period_bytes,
383 					 !azx_dev->no_period_wakeup);
384 		if (ofs < 0)
385 			goto error;
386 	}
387 	return 0;
388 
389  error:
390 	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
391 		azx_dev->bufsize, period_bytes);
392 	return -EINVAL;
393 }
394 
395 /*
396  * PCM ops
397  */
398 
399 static int azx_pcm_close(struct snd_pcm_substream *substream)
400 {
401 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
402 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
403 	struct azx *chip = apcm->chip;
404 	struct azx_dev *azx_dev = get_azx_dev(substream);
405 	unsigned long flags;
406 
407 	mutex_lock(&chip->open_mutex);
408 	spin_lock_irqsave(&chip->reg_lock, flags);
409 	azx_dev->substream = NULL;
410 	azx_dev->running = 0;
411 	spin_unlock_irqrestore(&chip->reg_lock, flags);
412 	azx_release_device(azx_dev);
413 	hinfo->ops.close(hinfo, apcm->codec, substream);
414 	snd_hda_power_down(apcm->codec);
415 	mutex_unlock(&chip->open_mutex);
416 	return 0;
417 }
418 
419 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
420 			     struct snd_pcm_hw_params *hw_params)
421 {
422 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
423 	struct azx *chip = apcm->chip;
424 	int ret;
425 
426 	dsp_lock(get_azx_dev(substream));
427 	if (dsp_is_locked(get_azx_dev(substream))) {
428 		ret = -EBUSY;
429 		goto unlock;
430 	}
431 
432 	ret = chip->ops->substream_alloc_pages(chip, substream,
433 					  params_buffer_bytes(hw_params));
434 unlock:
435 	dsp_unlock(get_azx_dev(substream));
436 	return ret;
437 }
438 
439 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
440 {
441 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
442 	struct azx_dev *azx_dev = get_azx_dev(substream);
443 	struct azx *chip = apcm->chip;
444 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
445 	int err;
446 
447 	/* reset BDL address */
448 	dsp_lock(azx_dev);
449 	if (!dsp_is_locked(azx_dev)) {
450 		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
451 		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
452 		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
453 		azx_dev->bufsize = 0;
454 		azx_dev->period_bytes = 0;
455 		azx_dev->format_val = 0;
456 	}
457 
458 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
459 
460 	err = chip->ops->substream_free_pages(chip, substream);
461 	azx_dev->prepared = 0;
462 	dsp_unlock(azx_dev);
463 	return err;
464 }
465 
466 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
467 {
468 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
469 	struct azx *chip = apcm->chip;
470 	struct azx_dev *azx_dev = get_azx_dev(substream);
471 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
472 	struct snd_pcm_runtime *runtime = substream->runtime;
473 	unsigned int bufsize, period_bytes, format_val, stream_tag;
474 	int err;
475 	struct hda_spdif_out *spdif =
476 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
477 	unsigned short ctls = spdif ? spdif->ctls : 0;
478 
479 	dsp_lock(azx_dev);
480 	if (dsp_is_locked(azx_dev)) {
481 		err = -EBUSY;
482 		goto unlock;
483 	}
484 
485 	azx_stream_reset(chip, azx_dev);
486 	format_val = snd_hda_calc_stream_format(apcm->codec,
487 						runtime->rate,
488 						runtime->channels,
489 						runtime->format,
490 						hinfo->maxbps,
491 						ctls);
492 	if (!format_val) {
493 		dev_err(chip->card->dev,
494 			"invalid format_val, rate=%d, ch=%d, format=%d\n",
495 			runtime->rate, runtime->channels, runtime->format);
496 		err = -EINVAL;
497 		goto unlock;
498 	}
499 
500 	bufsize = snd_pcm_lib_buffer_bytes(substream);
501 	period_bytes = snd_pcm_lib_period_bytes(substream);
502 
503 	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
504 		bufsize, format_val);
505 
506 	if (bufsize != azx_dev->bufsize ||
507 	    period_bytes != azx_dev->period_bytes ||
508 	    format_val != azx_dev->format_val ||
509 	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
510 		azx_dev->bufsize = bufsize;
511 		azx_dev->period_bytes = period_bytes;
512 		azx_dev->format_val = format_val;
513 		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
514 		err = azx_setup_periods(chip, substream, azx_dev);
515 		if (err < 0)
516 			goto unlock;
517 	}
518 
519 	/* when LPIB delay correction gives a small negative value,
520 	 * we ignore it; currently set the threshold statically to
521 	 * 64 frames
522 	 */
523 	if (runtime->period_size > 64)
524 		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
525 	else
526 		azx_dev->delay_negative_threshold = 0;
527 
528 	/* wallclk has a 24 MHz clock source */
529 	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
530 						runtime->rate) * 1000);
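	/*
	 * Example (informational): period_size = 1024 frames at 48 kHz gives
	 * (1024 * 24000 / 48000) * 1000 = 512000 wall-clock ticks, i.e.
	 * about 21.3 ms at 24 MHz, matching 1024 / 48000 s.
	 */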
531 	azx_setup_controller(chip, azx_dev);
532 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
533 		azx_dev->fifo_size =
534 			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
535 	else
536 		azx_dev->fifo_size = 0;
537 
538 	stream_tag = azx_dev->stream_tag;
539 	/* CA-IBG chips need the playback stream starting from 1 */
540 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
541 	    stream_tag > chip->capture_streams)
542 		stream_tag -= chip->capture_streams;
543 	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
544 				     azx_dev->format_val, substream);
545 
546  unlock:
547 	if (!err)
548 		azx_dev->prepared = 1;
549 	dsp_unlock(azx_dev);
550 	return err;
551 }
552 
553 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
554 {
555 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
556 	struct azx *chip = apcm->chip;
557 	struct azx_dev *azx_dev;
558 	struct snd_pcm_substream *s;
559 	int rstart = 0, start, nsync = 0, sbits = 0;
560 	int nwait, timeout;
561 
562 	azx_dev = get_azx_dev(substream);
563 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
564 
565 	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
566 		return -EPIPE;
567 
568 	switch (cmd) {
569 	case SNDRV_PCM_TRIGGER_START:
570 		rstart = 1;
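		/* fall through */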
571 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
572 	case SNDRV_PCM_TRIGGER_RESUME:
573 		start = 1;
574 		break;
575 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
576 	case SNDRV_PCM_TRIGGER_SUSPEND:
577 	case SNDRV_PCM_TRIGGER_STOP:
578 		start = 0;
579 		break;
580 	default:
581 		return -EINVAL;
582 	}
583 
584 	snd_pcm_group_for_each_entry(s, substream) {
585 		if (s->pcm->card != substream->pcm->card)
586 			continue;
587 		azx_dev = get_azx_dev(s);
588 		sbits |= 1 << azx_dev->index;
589 		nsync++;
590 		snd_pcm_trigger_done(s, substream);
591 	}
592 
593 	spin_lock(&chip->reg_lock);
594 
595 	/* first, set SYNC bits of corresponding streams */
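	/*
	 * While a stream's SSYNC bit is set its DMA engine will not run even
	 * with the RUN bit set, so all linked substreams can be armed first
	 * and then released together when SSYNC is cleared further below,
	 * giving grouped streams a synchronized start.
	 */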
596 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
597 		azx_writel(chip, OLD_SSYNC,
598 			azx_readl(chip, OLD_SSYNC) | sbits);
599 	else
600 		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
601 
602 	snd_pcm_group_for_each_entry(s, substream) {
603 		if (s->pcm->card != substream->pcm->card)
604 			continue;
605 		azx_dev = get_azx_dev(s);
606 		if (start) {
607 			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
608 			if (!rstart)
609 				azx_dev->start_wallclk -=
610 						azx_dev->period_wallclk;
611 			azx_stream_start(chip, azx_dev);
612 		} else {
613 			azx_stream_stop(chip, azx_dev);
614 		}
615 		azx_dev->running = start;
616 	}
617 	spin_unlock(&chip->reg_lock);
618 	if (start) {
619 		/* wait until all FIFOs get ready */
620 		for (timeout = 5000; timeout; timeout--) {
621 			nwait = 0;
622 			snd_pcm_group_for_each_entry(s, substream) {
623 				if (s->pcm->card != substream->pcm->card)
624 					continue;
625 				azx_dev = get_azx_dev(s);
626 				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
627 				      SD_STS_FIFO_READY))
628 					nwait++;
629 			}
630 			if (!nwait)
631 				break;
632 			cpu_relax();
633 		}
634 	} else {
635 		/* wait until all RUN bits are cleared */
636 		for (timeout = 5000; timeout; timeout--) {
637 			nwait = 0;
638 			snd_pcm_group_for_each_entry(s, substream) {
639 				if (s->pcm->card != substream->pcm->card)
640 					continue;
641 				azx_dev = get_azx_dev(s);
642 				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
643 				    SD_CTL_DMA_START)
644 					nwait++;
645 			}
646 			if (!nwait)
647 				break;
648 			cpu_relax();
649 		}
650 	}
651 	spin_lock(&chip->reg_lock);
652 	/* reset SYNC bits */
653 	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
654 		azx_writel(chip, OLD_SSYNC,
655 			azx_readl(chip, OLD_SSYNC) & ~sbits);
656 	else
657 		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
658 	if (start) {
659 		azx_timecounter_init(substream, 0, 0);
660 		snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
661 		substream->runtime->trigger_tstamp_latched = true;
662 
663 		if (nsync > 1) {
664 			cycle_t cycle_last;
665 
666 			/* same start cycle for master and group */
667 			azx_dev = get_azx_dev(substream);
668 			cycle_last = azx_dev->azx_tc.cycle_last;
669 
670 			snd_pcm_group_for_each_entry(s, substream) {
671 				if (s->pcm->card != substream->pcm->card)
672 					continue;
673 				azx_timecounter_init(s, 1, cycle_last);
674 			}
675 		}
676 	}
677 	spin_unlock(&chip->reg_lock);
678 	return 0;
679 }
680 
681 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
682 {
683 	return azx_sd_readl(chip, azx_dev, SD_LPIB);
684 }
685 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
686 
687 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
688 {
689 	return le32_to_cpu(*azx_dev->posbuf);
690 }
691 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
692 
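/*
 * The current stream position comes from either the LPIB register or the
 * DMA position buffer (the default), selected by the per-direction
 * get_position callback.  Additional controller-side and codec-side latency
 * is accumulated into runtime->delay (in frames) for the PCM core.
 */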
693 unsigned int azx_get_position(struct azx *chip,
694 			      struct azx_dev *azx_dev)
695 {
696 	struct snd_pcm_substream *substream = azx_dev->substream;
697 	unsigned int pos;
698 	int stream = substream->stream;
699 	int delay = 0;
700 
701 	if (chip->get_position[stream])
702 		pos = chip->get_position[stream](chip, azx_dev);
703 	else /* use the position buffer as default */
704 		pos = azx_get_pos_posbuf(chip, azx_dev);
705 
706 	if (pos >= azx_dev->bufsize)
707 		pos = 0;
708 
709 	if (substream->runtime) {
710 		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
711 		struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
712 
713 		if (chip->get_delay[stream])
714 			delay += chip->get_delay[stream](chip, azx_dev, pos);
715 		if (hinfo->ops.get_delay)
716 			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
717 						      substream);
718 		substream->runtime->delay = delay;
719 	}
720 
721 	trace_azx_get_position(chip, azx_dev, pos, delay);
722 	return pos;
723 }
724 EXPORT_SYMBOL_GPL(azx_get_position);
725 
726 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
727 {
728 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
729 	struct azx *chip = apcm->chip;
730 	struct azx_dev *azx_dev = get_azx_dev(substream);
731 	return bytes_to_frames(substream->runtime,
732 			       azx_get_position(chip, azx_dev));
733 }
734 
735 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
736 				struct timespec *ts)
737 {
738 	struct azx_dev *azx_dev = get_azx_dev(substream);
739 	u64 nsec;
740 
741 	nsec = timecounter_read(&azx_dev->azx_tc);
742 	nsec = div_u64(nsec, 3); /* can be optimized */
743 	nsec = azx_adjust_codec_delay(substream, nsec);
744 
745 	*ts = ns_to_timespec(nsec);
746 
747 	return 0;
748 }
749 
750 static struct snd_pcm_hardware azx_pcm_hw = {
751 	.info =			(SNDRV_PCM_INFO_MMAP |
752 				 SNDRV_PCM_INFO_INTERLEAVED |
753 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
754 				 SNDRV_PCM_INFO_MMAP_VALID |
755 				 /* No full-resume yet implemented */
756 				 /* SNDRV_PCM_INFO_RESUME |*/
757 				 SNDRV_PCM_INFO_PAUSE |
758 				 SNDRV_PCM_INFO_SYNC_START |
759 				 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
760 				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
761 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
762 	.rates =		SNDRV_PCM_RATE_48000,
763 	.rate_min =		48000,
764 	.rate_max =		48000,
765 	.channels_min =		2,
766 	.channels_max =		2,
767 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
768 	.period_bytes_min =	128,
769 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
770 	.periods_min =		2,
771 	.periods_max =		AZX_MAX_FRAG,
772 	.fifo_size =		0,
773 };
774 
775 static int azx_pcm_open(struct snd_pcm_substream *substream)
776 {
777 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
778 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
779 	struct azx *chip = apcm->chip;
780 	struct azx_dev *azx_dev;
781 	struct snd_pcm_runtime *runtime = substream->runtime;
782 	unsigned long flags;
783 	int err;
784 	int buff_step;
785 
786 	mutex_lock(&chip->open_mutex);
787 	azx_dev = azx_assign_device(chip, substream);
788 	if (azx_dev == NULL) {
789 		mutex_unlock(&chip->open_mutex);
790 		return -EBUSY;
791 	}
792 	runtime->hw = azx_pcm_hw;
793 	runtime->hw.channels_min = hinfo->channels_min;
794 	runtime->hw.channels_max = hinfo->channels_max;
795 	runtime->hw.formats = hinfo->formats;
796 	runtime->hw.rates = hinfo->rates;
797 	snd_pcm_limit_hw_rates(runtime);
798 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
799 
800 	/* avoid wrap-around with wall-clock */
801 	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
802 				     20,
803 				     178000000);
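	/*
	 * 178 s is roughly the 32-bit WALLCLK wrap-around period: 2^32 ticks
	 * at 24 MHz is about 178.96 s, so the buffer time (in microseconds)
	 * is capped below that to keep wall-clock timestamps unambiguous.
	 */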
804 
805 	if (chip->align_buffer_size)
806 		/* constrain buffer sizes to be a multiple of 128
807 		   bytes. This is more efficient in terms of memory
808 		   access but isn't required by the HDA spec and
809 		   prevents users from specifying exact period/buffer
810 		   sizes. For example, for 44.1 kHz a period size set
811 		   to 20 ms will be rounded to 19.59 ms. */
812 		buff_step = 128;
813 	else
814 		/* Don't enforce steps on buffer sizes; they still need
815 		   to be a multiple of 4 bytes (HDA spec). Tested on
816 		   Intel HDA controllers; may not work on all devices,
817 		   in which case this option needs to be disabled. */
818 		buff_step = 4;
819 
820 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
821 				   buff_step);
822 	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
823 				   buff_step);
824 	snd_hda_power_up_d3wait(apcm->codec);
825 	err = hinfo->ops.open(hinfo, apcm->codec, substream);
826 	if (err < 0) {
827 		azx_release_device(azx_dev);
828 		snd_hda_power_down(apcm->codec);
829 		mutex_unlock(&chip->open_mutex);
830 		return err;
831 	}
832 	snd_pcm_limit_hw_rates(runtime);
833 	/* sanity check */
834 	if (snd_BUG_ON(!runtime->hw.channels_min) ||
835 	    snd_BUG_ON(!runtime->hw.channels_max) ||
836 	    snd_BUG_ON(!runtime->hw.formats) ||
837 	    snd_BUG_ON(!runtime->hw.rates)) {
838 		azx_release_device(azx_dev);
839 		hinfo->ops.close(hinfo, apcm->codec, substream);
840 		snd_hda_power_down(apcm->codec);
841 		mutex_unlock(&chip->open_mutex);
842 		return -EINVAL;
843 	}
844 
845 	/* disable WALLCLOCK timestamps for capture streams
846 	   until we figure out how to handle digital inputs */
847 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
848 		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
849 
850 	spin_lock_irqsave(&chip->reg_lock, flags);
851 	azx_dev->substream = substream;
852 	azx_dev->running = 0;
853 	spin_unlock_irqrestore(&chip->reg_lock, flags);
854 
855 	runtime->private_data = azx_dev;
856 	snd_pcm_set_sync(substream);
857 	mutex_unlock(&chip->open_mutex);
858 	return 0;
859 }
860 
861 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
862 			struct vm_area_struct *area)
863 {
864 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
865 	struct azx *chip = apcm->chip;
866 	if (chip->ops->pcm_mmap_prepare)
867 		chip->ops->pcm_mmap_prepare(substream, area);
868 	return snd_pcm_lib_default_mmap(substream, area);
869 }
870 
871 static struct snd_pcm_ops azx_pcm_ops = {
872 	.open = azx_pcm_open,
873 	.close = azx_pcm_close,
874 	.ioctl = snd_pcm_lib_ioctl,
875 	.hw_params = azx_pcm_hw_params,
876 	.hw_free = azx_pcm_hw_free,
877 	.prepare = azx_pcm_prepare,
878 	.trigger = azx_pcm_trigger,
879 	.pointer = azx_pcm_pointer,
880 	.wall_clock =  azx_get_wallclock_tstamp,
881 	.mmap = azx_pcm_mmap,
882 	.page = snd_pcm_sgbuf_ops_page,
883 };
884 
885 static void azx_pcm_free(struct snd_pcm *pcm)
886 {
887 	struct azx_pcm *apcm = pcm->private_data;
888 	if (apcm) {
889 		list_del(&apcm->list);
890 		kfree(apcm);
891 	}
892 }
893 
894 #define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
895 
896 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
897 				 struct hda_pcm *cpcm)
898 {
899 	struct azx *chip = bus->private_data;
900 	struct snd_pcm *pcm;
901 	struct azx_pcm *apcm;
902 	int pcm_dev = cpcm->device;
903 	unsigned int size;
904 	int s, err;
905 
906 	list_for_each_entry(apcm, &chip->pcm_list, list) {
907 		if (apcm->pcm->device == pcm_dev) {
908 			dev_err(chip->card->dev, "PCM %d already exists\n",
909 				pcm_dev);
910 			return -EBUSY;
911 		}
912 	}
913 	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
914 			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
915 			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
916 			  &pcm);
917 	if (err < 0)
918 		return err;
919 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
920 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
921 	if (apcm == NULL)
922 		return -ENOMEM;
923 	apcm->chip = chip;
924 	apcm->pcm = pcm;
925 	apcm->codec = codec;
926 	pcm->private_data = apcm;
927 	pcm->private_free = azx_pcm_free;
928 	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
929 		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
930 	list_add_tail(&apcm->list, &chip->pcm_list);
931 	cpcm->pcm = pcm;
932 	for (s = 0; s < 2; s++) {
933 		apcm->hinfo[s] = &cpcm->stream[s];
934 		if (cpcm->stream[s].substreams)
935 			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
936 	}
937 	/* buffer pre-allocation */
938 	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
939 	if (size > MAX_PREALLOC_SIZE)
940 		size = MAX_PREALLOC_SIZE;
941 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
942 					      chip->card->dev,
943 					      size, MAX_PREALLOC_SIZE);
944 	/* link to codec */
945 	for (s = 0; s < 2; s++)
946 		pcm->streams[s].dev.parent = &codec->dev;
947 	return 0;
948 }
949 
950 /*
951  * CORB / RIRB interface
952  */
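/*
 * The CORB (Command Outbound Ring Buffer) carries verbs from the driver to
 * the codecs and the RIRB (Response Inbound Ring Buffer) carries responses
 * back; both are DMAed by the controller.  Here they share one DMA page:
 * the CORB lives at offset 0 and the RIRB at offset 2048 of chip->rb (see
 * azx_init_cmd_io() below).
 */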
953 static int azx_alloc_cmd_io(struct azx *chip)
954 {
955 	int err;
956 
957 	/* a single page (at least 4096 bytes) must suffice for both ring buffers */
958 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
959 					 PAGE_SIZE, &chip->rb);
960 	if (err < 0)
961 		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
962 	return err;
963 }
964 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
965 
966 static void azx_init_cmd_io(struct azx *chip)
967 {
968 	int timeout;
969 
970 	spin_lock_irq(&chip->reg_lock);
971 	/* CORB set up */
972 	chip->corb.addr = chip->rb.addr;
973 	chip->corb.buf = (u32 *)chip->rb.area;
974 	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
975 	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
976 
977 	/* set the CORB size to 256 entries (ULI requires this explicitly) */
978 	azx_writeb(chip, CORBSIZE, 0x02);
979 	/* set the corb write pointer to 0 */
980 	azx_writew(chip, CORBWP, 0);
981 
982 	/* reset the corb hw read pointer */
983 	azx_writew(chip, CORBRP, AZX_CORBRP_RST);
984 	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
985 		for (timeout = 1000; timeout > 0; timeout--) {
986 			if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
987 				break;
988 			udelay(1);
989 		}
990 		if (timeout <= 0)
991 			dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
992 				azx_readw(chip, CORBRP));
993 
994 		azx_writew(chip, CORBRP, 0);
995 		for (timeout = 1000; timeout > 0; timeout--) {
996 			if (azx_readw(chip, CORBRP) == 0)
997 				break;
998 			udelay(1);
999 		}
1000 		if (timeout <= 0)
1001 			dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1002 				azx_readw(chip, CORBRP));
1003 	}
1004 
1005 	/* enable corb dma */
1006 	azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
1007 
1008 	/* RIRB set up */
1009 	chip->rirb.addr = chip->rb.addr + 2048;
1010 	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1011 	chip->rirb.wp = chip->rirb.rp = 0;
1012 	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1013 	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1014 	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1015 
1016 	/* set the RIRB size to 256 entries (ULI requires this explicitly) */
1017 	azx_writeb(chip, RIRBSIZE, 0x02);
1018 	/* reset the rirb hw write pointer */
1019 	azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1020 	/* set N=1, get RIRB response interrupt for new entry */
1021 	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1022 		azx_writew(chip, RINTCNT, 0xc0);
1023 	else
1024 		azx_writew(chip, RINTCNT, 1);
1025 	/* enable rirb dma and response irq */
1026 	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1027 	spin_unlock_irq(&chip->reg_lock);
1028 }
1029 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1030 
1031 static void azx_free_cmd_io(struct azx *chip)
1032 {
1033 	spin_lock_irq(&chip->reg_lock);
1034 	/* disable ringbuffer DMAs */
1035 	azx_writeb(chip, RIRBCTL, 0);
1036 	azx_writeb(chip, CORBCTL, 0);
1037 	spin_unlock_irq(&chip->reg_lock);
1038 }
1039 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1040 
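/*
 * A verb is a 32-bit word with the codec address in bits 31:28 (hence the
 * shift below) and the node ID, verb and payload packed into the lower
 * bits; see e.g. the command built in probe_codec().
 */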
1041 static unsigned int azx_command_addr(u32 cmd)
1042 {
1043 	unsigned int addr = cmd >> 28;
1044 
1045 	if (addr >= AZX_MAX_CODECS) {
1046 		snd_BUG();
1047 		addr = 0;
1048 	}
1049 
1050 	return addr;
1051 }
1052 
1053 /* send a command */
1054 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1055 {
1056 	struct azx *chip = bus->private_data;
1057 	unsigned int addr = azx_command_addr(val);
1058 	unsigned int wp, rp;
1059 
1060 	spin_lock_irq(&chip->reg_lock);
1061 
1062 	/* add command to corb */
1063 	wp = azx_readw(chip, CORBWP);
1064 	if (wp == 0xffff) {
1065 		/* something wrong, controller likely turned to D3 */
1066 		spin_unlock_irq(&chip->reg_lock);
1067 		return -EIO;
1068 	}
1069 	wp++;
1070 	wp %= AZX_MAX_CORB_ENTRIES;
1071 
1072 	rp = azx_readw(chip, CORBRP);
1073 	if (wp == rp) {
1074 		/* oops, it's full */
1075 		spin_unlock_irq(&chip->reg_lock);
1076 		return -EAGAIN;
1077 	}
1078 
1079 	chip->rirb.cmds[addr]++;
1080 	chip->corb.buf[wp] = cpu_to_le32(val);
1081 	azx_writew(chip, CORBWP, wp);
1082 
1083 	spin_unlock_irq(&chip->reg_lock);
1084 
1085 	return 0;
1086 }
1087 
1088 #define AZX_RIRB_EX_UNSOL_EV	(1<<4)
1089 
1090 /* retrieve RIRB entry - called from interrupt handler */
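/*
 * Each RIRB entry is two 32-bit words: the response itself and an extended
 * word carrying the responding codec address in its low 4 bits plus the
 * unsolicited-event flag (AZX_RIRB_EX_UNSOL_EV).  Solicited responses are
 * stored in rirb.res[addr] and the outstanding-command counter for that
 * codec is decremented; unsolicited ones are queued to the codec layer.
 */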
1091 static void azx_update_rirb(struct azx *chip)
1092 {
1093 	unsigned int rp, wp;
1094 	unsigned int addr;
1095 	u32 res, res_ex;
1096 
1097 	wp = azx_readw(chip, RIRBWP);
1098 	if (wp == 0xffff) {
1099 		/* something wrong, controller likely turned to D3 */
1100 		return;
1101 	}
1102 
1103 	if (wp == chip->rirb.wp)
1104 		return;
1105 	chip->rirb.wp = wp;
1106 
1107 	while (chip->rirb.rp != wp) {
1108 		chip->rirb.rp++;
1109 		chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1110 
1111 		rp = chip->rirb.rp << 1; /* an RIRB entry is 8 bytes */
1112 		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1113 		res = le32_to_cpu(chip->rirb.buf[rp]);
1114 		addr = res_ex & 0xf;
1115 		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1116 			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1117 				res, res_ex,
1118 				chip->rirb.rp, wp);
1119 			snd_BUG();
1120 		} else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1121 			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1122 		else if (chip->rirb.cmds[addr]) {
1123 			chip->rirb.res[addr] = res;
1124 			smp_wmb();
1125 			chip->rirb.cmds[addr]--;
1126 		} else if (printk_ratelimit()) {
1127 			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1128 				res, res_ex,
1129 				chip->last_cmd[addr]);
1130 		}
1131 	}
1132 }
1133 
1134 /* receive a response */
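/*
 * Error handling escalates step by step when no response arrives within
 * about a second: poll the RIRB once, then switch to permanent polling
 * mode, then disable MSI, and finally either request a bus reset or fall
 * back to the single-command (immediate) interface.
 */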
1135 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1136 					  unsigned int addr)
1137 {
1138 	struct azx *chip = bus->private_data;
1139 	unsigned long timeout;
1140 	unsigned long loopcounter;
1141 	int do_poll = 0;
1142 
1143  again:
1144 	timeout = jiffies + msecs_to_jiffies(1000);
1145 
1146 	for (loopcounter = 0;; loopcounter++) {
1147 		if (chip->polling_mode || do_poll) {
1148 			spin_lock_irq(&chip->reg_lock);
1149 			azx_update_rirb(chip);
1150 			spin_unlock_irq(&chip->reg_lock);
1151 		}
1152 		if (!chip->rirb.cmds[addr]) {
1153 			smp_rmb();
1154 			bus->rirb_error = 0;
1155 
1156 			if (!do_poll)
1157 				chip->poll_count = 0;
1158 			return chip->rirb.res[addr]; /* the last value */
1159 		}
1160 		if (time_after(jiffies, timeout))
1161 			break;
1162 		if (bus->needs_damn_long_delay || loopcounter > 3000)
1163 			msleep(2); /* temporary workaround */
1164 		else {
1165 			udelay(10);
1166 			cond_resched();
1167 		}
1168 	}
1169 
1170 	if (!bus->no_response_fallback)
1171 		return -1;
1172 
1173 	if (!chip->polling_mode && chip->poll_count < 2) {
1174 		dev_dbg(chip->card->dev,
1175 			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1176 			chip->last_cmd[addr]);
1177 		do_poll = 1;
1178 		chip->poll_count++;
1179 		goto again;
1180 	}
1181 
1182 
1183 	if (!chip->polling_mode) {
1184 		dev_warn(chip->card->dev,
1185 			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1186 			 chip->last_cmd[addr]);
1187 		chip->polling_mode = 1;
1188 		goto again;
1189 	}
1190 
1191 	if (chip->msi) {
1192 		dev_warn(chip->card->dev,
1193 			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1194 			 chip->last_cmd[addr]);
1195 		if (chip->ops->disable_msi_reset_irq &&
1196 		    chip->ops->disable_msi_reset_irq(chip) < 0) {
1197 			bus->rirb_error = 1;
1198 			return -1;
1199 		}
1200 		goto again;
1201 	}
1202 
1203 	if (chip->probing) {
1204 		/* If this critical timeout happens during the codec probing
1205 		 * phase, this is likely an access to a non-existing codec
1206 		 * slot.  Better to return an error and reset the system.
1207 		 */
1208 		return -1;
1209 	}
1210 
1211 	/* a fatal communication error; need either to reset or to fall back
1212 	 * to the single_cmd mode
1213 	 */
1214 	bus->rirb_error = 1;
1215 	if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1216 		bus->response_reset = 1;
1217 		return -1; /* give a chance to retry */
1218 	}
1219 
1220 	dev_err(chip->card->dev,
1221 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1222 		chip->last_cmd[addr]);
1223 	chip->single_cmd = 1;
1224 	bus->response_reset = 0;
1225 	/* release CORB/RIRB */
1226 	azx_free_cmd_io(chip);
1227 	/* disable unsolicited responses */
1228 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1229 	return -1;
1230 }
1231 
1232 /*
1233  * Use the single immediate command instead of CORB/RIRB for simplicity
1234  *
1235  * Note: according to Intel, this is not the preferred usage.  The command
1236  *       interface was intended for the BIOS only, and may get confused with
1237  *       unsolicited responses.  So, we shouldn't use it for normal operation
1238  *       from the driver.
1239  *       I left the code, however, for debugging/testing purposes.
1240  */
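/*
 * The immediate command interface uses three registers: the verb is written
 * to IC, the ICB (busy) bit in IRS kicks off the transfer, and the response
 * is read from IR once IRS reports the IRV (result valid) bit.
 */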
1241 
1242 /* receive a response */
1243 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1244 {
1245 	int timeout = 50;
1246 
1247 	while (timeout--) {
1248 		/* check the IRV (immediate result valid) bit */
1249 		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1250 			/* reuse rirb.res as the response return value */
1251 			chip->rirb.res[addr] = azx_readl(chip, IR);
1252 			return 0;
1253 		}
1254 		udelay(1);
1255 	}
1256 	if (printk_ratelimit())
1257 		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1258 			azx_readw(chip, IRS));
1259 	chip->rirb.res[addr] = -1;
1260 	return -EIO;
1261 }
1262 
1263 /* send a command */
1264 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1265 {
1266 	struct azx *chip = bus->private_data;
1267 	unsigned int addr = azx_command_addr(val);
1268 	int timeout = 50;
1269 
1270 	bus->rirb_error = 0;
1271 	while (timeout--) {
1272 		/* check ICB busy bit */
1273 		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1274 			/* Clear IRV valid bit */
1275 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1276 				   AZX_IRS_VALID);
1277 			azx_writel(chip, IC, val);
1278 			azx_writew(chip, IRS, azx_readw(chip, IRS) |
1279 				   AZX_IRS_BUSY);
1280 			return azx_single_wait_for_response(chip, addr);
1281 		}
1282 		udelay(1);
1283 	}
1284 	if (printk_ratelimit())
1285 		dev_dbg(chip->card->dev,
1286 			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
1287 			azx_readw(chip, IRS), val);
1288 	return -EIO;
1289 }
1290 
1291 /* receive a response */
1292 static unsigned int azx_single_get_response(struct hda_bus *bus,
1293 					    unsigned int addr)
1294 {
1295 	struct azx *chip = bus->private_data;
1296 	return chip->rirb.res[addr];
1297 }
1298 
1299 /*
1300  * The below are the main callbacks from hda_codec.
1301  *
1302  * They are just the skeleton to call sub-callbacks according to the
1303  * current setting of chip->single_cmd.
1304  */
1305 
1306 /* send a command */
1307 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1308 {
1309 	struct azx *chip = bus->private_data;
1310 
1311 	if (chip->disabled)
1312 		return 0;
1313 	chip->last_cmd[azx_command_addr(val)] = val;
1314 	if (chip->single_cmd)
1315 		return azx_single_send_cmd(bus, val);
1316 	else
1317 		return azx_corb_send_cmd(bus, val);
1318 }
1319 EXPORT_SYMBOL_GPL(azx_send_cmd);
1320 
1321 /* get a response */
1322 static unsigned int azx_get_response(struct hda_bus *bus,
1323 				     unsigned int addr)
1324 {
1325 	struct azx *chip = bus->private_data;
1326 	if (chip->disabled)
1327 		return 0;
1328 	if (chip->single_cmd)
1329 		return azx_single_get_response(bus, addr);
1330 	else
1331 		return azx_rirb_get_response(bus, addr);
1332 }
1333 EXPORT_SYMBOL_GPL(azx_get_response);
1334 
1335 #ifdef CONFIG_SND_HDA_DSP_LOADER
1336 /*
1337  * DSP loading code (e.g. for CA0132)
1338  */
1339 
1340 /* use the first stream for loading DSP */
1341 static struct azx_dev *
1342 azx_get_dsp_loader_dev(struct azx *chip)
1343 {
1344 	return &chip->azx_dev[chip->playback_index_offset];
1345 }
1346 
1347 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1348 				unsigned int byte_size,
1349 				struct snd_dma_buffer *bufp)
1350 {
1351 	u32 *bdl;
1352 	struct azx *chip = bus->private_data;
1353 	struct azx_dev *azx_dev;
1354 	int err;
1355 
1356 	azx_dev = azx_get_dsp_loader_dev(chip);
1357 
1358 	dsp_lock(azx_dev);
1359 	spin_lock_irq(&chip->reg_lock);
1360 	if (azx_dev->running || azx_dev->locked) {
1361 		spin_unlock_irq(&chip->reg_lock);
1362 		err = -EBUSY;
1363 		goto unlock;
1364 	}
1365 	azx_dev->prepared = 0;
1366 	chip->saved_azx_dev = *azx_dev;
1367 	azx_dev->locked = 1;
1368 	spin_unlock_irq(&chip->reg_lock);
1369 
1370 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1371 					 byte_size, bufp);
1372 	if (err < 0)
1373 		goto err_alloc;
1374 
1375 	azx_dev->bufsize = byte_size;
1376 	azx_dev->period_bytes = byte_size;
1377 	azx_dev->format_val = format;
1378 
1379 	azx_stream_reset(chip, azx_dev);
1380 
1381 	/* reset BDL address */
1382 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1383 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1384 
1385 	azx_dev->frags = 0;
1386 	bdl = (u32 *)azx_dev->bdl.area;
1387 	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1388 	if (err < 0)
1389 		goto error;
1390 
1391 	azx_setup_controller(chip, azx_dev);
1392 	dsp_unlock(azx_dev);
1393 	return azx_dev->stream_tag;
1394 
1395  error:
1396 	chip->ops->dma_free_pages(chip, bufp);
1397  err_alloc:
1398 	spin_lock_irq(&chip->reg_lock);
1399 	if (azx_dev->opened)
1400 		*azx_dev = chip->saved_azx_dev;
1401 	azx_dev->locked = 0;
1402 	spin_unlock_irq(&chip->reg_lock);
1403  unlock:
1404 	dsp_unlock(azx_dev);
1405 	return err;
1406 }
1407 
1408 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1409 {
1410 	struct azx *chip = bus->private_data;
1411 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1412 
1413 	if (start)
1414 		azx_stream_start(chip, azx_dev);
1415 	else
1416 		azx_stream_stop(chip, azx_dev);
1417 	azx_dev->running = start;
1418 }
1419 
1420 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1421 				 struct snd_dma_buffer *dmab)
1422 {
1423 	struct azx *chip = bus->private_data;
1424 	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1425 
1426 	if (!dmab->area || !azx_dev->locked)
1427 		return;
1428 
1429 	dsp_lock(azx_dev);
1430 	/* reset BDL address */
1431 	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1432 	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1433 	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1434 	azx_dev->bufsize = 0;
1435 	azx_dev->period_bytes = 0;
1436 	azx_dev->format_val = 0;
1437 
1438 	chip->ops->dma_free_pages(chip, dmab);
1439 	dmab->area = NULL;
1440 
1441 	spin_lock_irq(&chip->reg_lock);
1442 	if (azx_dev->opened)
1443 		*azx_dev = chip->saved_azx_dev;
1444 	azx_dev->locked = 0;
1445 	spin_unlock_irq(&chip->reg_lock);
1446 	dsp_unlock(azx_dev);
1447 }
1448 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1449 
1450 int azx_alloc_stream_pages(struct azx *chip)
1451 {
1452 	int i, err;
1453 	struct snd_card *card = chip->card;
1454 
1455 	for (i = 0; i < chip->num_streams; i++) {
1456 		dsp_lock_init(&chip->azx_dev[i]);
1457 		/* allocate memory for the BDL for each stream */
1458 		err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1459 						 BDL_SIZE,
1460 						 &chip->azx_dev[i].bdl);
1461 		if (err < 0) {
1462 			dev_err(card->dev, "cannot allocate BDL\n");
1463 			return -ENOMEM;
1464 		}
1465 	}
1466 	/* allocate memory for the position buffer */
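	/*
	 * The DMA position buffer holds one entry per stream; each entry is
	 * 8 bytes, with the current link position in the low 32 bits (hence
	 * num_streams * 8 here and the i * 8 offset used in
	 * azx_init_stream()).
	 */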
1467 	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1468 					 chip->num_streams * 8, &chip->posbuf);
1469 	if (err < 0) {
1470 		dev_err(card->dev, "cannot allocate posbuf\n");
1471 		return -ENOMEM;
1472 	}
1473 
1474 	/* allocate CORB/RIRB */
1475 	err = azx_alloc_cmd_io(chip);
1476 	if (err < 0)
1477 		return err;
1478 	return 0;
1479 }
1480 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1481 
1482 void azx_free_stream_pages(struct azx *chip)
1483 {
1484 	int i;
1485 	if (chip->azx_dev) {
1486 		for (i = 0; i < chip->num_streams; i++)
1487 			if (chip->azx_dev[i].bdl.area)
1488 				chip->ops->dma_free_pages(
1489 					chip, &chip->azx_dev[i].bdl);
1490 	}
1491 	if (chip->rb.area)
1492 		chip->ops->dma_free_pages(chip, &chip->rb);
1493 	if (chip->posbuf.area)
1494 		chip->ops->dma_free_pages(chip, &chip->posbuf);
1495 }
1496 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1497 
1498 /*
1499  * Lowlevel interface
1500  */
1501 
1502 /* enter link reset */
1503 void azx_enter_link_reset(struct azx *chip)
1504 {
1505 	unsigned long timeout;
1506 
1507 	/* reset controller */
1508 	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1509 
1510 	timeout = jiffies + msecs_to_jiffies(100);
1511 	while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1512 			time_before(jiffies, timeout))
1513 		usleep_range(500, 1000);
1514 }
1515 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1516 
1517 /* exit link reset */
1518 static void azx_exit_link_reset(struct azx *chip)
1519 {
1520 	unsigned long timeout;
1521 
1522 	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1523 
1524 	timeout = jiffies + msecs_to_jiffies(100);
1525 	while (!azx_readb(chip, GCTL) &&
1526 			time_before(jiffies, timeout))
1527 		usleep_range(500, 1000);
1528 }
1529 
1530 /* reset codec link */
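/*
 * Full link reset sequence: clear STATESTS, drop the reset bit in GCTL to
 * put the link into reset, wait, raise it again to bring the link out of
 * reset, and give the codecs time to signal their presence; the resulting
 * STATESTS bits become chip->codec_mask (one bit per detected codec
 * address).
 */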
1531 static int azx_reset(struct azx *chip, bool full_reset)
1532 {
1533 	if (!full_reset)
1534 		goto __skip;
1535 
1536 	/* clear STATESTS */
1537 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1538 
1539 	/* reset controller */
1540 	azx_enter_link_reset(chip);
1541 
1542 	/* delay for >= 100us for codec PLL to settle per spec
1543 	 * Rev 0.9 section 5.5.1
1544 	 */
1545 	usleep_range(500, 1000);
1546 
1547 	/* Bring controller out of reset */
1548 	azx_exit_link_reset(chip);
1549 
1550 	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
1551 	usleep_range(1000, 1200);
1552 
1553       __skip:
1554 	/* check to see if controller is ready */
1555 	if (!azx_readb(chip, GCTL)) {
1556 		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1557 		return -EBUSY;
1558 	}
1559 
1560 	/* Accept unsolicited responses */
1561 	if (!chip->single_cmd)
1562 		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1563 			   AZX_GCTL_UNSOL);
1564 
1565 	/* detect codecs */
1566 	if (!chip->codec_mask) {
1567 		chip->codec_mask = azx_readw(chip, STATESTS);
1568 		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1569 			chip->codec_mask);
1570 	}
1571 
1572 	return 0;
1573 }
1574 
1575 /* enable interrupts */
1576 static void azx_int_enable(struct azx *chip)
1577 {
1578 	/* enable controller CIE and GIE */
1579 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1580 		   AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1581 }
1582 
1583 /* disable interrupts */
1584 static void azx_int_disable(struct azx *chip)
1585 {
1586 	int i;
1587 
1588 	/* disable interrupts in stream descriptor */
1589 	for (i = 0; i < chip->num_streams; i++) {
1590 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1591 		azx_sd_writeb(chip, azx_dev, SD_CTL,
1592 			      azx_sd_readb(chip, azx_dev, SD_CTL) &
1593 					~SD_INT_MASK);
1594 	}
1595 
1596 	/* disable SIE for all streams */
1597 	azx_writeb(chip, INTCTL, 0);
1598 
1599 	/* disable controller CIE and GIE */
1600 	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1601 		   ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1602 }
1603 
1604 /* clear interrupts */
1605 static void azx_int_clear(struct azx *chip)
1606 {
1607 	int i;
1608 
1609 	/* clear stream status */
1610 	for (i = 0; i < chip->num_streams; i++) {
1611 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1612 		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1613 	}
1614 
1615 	/* clear STATESTS */
1616 	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1617 
1618 	/* clear rirb status */
1619 	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1620 
1621 	/* clear int status */
1622 	azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1623 }
1624 
1625 /*
1626  * reset and start the controller registers
1627  */
1628 void azx_init_chip(struct azx *chip, bool full_reset)
1629 {
1630 	if (chip->initialized)
1631 		return;
1632 
1633 	/* reset controller */
1634 	azx_reset(chip, full_reset);
1635 
1636 	/* initialize interrupts */
1637 	azx_int_clear(chip);
1638 	azx_int_enable(chip);
1639 
1640 	/* initialize the codec command I/O */
1641 	if (!chip->single_cmd)
1642 		azx_init_cmd_io(chip);
1643 
1644 	/* program the position buffer */
1645 	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1646 	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1647 
1648 	chip->initialized = 1;
1649 }
1650 EXPORT_SYMBOL_GPL(azx_init_chip);
1651 
1652 void azx_stop_chip(struct azx *chip)
1653 {
1654 	if (!chip->initialized)
1655 		return;
1656 
1657 	/* disable interrupts */
1658 	azx_int_disable(chip);
1659 	azx_int_clear(chip);
1660 
1661 	/* disable CORB/RIRB */
1662 	azx_free_cmd_io(chip);
1663 
1664 	/* disable position buffer */
1665 	azx_writel(chip, DPLBASE, 0);
1666 	azx_writel(chip, DPUBASE, 0);
1667 
1668 	chip->initialized = 0;
1669 }
1670 EXPORT_SYMBOL_GPL(azx_stop_chip);
1671 
1672 /*
1673  * interrupt handler
1674  */
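/*
 * The handler reads INTSTS once, acknowledges the per-stream SD_STS bits
 * and calls snd_pcm_period_elapsed() for each running stream whose
 * buffer-completion interrupt looks plausible, then acknowledges RIRBSTS
 * and drains any new codec responses via azx_update_rirb().
 */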
1675 irqreturn_t azx_interrupt(int irq, void *dev_id)
1676 {
1677 	struct azx *chip = dev_id;
1678 	struct azx_dev *azx_dev;
1679 	u32 status;
1680 	u8 sd_status;
1681 	int i;
1682 
1683 #ifdef CONFIG_PM
1684 	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1685 		if (!pm_runtime_active(chip->card->dev))
1686 			return IRQ_NONE;
1687 #endif
1688 
1689 	spin_lock(&chip->reg_lock);
1690 
1691 	if (chip->disabled) {
1692 		spin_unlock(&chip->reg_lock);
1693 		return IRQ_NONE;
1694 	}
1695 
1696 	status = azx_readl(chip, INTSTS);
1697 	if (status == 0 || status == 0xffffffff) {
1698 		spin_unlock(&chip->reg_lock);
1699 		return IRQ_NONE;
1700 	}
1701 
1702 	for (i = 0; i < chip->num_streams; i++) {
1703 		azx_dev = &chip->azx_dev[i];
1704 		if (status & azx_dev->sd_int_sta_mask) {
1705 			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1706 			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1707 			if (!azx_dev->substream || !azx_dev->running ||
1708 			    !(sd_status & SD_INT_COMPLETE))
1709 				continue;
1710 			/* check whether this IRQ is really acceptable */
1711 			if (!chip->ops->position_check ||
1712 			    chip->ops->position_check(chip, azx_dev)) {
1713 				spin_unlock(&chip->reg_lock);
1714 				snd_pcm_period_elapsed(azx_dev->substream);
1715 				spin_lock(&chip->reg_lock);
1716 			}
1717 		}
1718 	}
1719 
1720 	/* clear rirb int */
1721 	status = azx_readb(chip, RIRBSTS);
1722 	if (status & RIRB_INT_MASK) {
1723 		if (status & RIRB_INT_RESPONSE) {
1724 			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1725 				udelay(80);
1726 			azx_update_rirb(chip);
1727 		}
1728 		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1729 	}
1730 
1731 	spin_unlock(&chip->reg_lock);
1732 
1733 	return IRQ_HANDLED;
1734 }
1735 EXPORT_SYMBOL_GPL(azx_interrupt);
1736 
1737 /*
1738  * Codec interface
1739  */
1740 
1741 /*
1742  * Probe the given codec address
1743  */
1744 static int probe_codec(struct azx *chip, int addr)
1745 {
1746 	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1747 		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1748 	unsigned int res;
1749 
1750 	mutex_lock(&chip->bus->cmd_mutex);
1751 	chip->probing = 1;
1752 	azx_send_cmd(chip->bus, cmd);
1753 	res = azx_get_response(chip->bus, addr);
1754 	chip->probing = 0;
1755 	mutex_unlock(&chip->bus->cmd_mutex);
1756 	if (res == -1)
1757 		return -EIO;
1758 	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1759 	return 0;
1760 }
1761 
1762 static void azx_bus_reset(struct hda_bus *bus)
1763 {
1764 	struct azx *chip = bus->private_data;
1765 
1766 	bus->in_reset = 1;
1767 	azx_stop_chip(chip);
1768 	azx_init_chip(chip, true);
1769 #ifdef CONFIG_PM
1770 	if (chip->initialized) {
1771 		struct azx_pcm *p;
1772 		list_for_each_entry(p, &chip->pcm_list, list)
1773 			snd_pcm_suspend_all(p->pcm);
1774 		snd_hda_suspend(chip->bus);
1775 		snd_hda_resume(chip->bus);
1776 	}
1777 #endif
1778 	bus->in_reset = 0;
1779 }
1780 
1781 #ifdef CONFIG_PM
1782 /* power-up/down the controller */
1783 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1784 {
1785 	struct azx *chip = bus->private_data;
1786 
1787 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1788 		return;
1789 
1790 	if (power_up)
1791 		pm_runtime_get_sync(chip->card->dev);
1792 	else
1793 		pm_runtime_put_sync(chip->card->dev);
1794 }
1795 #endif
1796 
1797 static int get_jackpoll_interval(struct azx *chip)
1798 {
1799 	int i;
1800 	unsigned int j;
1801 
1802 	if (!chip->jackpoll_ms)
1803 		return 0;
1804 
1805 	i = chip->jackpoll_ms[chip->dev_index];
1806 	if (i == 0)
1807 		return 0;
1808 	if (i < 50 || i > 60000)
1809 		j = 0;
1810 	else
1811 		j = msecs_to_jiffies(i);
1812 	if (j == 0)
1813 		dev_warn(chip->card->dev,
1814 			 "jackpoll_ms value out of range: %d\n", i);
1815 	return j;
1816 }
1817 
1818 /* Codec initialization */
1819 int azx_codec_create(struct azx *chip, const char *model,
1820 		     unsigned int max_slots,
1821 		     int *power_save_to)
1822 {
1823 	struct hda_bus_template bus_temp;
1824 	int c, codecs, err;
1825 
1826 	memset(&bus_temp, 0, sizeof(bus_temp));
1827 	bus_temp.private_data = chip;
1828 	bus_temp.modelname = model;
1829 	bus_temp.pci = chip->pci;
1830 	bus_temp.ops.command = azx_send_cmd;
1831 	bus_temp.ops.get_response = azx_get_response;
1832 	bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1833 	bus_temp.ops.bus_reset = azx_bus_reset;
1834 #ifdef CONFIG_PM
1835 	bus_temp.power_save = power_save_to;
1836 	bus_temp.ops.pm_notify = azx_power_notify;
1837 #endif
1838 #ifdef CONFIG_SND_HDA_DSP_LOADER
1839 	bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1840 	bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1841 	bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1842 #endif
1843 
1844 	err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1845 	if (err < 0)
1846 		return err;
1847 
1848 	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1849 		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1850 		chip->bus->needs_damn_long_delay = 1;
1851 	}
1852 
1853 	codecs = 0;
1854 	if (!max_slots)
1855 		max_slots = AZX_DEFAULT_CODECS;
1856 
1857 	/* First try to probe all given codec slots */
1858 	for (c = 0; c < max_slots; c++) {
1859 		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1860 			if (probe_codec(chip, c) < 0) {
1861 				/* Some BIOSes report codec addresses that
1862 				 * don't actually exist
1863 				 */
1864 				dev_warn(chip->card->dev,
1865 					 "Codec #%d probe error; disabling it...\n", c);
1866 				chip->codec_mask &= ~(1 << c);
1867 				/* Worse, accessing a non-existing codec
1868 				 * often screws up the controller chip and
1869 				 * disturbs further communication.
1870 				 * Thus, if an error occurs during probing,
1871 				 * it is better to reset the controller chip
1872 				 * to get back to a sane state.
1873 				 */
1874 				azx_stop_chip(chip);
1875 				azx_init_chip(chip, true);
1876 			}
1877 		}
1878 	}
1879 
1880 	/* AMD chipsets often cause communication stalls upon certain
1881 	 * sequences such as pin detection.  It seems that forcing synced
1882 	 * access works around the stall.  Grrr...
1883 	 */
1884 	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1885 		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1886 		chip->bus->sync_write = 1;
1887 		chip->bus->allow_bus_reset = 1;
1888 	}
1889 
1890 	/* Then create codec instances */
1891 	for (c = 0; c < max_slots; c++) {
1892 		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1893 			struct hda_codec *codec;
1894 			err = snd_hda_codec_new(chip->bus, c, &codec);
1895 			if (err < 0)
1896 				continue;
1897 			codec->jackpoll_interval = get_jackpoll_interval(chip);
1898 			codec->beep_mode = chip->beep_mode;
1899 			codecs++;
1900 		}
1901 	}
1902 	if (!codecs) {
1903 		dev_err(chip->card->dev, "no codecs initialized\n");
1904 		return -ENXIO;
1905 	}
1906 	return 0;
1907 }
1908 EXPORT_SYMBOL_GPL(azx_codec_create);
1909 
1910 /* configure each codec instance */
1911 int azx_codec_configure(struct azx *chip)
1912 {
1913 	struct hda_codec *codec;
1914 	list_for_each_entry(codec, &chip->bus->codec_list, list) {
1915 		snd_hda_codec_configure(codec);
1916 	}
1917 	return 0;
1918 }
1919 EXPORT_SYMBOL_GPL(azx_codec_configure);
1920 
1921 /* mixer creation - all stuff is implemented in hda module */
1922 int azx_mixer_create(struct azx *chip)
1923 {
1924 	return snd_hda_build_controls(chip->bus);
1925 }
1926 EXPORT_SYMBOL_GPL(azx_mixer_create);
1927 
1928 
1929 static bool is_input_stream(struct azx *chip, unsigned char index)
1930 {
1931 	return (index >= chip->capture_index_offset &&
1932 		index < chip->capture_index_offset + chip->capture_streams);
1933 }
1934 
1935 /* initialize SD streams */
1936 int azx_init_stream(struct azx *chip)
1937 {
1938 	int i;
1939 	int in_stream_tag = 0;
1940 	int out_stream_tag = 0;
1941 
1942 	/* initialize each stream (aka device):
1943 	 * assign the starting BDL address to each stream (device)
1944 	 * and initialize it
1945 	 */
1946 	for (i = 0; i < chip->num_streams; i++) {
1947 		struct azx_dev *azx_dev = &chip->azx_dev[i];
1948 		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1949 		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1950 		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1951 		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1952 		azx_dev->sd_int_sta_mask = 1 << i;
1953 		azx_dev->index = i;
1954 
1955 		/* the stream tag must be unique throughout
1956 		 * the stream direction group;
1957 		 * valid values are 1...15.
1958 		 * Use separate per-direction stream tags if the
1959 		 * AZX_DCAPS_SEPARATE_STREAM_TAG flag is set.
1960 		 */
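		/*
		 * Example (informational): with 4 capture and 4 playback
		 * streams and AZX_DCAPS_SEPARATE_STREAM_TAG set, SDI0..SDI3
		 * get tags 1..4 and SDO0..SDO3 also get tags 1..4; without
		 * the flag, stream i simply gets tag i + 1.
		 */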
1961 		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1962 			azx_dev->stream_tag =
1963 				is_input_stream(chip, i) ?
1964 				++in_stream_tag :
1965 				++out_stream_tag;
1966 		else
1967 			azx_dev->stream_tag = i + 1;
1968 	}
1969 
1970 	return 0;
1971 }
1972 EXPORT_SYMBOL_GPL(azx_init_stream);
1973 
1974 /*
1975  * reboot notifier for hang-up problem at power-down
1976  */
1977 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1978 {
1979 	struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1980 	snd_hda_bus_reboot_notify(chip->bus);
1981 	azx_stop_chip(chip);
1982 	return NOTIFY_OK;
1983 }
1984 
1985 void azx_notifier_register(struct azx *chip)
1986 {
1987 	chip->reboot_notifier.notifier_call = azx_halt;
1988 	register_reboot_notifier(&chip->reboot_notifier);
1989 }
1990 EXPORT_SYMBOL_GPL(azx_notifier_register);
1991 
1992 void azx_notifier_unregister(struct azx *chip)
1993 {
1994 	if (chip->reboot_notifier.notifier_call)
1995 		unregister_reboot_notifier(&chip->reboot_notifier);
1996 }
1997 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1998 
1999 MODULE_LICENSE("GPL");
2000 MODULE_DESCRIPTION("Common HDA driver functions");
2001