1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Digital Audio (PCM) abstract layer
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  *                   Abramo Bagnara <abramo@alsa-project.org>
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
20 
21 #include "pcm_local.h"
22 
23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
24 #define CREATE_TRACE_POINTS
25 #include "pcm_trace.h"
26 #else
27 #define trace_hwptr(substream, pos, in_interrupt)
28 #define trace_xrun(substream)
29 #define trace_hw_ptr_error(substream, reason)
30 #define trace_applptr(substream, prev, curr)
31 #endif
32 
33 static int fill_silence_frames(struct snd_pcm_substream *substream,
34 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35 
36 /*
37  * fill ring buffer with silence
38  * runtime->silence_start: starting pointer to silence area
39  * runtime->silence_filled: size filled with silence
40  * runtime->silence_threshold: threshold from application
41  * runtime->silence_size: maximal size from application
42  *
43  * when runtime->silence_size >= runtime->boundary, the processed area is filled with silence immediately
44  */
45 void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
46 {
47 	struct snd_pcm_runtime *runtime = substream->runtime;
48 	snd_pcm_uframes_t frames, ofs, transfer;
49 	int err;
50 
51 	if (runtime->silence_size < runtime->boundary) {
52 		snd_pcm_sframes_t noise_dist, n;
53 		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
54 		if (runtime->silence_start != appl_ptr) {
55 			n = appl_ptr - runtime->silence_start;
56 			if (n < 0)
57 				n += runtime->boundary;
58 			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
59 				runtime->silence_filled -= n;
60 			else
61 				runtime->silence_filled = 0;
62 			runtime->silence_start = appl_ptr;
63 		}
64 		if (runtime->silence_filled >= runtime->buffer_size)
65 			return;
66 		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
67 		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
68 			return;
69 		frames = runtime->silence_threshold - noise_dist;
70 		if (frames > runtime->silence_size)
71 			frames = runtime->silence_size;
72 	} else {
73 		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
74 			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
75 			if (avail > runtime->buffer_size)
76 				avail = runtime->buffer_size;
77 			runtime->silence_filled = avail > 0 ? avail : 0;
78 			runtime->silence_start = (runtime->status->hw_ptr +
79 						  runtime->silence_filled) %
80 						 runtime->boundary;
81 		} else {
82 			ofs = runtime->status->hw_ptr;
83 			frames = new_hw_ptr - ofs;
84 			if ((snd_pcm_sframes_t)frames < 0)
85 				frames += runtime->boundary;
86 			runtime->silence_filled -= frames;
87 			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
88 				runtime->silence_filled = 0;
89 				runtime->silence_start = new_hw_ptr;
90 			} else {
91 				runtime->silence_start = ofs;
92 			}
93 		}
94 		frames = runtime->buffer_size - runtime->silence_filled;
95 	}
96 	if (snd_BUG_ON(frames > runtime->buffer_size))
97 		return;
98 	if (frames == 0)
99 		return;
100 	ofs = runtime->silence_start % runtime->buffer_size;
101 	while (frames > 0) {
102 		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
103 		err = fill_silence_frames(substream, ofs, transfer);
104 		snd_BUG_ON(err < 0);
105 		runtime->silence_filled += transfer;
106 		frames -= transfer;
107 		ofs = 0;
108 	}
109 	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
110 }
111 
112 #ifdef CONFIG_SND_DEBUG
113 void snd_pcm_debug_name(struct snd_pcm_substream *substream,
114 			   char *name, size_t len)
115 {
116 	snprintf(name, len, "pcmC%dD%d%c:%d",
117 		 substream->pcm->card->number,
118 		 substream->pcm->device,
119 		 substream->stream ? 'c' : 'p',
120 		 substream->number);
121 }
122 EXPORT_SYMBOL(snd_pcm_debug_name);
123 #endif
124 
125 #define XRUN_DEBUG_BASIC	(1<<0)
126 #define XRUN_DEBUG_STACK	(1<<1)	/* also dump the stack */
127 #define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
128 
129 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
130 
131 #define xrun_debug(substream, mask) \
132 			((substream)->pstr->xrun_debug & (mask))
133 #else
134 #define xrun_debug(substream, mask)	0
135 #endif
136 
137 #define dump_stack_on_xrun(substream) do {			\
138 		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
139 			dump_stack();				\
140 	} while (0)
141 
142 /* call with stream lock held */
143 void __snd_pcm_xrun(struct snd_pcm_substream *substream)
144 {
145 	struct snd_pcm_runtime *runtime = substream->runtime;
146 
147 	trace_xrun(substream);
148 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
149 		struct timespec64 tstamp;
150 
151 		snd_pcm_gettime(runtime, &tstamp);
152 		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
153 		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
154 	}
155 	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
156 	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
157 		char name[16];
158 		snd_pcm_debug_name(substream, name, sizeof(name));
159 		pcm_warn(substream->pcm, "XRUN: %s\n", name);
160 		dump_stack_on_xrun(substream);
161 	}
162 }
163 
164 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
165 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
166 	do {								\
167 		trace_hw_ptr_error(substream, reason);	\
168 		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
169 			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
170 					   (in_interrupt) ? 'Q' : 'P', ##args);	\
171 			dump_stack_on_xrun(substream);			\
172 		}							\
173 	} while (0)
174 
175 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
176 
177 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
178 
179 #endif
180 
181 int snd_pcm_update_state(struct snd_pcm_substream *substream,
182 			 struct snd_pcm_runtime *runtime)
183 {
184 	snd_pcm_uframes_t avail;
185 
186 	avail = snd_pcm_avail(substream);
187 	if (avail > runtime->avail_max)
188 		runtime->avail_max = avail;
189 	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
190 		if (avail >= runtime->buffer_size) {
191 			snd_pcm_drain_done(substream);
192 			return -EPIPE;
193 		}
194 	} else {
195 		if (avail >= runtime->stop_threshold) {
196 			__snd_pcm_xrun(substream);
197 			return -EPIPE;
198 		}
199 	}
200 	if (runtime->twake) {
201 		if (avail >= runtime->twake)
202 			wake_up(&runtime->tsleep);
203 	} else if (avail >= runtime->control->avail_min)
204 		wake_up(&runtime->sleep);
205 	return 0;
206 }
207 
208 static void update_audio_tstamp(struct snd_pcm_substream *substream,
209 				struct timespec64 *curr_tstamp,
210 				struct timespec64 *audio_tstamp)
211 {
212 	struct snd_pcm_runtime *runtime = substream->runtime;
213 	u64 audio_frames, audio_nsecs;
214 	struct timespec64 driver_tstamp;
215 
216 	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
217 		return;
218 
219 	if (!(substream->ops->get_time_info) ||
220 		(runtime->audio_tstamp_report.actual_type ==
221 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
222 
223 		/*
224 		 * provide audio timestamp derived from pointer position
225 		 * add delay only if requested
226 		 */
227 
228 		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
229 
230 		if (runtime->audio_tstamp_config.report_delay) {
231 			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
232 				audio_frames -=  runtime->delay;
233 			else
234 				audio_frames +=  runtime->delay;
235 		}
236 		audio_nsecs = div_u64(audio_frames * 1000000000LL,
237 				runtime->rate);
238 		*audio_tstamp = ns_to_timespec64(audio_nsecs);
239 	}
240 
241 	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
242 	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
243 		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
244 		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
245 		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
246 		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
247 	}
248 
249 
250 	/*
251 	 * re-take a driver timestamp to let apps detect if the reference tstamp
252 	 * read by low-level hardware was provided with a delay
253 	 */
254 	snd_pcm_gettime(substream->runtime, &driver_tstamp);
255 	runtime->driver_tstamp = driver_tstamp;
256 }
257 
258 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
259 				  unsigned int in_interrupt)
260 {
261 	struct snd_pcm_runtime *runtime = substream->runtime;
262 	snd_pcm_uframes_t pos;
263 	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
264 	snd_pcm_sframes_t hdelta, delta;
265 	unsigned long jdelta;
266 	unsigned long curr_jiffies;
267 	struct timespec64 curr_tstamp;
268 	struct timespec64 audio_tstamp;
269 	int crossed_boundary = 0;
270 
271 	old_hw_ptr = runtime->status->hw_ptr;
272 
273 	/*
274 	 * group pointer, time and jiffies reads to allow for more
275 	 * accurate correlations/corrections.
276 	 * The values are stored at the end of this routine after
277 	 * corrections for hw_ptr position
278 	 */
279 	pos = substream->ops->pointer(substream);
280 	curr_jiffies = jiffies;
281 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
282 		if ((substream->ops->get_time_info) &&
283 			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
284 			substream->ops->get_time_info(substream, &curr_tstamp,
285 						&audio_tstamp,
286 						&runtime->audio_tstamp_config,
287 						&runtime->audio_tstamp_report);
288 
289 			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
290 			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
291 				snd_pcm_gettime(runtime, &curr_tstamp);
292 		} else
293 			snd_pcm_gettime(runtime, &curr_tstamp);
294 	}
295 
296 	if (pos == SNDRV_PCM_POS_XRUN) {
297 		__snd_pcm_xrun(substream);
298 		return -EPIPE;
299 	}
300 	if (pos >= runtime->buffer_size) {
301 		if (printk_ratelimit()) {
302 			char name[16];
303 			snd_pcm_debug_name(substream, name, sizeof(name));
304 			pcm_err(substream->pcm,
305 				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
306 				name, pos, runtime->buffer_size,
307 				runtime->period_size);
308 		}
309 		pos = 0;
310 	}
311 	pos -= pos % runtime->min_align;
312 	trace_hwptr(substream, pos, in_interrupt);
313 	hw_base = runtime->hw_ptr_base;
314 	new_hw_ptr = hw_base + pos;
315 	if (in_interrupt) {
316 		/* we know that one period was processed */
317 		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
318 		delta = runtime->hw_ptr_interrupt + runtime->period_size;
319 		if (delta > new_hw_ptr) {
320 			/* check for double acknowledged interrupts */
321 			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
322 			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
323 				hw_base += runtime->buffer_size;
324 				if (hw_base >= runtime->boundary) {
325 					hw_base = 0;
326 					crossed_boundary++;
327 				}
328 				new_hw_ptr = hw_base + pos;
329 				goto __delta;
330 			}
331 		}
332 	}
333 	/* new_hw_ptr might be lower than old_hw_ptr when the */
334 	/* pointer crosses the end of the ring buffer */
335 	if (new_hw_ptr < old_hw_ptr) {
336 		hw_base += runtime->buffer_size;
337 		if (hw_base >= runtime->boundary) {
338 			hw_base = 0;
339 			crossed_boundary++;
340 		}
341 		new_hw_ptr = hw_base + pos;
342 	}
343       __delta:
344 	delta = new_hw_ptr - old_hw_ptr;
345 	if (delta < 0)
346 		delta += runtime->boundary;
347 
348 	if (runtime->no_period_wakeup) {
349 		snd_pcm_sframes_t xrun_threshold;
350 		/*
351 		 * Without regular period interrupts, we have to check
352 		 * the elapsed time to detect xruns.
353 		 */
354 		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
355 		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
356 			goto no_delta_check;
357 		hdelta = jdelta - delta * HZ / runtime->rate;
358 		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
359 		while (hdelta > xrun_threshold) {
360 			delta += runtime->buffer_size;
361 			hw_base += runtime->buffer_size;
362 			if (hw_base >= runtime->boundary) {
363 				hw_base = 0;
364 				crossed_boundary++;
365 			}
366 			new_hw_ptr = hw_base + pos;
367 			hdelta -= runtime->hw_ptr_buffer_jiffies;
368 		}
369 		goto no_delta_check;
370 	}
371 
372 	/* something must be really wrong */
373 	if (delta >= runtime->buffer_size + runtime->period_size) {
374 		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
375 			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
376 			     substream->stream, (long)pos,
377 			     (long)new_hw_ptr, (long)old_hw_ptr);
378 		return 0;
379 	}
380 
381 	/* Do jiffies check only in xrun_debug mode */
382 	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
383 		goto no_jiffies_check;
384 
385 	/* Skip the jiffies check for hardware with the BATCH flag.
386 	 * Such hardware usually just increases the position at each IRQ,
387 	 * thus it can't give any strange position.
388 	 */
389 	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
390 		goto no_jiffies_check;
391 	hdelta = delta;
392 	if (hdelta < runtime->delay)
393 		goto no_jiffies_check;
394 	hdelta -= runtime->delay;
395 	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
396 	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
397 		delta = jdelta /
398 			(((runtime->period_size * HZ) / runtime->rate)
399 								+ HZ/100);
400 		/* move new_hw_ptr according to jiffies, not the pos variable */
401 		new_hw_ptr = old_hw_ptr;
402 		hw_base = delta;
403 		/* use loop to avoid checks for delta overflows */
404 		/* the delta value is small or zero in most cases */
405 		while (delta > 0) {
406 			new_hw_ptr += runtime->period_size;
407 			if (new_hw_ptr >= runtime->boundary) {
408 				new_hw_ptr -= runtime->boundary;
409 				crossed_boundary--;
410 			}
411 			delta--;
412 		}
413 		/* align hw_base to buffer_size */
414 		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
415 			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
416 			     (long)pos, (long)hdelta,
417 			     (long)runtime->period_size, jdelta,
418 			     ((hdelta * HZ) / runtime->rate), hw_base,
419 			     (unsigned long)old_hw_ptr,
420 			     (unsigned long)new_hw_ptr);
421 		/* reset values to proper state */
422 		delta = 0;
423 		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
424 	}
425  no_jiffies_check:
426 	if (delta > runtime->period_size + runtime->period_size / 2) {
427 		hw_ptr_error(substream, in_interrupt,
428 			     "Lost interrupts?",
429 			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
430 			     substream->stream, (long)delta,
431 			     (long)new_hw_ptr,
432 			     (long)old_hw_ptr);
433 	}
434 
435  no_delta_check:
436 	if (runtime->status->hw_ptr == new_hw_ptr) {
437 		runtime->hw_ptr_jiffies = curr_jiffies;
438 		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
439 		return 0;
440 	}
441 
442 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
443 	    runtime->silence_size > 0)
444 		snd_pcm_playback_silence(substream, new_hw_ptr);
445 
446 	if (in_interrupt) {
447 		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
448 		if (delta < 0)
449 			delta += runtime->boundary;
450 		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
451 		runtime->hw_ptr_interrupt += delta;
452 		if (runtime->hw_ptr_interrupt >= runtime->boundary)
453 			runtime->hw_ptr_interrupt -= runtime->boundary;
454 	}
455 	runtime->hw_ptr_base = hw_base;
456 	runtime->status->hw_ptr = new_hw_ptr;
457 	runtime->hw_ptr_jiffies = curr_jiffies;
458 	if (crossed_boundary) {
459 		snd_BUG_ON(crossed_boundary != 1);
460 		runtime->hw_ptr_wrap += runtime->boundary;
461 	}
462 
463 	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
464 
465 	return snd_pcm_update_state(substream, runtime);
466 }
467 
468 /* CAUTION: call it with irq disabled */
469 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
470 {
471 	return snd_pcm_update_hw_ptr0(substream, 0);
472 }
473 
474 /**
475  * snd_pcm_set_ops - set the PCM operators
476  * @pcm: the pcm instance
477  * @direction: stream direction, SNDRV_PCM_STREAM_XXX
478  * @ops: the operator table
479  *
480  * Sets the given PCM operators to the pcm instance.
481  */
482 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
483 		     const struct snd_pcm_ops *ops)
484 {
485 	struct snd_pcm_str *stream = &pcm->streams[direction];
486 	struct snd_pcm_substream *substream;
487 
488 	for (substream = stream->substream; substream != NULL; substream = substream->next)
489 		substream->ops = ops;
490 }
491 EXPORT_SYMBOL(snd_pcm_set_ops);
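/*
 * Illustrative sketch (not part of this file): a driver typically calls
 * snd_pcm_set_ops() right after creating the PCM device in its probe
 * routine.  The ops tables and the card variable below are hypothetical.
 *
 *	struct snd_pcm *pcm;
 *	int err;
 *
 *	err = snd_pcm_new(card, "My PCM", 0, 1, 1, &pcm);
 *	if (err < 0)
 *		return err;
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
 */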
492 
493 /**
494  * snd_pcm_set_sync - set the PCM sync id
495  * @substream: the pcm substream
496  *
497  * Sets the PCM sync identifier for the card.
498  */
499 void snd_pcm_set_sync(struct snd_pcm_substream *substream)
500 {
501 	struct snd_pcm_runtime *runtime = substream->runtime;
502 
503 	runtime->sync.id32[0] = substream->pcm->card->number;
504 	runtime->sync.id32[1] = -1;
505 	runtime->sync.id32[2] = -1;
506 	runtime->sync.id32[3] = -1;
507 }
508 EXPORT_SYMBOL(snd_pcm_set_sync);
509 
510 /*
511  *  Standard ioctl routine
512  */
513 
514 static inline unsigned int div32(unsigned int a, unsigned int b,
515 				 unsigned int *r)
516 {
517 	if (b == 0) {
518 		*r = 0;
519 		return UINT_MAX;
520 	}
521 	*r = a % b;
522 	return a / b;
523 }
524 
525 static inline unsigned int div_down(unsigned int a, unsigned int b)
526 {
527 	if (b == 0)
528 		return UINT_MAX;
529 	return a / b;
530 }
531 
532 static inline unsigned int div_up(unsigned int a, unsigned int b)
533 {
534 	unsigned int r;
535 	unsigned int q;
536 	if (b == 0)
537 		return UINT_MAX;
538 	q = div32(a, b, &r);
539 	if (r)
540 		++q;
541 	return q;
542 }
543 
544 static inline unsigned int mul(unsigned int a, unsigned int b)
545 {
546 	if (a == 0)
547 		return 0;
548 	if (div_down(UINT_MAX, a) < b)
549 		return UINT_MAX;
550 	return a * b;
551 }
552 
553 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
554 				    unsigned int c, unsigned int *r)
555 {
556 	u_int64_t n = (u_int64_t) a * b;
557 	if (c == 0) {
558 		*r = 0;
559 		return UINT_MAX;
560 	}
561 	n = div_u64_rem(n, c, r);
562 	if (n >= UINT_MAX) {
563 		*r = 0;
564 		return UINT_MAX;
565 	}
566 	return n;
567 }
568 
569 /**
570  * snd_interval_refine - refine the interval value of configurator
571  * @i: the interval value to refine
572  * @v: the interval value to refer to
573  *
574  * Refines the interval value with the reference value.
575  * The interval is changed to the range satisfying both intervals.
576  * The interval status (min, max, integer, etc.) are evaluated.
577  *
578  * Return: Positive if the value is changed, zero if it's not changed, or a
579  * negative error code.
580  */
581 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
582 {
583 	int changed = 0;
584 	if (snd_BUG_ON(snd_interval_empty(i)))
585 		return -EINVAL;
586 	if (i->min < v->min) {
587 		i->min = v->min;
588 		i->openmin = v->openmin;
589 		changed = 1;
590 	} else if (i->min == v->min && !i->openmin && v->openmin) {
591 		i->openmin = 1;
592 		changed = 1;
593 	}
594 	if (i->max > v->max) {
595 		i->max = v->max;
596 		i->openmax = v->openmax;
597 		changed = 1;
598 	} else if (i->max == v->max && !i->openmax && v->openmax) {
599 		i->openmax = 1;
600 		changed = 1;
601 	}
602 	if (!i->integer && v->integer) {
603 		i->integer = 1;
604 		changed = 1;
605 	}
606 	if (i->integer) {
607 		if (i->openmin) {
608 			i->min++;
609 			i->openmin = 0;
610 		}
611 		if (i->openmax) {
612 			i->max--;
613 			i->openmax = 0;
614 		}
615 	} else if (!i->openmin && !i->openmax && i->min == i->max)
616 		i->integer = 1;
617 	if (snd_interval_checkempty(i)) {
618 		snd_interval_none(i);
619 		return -EINVAL;
620 	}
621 	return changed;
622 }
623 EXPORT_SYMBOL(snd_interval_refine);
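/*
 * Illustrative sketch (not part of this file): a hw_rule callback can build
 * a temporary interval and use snd_interval_refine() to narrow a parameter.
 * The 8000..48000 range below is an arbitrary example.
 *
 *	struct snd_interval t;
 *
 *	snd_interval_any(&t);
 *	t.min = 8000;
 *	t.max = 48000;
 *	t.integer = 1;
 *	return snd_interval_refine(hw_param_interval(params,
 *					SNDRV_PCM_HW_PARAM_RATE), &t);
 */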
624 
625 static int snd_interval_refine_first(struct snd_interval *i)
626 {
627 	const unsigned int last_max = i->max;
628 
629 	if (snd_BUG_ON(snd_interval_empty(i)))
630 		return -EINVAL;
631 	if (snd_interval_single(i))
632 		return 0;
633 	i->max = i->min;
634 	if (i->openmin)
635 		i->max++;
636 	/* only exclude max value if also excluded before refine */
637 	i->openmax = (i->openmax && i->max >= last_max);
638 	return 1;
639 }
640 
641 static int snd_interval_refine_last(struct snd_interval *i)
642 {
643 	const unsigned int last_min = i->min;
644 
645 	if (snd_BUG_ON(snd_interval_empty(i)))
646 		return -EINVAL;
647 	if (snd_interval_single(i))
648 		return 0;
649 	i->min = i->max;
650 	if (i->openmax)
651 		i->min--;
652 	/* only exclude min value if also excluded before refine */
653 	i->openmin = (i->openmin && i->min <= last_min);
654 	return 1;
655 }
656 
657 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
658 {
659 	if (a->empty || b->empty) {
660 		snd_interval_none(c);
661 		return;
662 	}
663 	c->empty = 0;
664 	c->min = mul(a->min, b->min);
665 	c->openmin = (a->openmin || b->openmin);
666 	c->max = mul(a->max,  b->max);
667 	c->openmax = (a->openmax || b->openmax);
668 	c->integer = (a->integer && b->integer);
669 }
670 
671 /**
672  * snd_interval_div - refine the interval value with division
673  * @a: dividend
674  * @b: divisor
675  * @c: quotient
676  *
677  * c = a / b
678  *
679  * The result is stored in @c.
680  */
681 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
682 {
683 	unsigned int r;
684 	if (a->empty || b->empty) {
685 		snd_interval_none(c);
686 		return;
687 	}
688 	c->empty = 0;
689 	c->min = div32(a->min, b->max, &r);
690 	c->openmin = (r || a->openmin || b->openmax);
691 	if (b->min > 0) {
692 		c->max = div32(a->max, b->min, &r);
693 		if (r) {
694 			c->max++;
695 			c->openmax = 1;
696 		} else
697 			c->openmax = (a->openmax || b->openmin);
698 	} else {
699 		c->max = UINT_MAX;
700 		c->openmax = 0;
701 	}
702 	c->integer = 0;
703 }
704 
705 /**
706  * snd_interval_muldivk - refine the interval value
707  * @a: dividend 1
708  * @b: dividend 2
709  * @k: divisor (as integer)
710  * @c: result
711  *
712  * c = a * b / k
713  *
714  * The result is stored in @c.
715  */
716 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
717 		      unsigned int k, struct snd_interval *c)
718 {
719 	unsigned int r;
720 	if (a->empty || b->empty) {
721 		snd_interval_none(c);
722 		return;
723 	}
724 	c->empty = 0;
725 	c->min = muldiv32(a->min, b->min, k, &r);
726 	c->openmin = (r || a->openmin || b->openmin);
727 	c->max = muldiv32(a->max, b->max, k, &r);
728 	if (r) {
729 		c->max++;
730 		c->openmax = 1;
731 	} else
732 		c->openmax = (a->openmax || b->openmax);
733 	c->integer = 0;
734 }
735 
736 /**
737  * snd_interval_mulkdiv - refine the interval value
738  * @a: dividend 1
739  * @k: dividend 2 (as integer)
740  * @b: divisor
741  * @c: result
742  *
743  * c = a * k / b
744  *
745  * The result is stored in @c.
746  */
747 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
748 		      const struct snd_interval *b, struct snd_interval *c)
749 {
750 	unsigned int r;
751 	if (a->empty || b->empty) {
752 		snd_interval_none(c);
753 		return;
754 	}
755 	c->empty = 0;
756 	c->min = muldiv32(a->min, k, b->max, &r);
757 	c->openmin = (r || a->openmin || b->openmax);
758 	if (b->min > 0) {
759 		c->max = muldiv32(a->max, k, b->min, &r);
760 		if (r) {
761 			c->max++;
762 			c->openmax = 1;
763 		} else
764 			c->openmax = (a->openmax || b->openmin);
765 	} else {
766 		c->max = UINT_MAX;
767 		c->openmax = 0;
768 	}
769 	c->integer = 0;
770 }
771 
772 /* ---- */
773 
774 
775 /**
776  * snd_interval_ratnum - refine the interval value
777  * @i: interval to refine
778  * @rats_count: number of ratnum_t
779  * @rats: ratnum_t array
780  * @nump: pointer to store the resultant numerator
781  * @denp: pointer to store the resultant denominator
782  *
783  * Return: Positive if the value is changed, zero if it's not changed, or a
784  * negative error code.
785  */
786 int snd_interval_ratnum(struct snd_interval *i,
787 			unsigned int rats_count, const struct snd_ratnum *rats,
788 			unsigned int *nump, unsigned int *denp)
789 {
790 	unsigned int best_num, best_den;
791 	int best_diff;
792 	unsigned int k;
793 	struct snd_interval t;
794 	int err;
795 	unsigned int result_num, result_den;
796 	int result_diff;
797 
798 	best_num = best_den = best_diff = 0;
799 	for (k = 0; k < rats_count; ++k) {
800 		unsigned int num = rats[k].num;
801 		unsigned int den;
802 		unsigned int q = i->min;
803 		int diff;
804 		if (q == 0)
805 			q = 1;
806 		den = div_up(num, q);
807 		if (den < rats[k].den_min)
808 			continue;
809 		if (den > rats[k].den_max)
810 			den = rats[k].den_max;
811 		else {
812 			unsigned int r;
813 			r = (den - rats[k].den_min) % rats[k].den_step;
814 			if (r != 0)
815 				den -= r;
816 		}
817 		diff = num - q * den;
818 		if (diff < 0)
819 			diff = -diff;
820 		if (best_num == 0 ||
821 		    diff * best_den < best_diff * den) {
822 			best_diff = diff;
823 			best_den = den;
824 			best_num = num;
825 		}
826 	}
827 	if (best_den == 0) {
828 		i->empty = 1;
829 		return -EINVAL;
830 	}
831 	t.min = div_down(best_num, best_den);
832 	t.openmin = !!(best_num % best_den);
833 
834 	result_num = best_num;
835 	result_diff = best_diff;
836 	result_den = best_den;
837 	best_num = best_den = best_diff = 0;
838 	for (k = 0; k < rats_count; ++k) {
839 		unsigned int num = rats[k].num;
840 		unsigned int den;
841 		unsigned int q = i->max;
842 		int diff;
843 		if (q == 0) {
844 			i->empty = 1;
845 			return -EINVAL;
846 		}
847 		den = div_down(num, q);
848 		if (den > rats[k].den_max)
849 			continue;
850 		if (den < rats[k].den_min)
851 			den = rats[k].den_min;
852 		else {
853 			unsigned int r;
854 			r = (den - rats[k].den_min) % rats[k].den_step;
855 			if (r != 0)
856 				den += rats[k].den_step - r;
857 		}
858 		diff = q * den - num;
859 		if (diff < 0)
860 			diff = -diff;
861 		if (best_num == 0 ||
862 		    diff * best_den < best_diff * den) {
863 			best_diff = diff;
864 			best_den = den;
865 			best_num = num;
866 		}
867 	}
868 	if (best_den == 0) {
869 		i->empty = 1;
870 		return -EINVAL;
871 	}
872 	t.max = div_up(best_num, best_den);
873 	t.openmax = !!(best_num % best_den);
874 	t.integer = 0;
875 	err = snd_interval_refine(i, &t);
876 	if (err < 0)
877 		return err;
878 
879 	if (snd_interval_single(i)) {
880 		if (best_diff * result_den < result_diff * best_den) {
881 			result_num = best_num;
882 			result_den = best_den;
883 		}
884 		if (nump)
885 			*nump = result_num;
886 		if (denp)
887 			*denp = result_den;
888 	}
889 	return err;
890 }
891 EXPORT_SYMBOL(snd_interval_ratnum);
892 
893 /**
894  * snd_interval_ratden - refine the interval value
895  * @i: interval to refine
896  * @rats_count: number of struct ratden
897  * @rats: struct ratden array
898  * @nump: pointer to store the resultant numerator
899  * @denp: pointer to store the resultant denominator
900  *
901  * Return: Positive if the value is changed, zero if it's not changed, or a
902  * negative error code.
903  */
904 static int snd_interval_ratden(struct snd_interval *i,
905 			       unsigned int rats_count,
906 			       const struct snd_ratden *rats,
907 			       unsigned int *nump, unsigned int *denp)
908 {
909 	unsigned int best_num, best_diff, best_den;
910 	unsigned int k;
911 	struct snd_interval t;
912 	int err;
913 
914 	best_num = best_den = best_diff = 0;
915 	for (k = 0; k < rats_count; ++k) {
916 		unsigned int num;
917 		unsigned int den = rats[k].den;
918 		unsigned int q = i->min;
919 		int diff;
920 		num = mul(q, den);
921 		if (num > rats[k].num_max)
922 			continue;
923 		if (num < rats[k].num_min)
924 			num = rats[k].num_max;
925 		else {
926 			unsigned int r;
927 			r = (num - rats[k].num_min) % rats[k].num_step;
928 			if (r != 0)
929 				num += rats[k].num_step - r;
930 		}
931 		diff = num - q * den;
932 		if (best_num == 0 ||
933 		    diff * best_den < best_diff * den) {
934 			best_diff = diff;
935 			best_den = den;
936 			best_num = num;
937 		}
938 	}
939 	if (best_den == 0) {
940 		i->empty = 1;
941 		return -EINVAL;
942 	}
943 	t.min = div_down(best_num, best_den);
944 	t.openmin = !!(best_num % best_den);
945 
946 	best_num = best_den = best_diff = 0;
947 	for (k = 0; k < rats_count; ++k) {
948 		unsigned int num;
949 		unsigned int den = rats[k].den;
950 		unsigned int q = i->max;
951 		int diff;
952 		num = mul(q, den);
953 		if (num < rats[k].num_min)
954 			continue;
955 		if (num > rats[k].num_max)
956 			num = rats[k].num_max;
957 		else {
958 			unsigned int r;
959 			r = (num - rats[k].num_min) % rats[k].num_step;
960 			if (r != 0)
961 				num -= r;
962 		}
963 		diff = q * den - num;
964 		if (best_num == 0 ||
965 		    diff * best_den < best_diff * den) {
966 			best_diff = diff;
967 			best_den = den;
968 			best_num = num;
969 		}
970 	}
971 	if (best_den == 0) {
972 		i->empty = 1;
973 		return -EINVAL;
974 	}
975 	t.max = div_up(best_num, best_den);
976 	t.openmax = !!(best_num % best_den);
977 	t.integer = 0;
978 	err = snd_interval_refine(i, &t);
979 	if (err < 0)
980 		return err;
981 
982 	if (snd_interval_single(i)) {
983 		if (nump)
984 			*nump = best_num;
985 		if (denp)
986 			*denp = best_den;
987 	}
988 	return err;
989 }
990 
991 /**
992  * snd_interval_list - refine the interval value from the list
993  * @i: the interval value to refine
994  * @count: the number of elements in the list
995  * @list: the value list
996  * @mask: the bit-mask to evaluate
997  *
998  * Refines the interval value from the list.
999  * When mask is non-zero, only the elements corresponding to bit 1 are
1000  * evaluated.
1001  *
1002  * Return: Positive if the value is changed, zero if it's not changed, or a
1003  * negative error code.
1004  */
1005 int snd_interval_list(struct snd_interval *i, unsigned int count,
1006 		      const unsigned int *list, unsigned int mask)
1007 {
1008         unsigned int k;
1009 	struct snd_interval list_range;
1010 
1011 	if (!count) {
1012 		i->empty = 1;
1013 		return -EINVAL;
1014 	}
1015 	snd_interval_any(&list_range);
1016 	list_range.min = UINT_MAX;
1017 	list_range.max = 0;
1018         for (k = 0; k < count; k++) {
1019 		if (mask && !(mask & (1 << k)))
1020 			continue;
1021 		if (!snd_interval_test(i, list[k]))
1022 			continue;
1023 		list_range.min = min(list_range.min, list[k]);
1024 		list_range.max = max(list_range.max, list[k]);
1025         }
1026 	return snd_interval_refine(i, &list_range);
1027 }
1028 EXPORT_SYMBOL(snd_interval_list);
1029 
1030 /**
1031  * snd_interval_ranges - refine the interval value from the list of ranges
1032  * @i: the interval value to refine
1033  * @count: the number of elements in the list of ranges
1034  * @ranges: the ranges list
1035  * @mask: the bit-mask to evaluate
1036  *
1037  * Refines the interval value from the list of ranges.
1038  * When mask is non-zero, only the elements corresponding to bit 1 are
1039  * evaluated.
1040  *
1041  * Return: Positive if the value is changed, zero if it's not changed, or a
1042  * negative error code.
1043  */
1044 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1045 			const struct snd_interval *ranges, unsigned int mask)
1046 {
1047 	unsigned int k;
1048 	struct snd_interval range_union;
1049 	struct snd_interval range;
1050 
1051 	if (!count) {
1052 		snd_interval_none(i);
1053 		return -EINVAL;
1054 	}
1055 	snd_interval_any(&range_union);
1056 	range_union.min = UINT_MAX;
1057 	range_union.max = 0;
1058 	for (k = 0; k < count; k++) {
1059 		if (mask && !(mask & (1 << k)))
1060 			continue;
1061 		snd_interval_copy(&range, &ranges[k]);
1062 		if (snd_interval_refine(&range, i) < 0)
1063 			continue;
1064 		if (snd_interval_empty(&range))
1065 			continue;
1066 
1067 		if (range.min < range_union.min) {
1068 			range_union.min = range.min;
1069 			range_union.openmin = 1;
1070 		}
1071 		if (range.min == range_union.min && !range.openmin)
1072 			range_union.openmin = 0;
1073 		if (range.max > range_union.max) {
1074 			range_union.max = range.max;
1075 			range_union.openmax = 1;
1076 		}
1077 		if (range.max == range_union.max && !range.openmax)
1078 			range_union.openmax = 0;
1079 	}
1080 	return snd_interval_refine(i, &range_union);
1081 }
1082 EXPORT_SYMBOL(snd_interval_ranges);
1083 
1084 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1085 {
1086 	unsigned int n;
1087 	int changed = 0;
1088 	n = i->min % step;
1089 	if (n != 0 || i->openmin) {
1090 		i->min += step - n;
1091 		i->openmin = 0;
1092 		changed = 1;
1093 	}
1094 	n = i->max % step;
1095 	if (n != 0 || i->openmax) {
1096 		i->max -= n;
1097 		i->openmax = 0;
1098 		changed = 1;
1099 	}
1100 	if (snd_interval_checkempty(i)) {
1101 		i->empty = 1;
1102 		return -EINVAL;
1103 	}
1104 	return changed;
1105 }
1106 
1107 /* Info constraints helpers */
1108 
1109 /**
1110  * snd_pcm_hw_rule_add - add the hw-constraint rule
1111  * @runtime: the pcm runtime instance
1112  * @cond: condition bits
1113  * @var: the variable to evaluate
1114  * @func: the evaluation function
1115  * @private: the private data pointer passed to function
1116  * @dep: the dependent variables
1117  *
1118  * Return: Zero if successful, or a negative error code on failure.
1119  */
1120 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1121 			int var,
1122 			snd_pcm_hw_rule_func_t func, void *private,
1123 			int dep, ...)
1124 {
1125 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1126 	struct snd_pcm_hw_rule *c;
1127 	unsigned int k;
1128 	va_list args;
1129 	va_start(args, dep);
1130 	if (constrs->rules_num >= constrs->rules_all) {
1131 		struct snd_pcm_hw_rule *new;
1132 		unsigned int new_rules = constrs->rules_all + 16;
1133 		new = krealloc_array(constrs->rules, new_rules,
1134 				     sizeof(*c), GFP_KERNEL);
1135 		if (!new) {
1136 			va_end(args);
1137 			return -ENOMEM;
1138 		}
1139 		constrs->rules = new;
1140 		constrs->rules_all = new_rules;
1141 	}
1142 	c = &constrs->rules[constrs->rules_num];
1143 	c->cond = cond;
1144 	c->func = func;
1145 	c->var = var;
1146 	c->private = private;
1147 	k = 0;
1148 	while (1) {
1149 		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1150 			va_end(args);
1151 			return -EINVAL;
1152 		}
1153 		c->deps[k++] = dep;
1154 		if (dep < 0)
1155 			break;
1156 		dep = va_arg(args, int);
1157 	}
1158 	constrs->rules_num++;
1159 	va_end(args);
1160 	return 0;
1161 }
1162 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
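/*
 * Illustrative sketch (not part of this file): a custom dependency between
 * hw_params variables is expressed as a rule callback plus a call to
 * snd_pcm_hw_rule_add(), usually from the driver's .open callback.  The
 * callback below is hypothetical; it forces the channel count to 1 once
 * the format mask has been narrowed down to S16_LE only.
 *
 *	static int my_rule_channels_by_format(struct snd_pcm_hw_params *params,
 *					      struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c = hw_param_interval(params,
 *						SNDRV_PCM_HW_PARAM_CHANNELS);
 *		struct snd_mask *f = hw_param_mask(params,
 *						SNDRV_PCM_HW_PARAM_FORMAT);
 *		struct snd_interval ch;
 *
 *		snd_interval_any(&ch);
 *		if (snd_mask_single(f) &&
 *		    snd_mask_test(f, (__force unsigned int)SNDRV_PCM_FORMAT_S16_LE)) {
 *			ch.min = ch.max = 1;
 *			ch.integer = 1;
 *			return snd_interval_refine(c, &ch);
 *		}
 *		return 0;
 *	}
 *
 *	err = snd_pcm_hw_rule_add(substream->runtime, 0,
 *				  SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  my_rule_channels_by_format, NULL,
 *				  SNDRV_PCM_HW_PARAM_FORMAT, -1);
 */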
1163 
1164 /**
1165  * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1166  * @runtime: PCM runtime instance
1167  * @var: hw_params variable to apply the mask
1168  * @mask: the bitmap mask
1169  *
1170  * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1171  *
1172  * Return: Zero if successful, or a negative error code on failure.
1173  */
1174 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1175 			       u_int32_t mask)
1176 {
1177 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1178 	struct snd_mask *maskp = constrs_mask(constrs, var);
1179 	*maskp->bits &= mask;
1180 	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1181 	if (*maskp->bits == 0)
1182 		return -EINVAL;
1183 	return 0;
1184 }
1185 
1186 /**
1187  * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1188  * @runtime: PCM runtime instance
1189  * @var: hw_params variable to apply the mask
1190  * @mask: the 64bit bitmap mask
1191  *
1192  * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1193  *
1194  * Return: Zero if successful, or a negative error code on failure.
1195  */
1196 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1197 				 u_int64_t mask)
1198 {
1199 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1200 	struct snd_mask *maskp = constrs_mask(constrs, var);
1201 	maskp->bits[0] &= (u_int32_t)mask;
1202 	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1203 	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1204 	if (! maskp->bits[0] && ! maskp->bits[1])
1205 		return -EINVAL;
1206 	return 0;
1207 }
1208 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
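/*
 * Illustrative sketch (not part of this file): the format parameter is a
 * 64-bit mask, so restricting the supported sample formats is a typical use
 * of snd_pcm_hw_constraint_mask64(), e.g. from a driver's .open callback:
 *
 *	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT,
 *					   SNDRV_PCM_FMTBIT_S16_LE |
 *					   SNDRV_PCM_FMTBIT_S32_LE);
 */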
1209 
1210 /**
1211  * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1212  * @runtime: PCM runtime instance
1213  * @var: hw_params variable to apply the integer constraint
1214  *
1215  * Apply the constraint of integer to an interval parameter.
1216  *
1217  * Return: Positive if the value is changed, zero if it's not changed, or a
1218  * negative error code.
1219  */
1220 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1221 {
1222 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223 	return snd_interval_setinteger(constrs_interval(constrs, var));
1224 }
1225 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1226 
1227 /**
1228  * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1229  * @runtime: PCM runtime instance
1230  * @var: hw_params variable to apply the range
1231  * @min: the minimal value
1232  * @max: the maximal value
1233  *
1234  * Apply the min/max range constraint to an interval parameter.
1235  *
1236  * Return: Positive if the value is changed, zero if it's not changed, or a
1237  * negative error code.
1238  */
1239 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1240 				 unsigned int min, unsigned int max)
1241 {
1242 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1243 	struct snd_interval t;
1244 	t.min = min;
1245 	t.max = max;
1246 	t.openmin = t.openmax = 0;
1247 	t.integer = 0;
1248 	return snd_interval_refine(constrs_interval(constrs, var), &t);
1249 }
1250 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
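/*
 * Illustrative sketch (not part of this file): two common calls from a
 * driver's .open callback, requiring a whole number of periods per buffer
 * and bounding the buffer time (the values below are arbitrary examples):
 *
 *	err = snd_pcm_hw_constraint_integer(runtime,
 *					    SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_TIME,
 *					   1000, 500000);
 *	if (err < 0)
 *		return err;
 */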
1251 
1252 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1253 				struct snd_pcm_hw_rule *rule)
1254 {
1255 	struct snd_pcm_hw_constraint_list *list = rule->private;
1256 	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1257 }
1258 
1259 
1260 /**
1261  * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1262  * @runtime: PCM runtime instance
1263  * @cond: condition bits
1264  * @var: hw_params variable to apply the list constraint
1265  * @l: list
1266  *
1267  * Apply the list of constraints to an interval parameter.
1268  *
1269  * Return: Zero if successful, or a negative error code on failure.
1270  */
1271 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1272 			       unsigned int cond,
1273 			       snd_pcm_hw_param_t var,
1274 			       const struct snd_pcm_hw_constraint_list *l)
1275 {
1276 	return snd_pcm_hw_rule_add(runtime, cond, var,
1277 				   snd_pcm_hw_rule_list, (void *)l,
1278 				   var, -1);
1279 }
1280 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
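/*
 * Illustrative sketch (not part of this file): restricting the rate to a
 * fixed table with snd_pcm_hw_constraint_list().  The table below is a
 * hypothetical example.
 *
 *	static const unsigned int my_rates[] = { 44100, 48000, 96000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_constraints = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &my_rate_constraints);
 */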
1281 
1282 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1283 				  struct snd_pcm_hw_rule *rule)
1284 {
1285 	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1286 	return snd_interval_ranges(hw_param_interval(params, rule->var),
1287 				   r->count, r->ranges, r->mask);
1288 }
1289 
1290 
1291 /**
1292  * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1293  * @runtime: PCM runtime instance
1294  * @cond: condition bits
1295  * @var: hw_params variable to apply the list of range constraints
1296  * @r: ranges
1297  *
1298  * Apply the list of range constraints to an interval parameter.
1299  *
1300  * Return: Zero if successful, or a negative error code on failure.
1301  */
1302 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1303 				 unsigned int cond,
1304 				 snd_pcm_hw_param_t var,
1305 				 const struct snd_pcm_hw_constraint_ranges *r)
1306 {
1307 	return snd_pcm_hw_rule_add(runtime, cond, var,
1308 				   snd_pcm_hw_rule_ranges, (void *)r,
1309 				   var, -1);
1310 }
1311 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
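/*
 * Illustrative sketch (not part of this file): constraining a parameter to
 * disjoint ranges with snd_pcm_hw_constraint_ranges().  The byte ranges
 * below are arbitrary examples.
 *
 *	static const struct snd_interval my_buffer_ranges[] = {
 *		{ .min = 4096,  .max = 8192,  .integer = 1 },
 *		{ .min = 16384, .max = 65536, .integer = 1 },
 *	};
 *	static const struct snd_pcm_hw_constraint_ranges my_ranges = {
 *		.count = ARRAY_SIZE(my_buffer_ranges),
 *		.ranges = my_buffer_ranges,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ranges(runtime, 0,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   &my_ranges);
 */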
1312 
1313 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1314 				   struct snd_pcm_hw_rule *rule)
1315 {
1316 	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1317 	unsigned int num = 0, den = 0;
1318 	int err;
1319 	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1320 				  r->nrats, r->rats, &num, &den);
1321 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1322 		params->rate_num = num;
1323 		params->rate_den = den;
1324 	}
1325 	return err;
1326 }
1327 
1328 /**
1329  * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1330  * @runtime: PCM runtime instance
1331  * @cond: condition bits
1332  * @var: hw_params variable to apply the ratnums constraint
1333  * @r: struct snd_pcm_hw_constraint_ratnums constraints
1334  *
1335  * Return: Zero if successful, or a negative error code on failure.
1336  */
1337 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1338 				  unsigned int cond,
1339 				  snd_pcm_hw_param_t var,
1340 				  const struct snd_pcm_hw_constraint_ratnums *r)
1341 {
1342 	return snd_pcm_hw_rule_add(runtime, cond, var,
1343 				   snd_pcm_hw_rule_ratnums, (void *)r,
1344 				   var, -1);
1345 }
1346 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
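/*
 * Illustrative sketch (not part of this file): hardware that derives the
 * sample rate by dividing a master clock can describe this with a
 * struct snd_ratnum table.  The clock and divider values below are
 * hypothetical.
 *
 *	static const struct snd_ratnum my_clock = {
 *		.num = 24576000,
 *		.den_min = 256,
 *		.den_max = 1024,
 *		.den_step = 256,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums my_clocks = {
 *		.nrats = 1,
 *		.rats = &my_clock,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *					    SNDRV_PCM_HW_PARAM_RATE,
 *					    &my_clocks);
 */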
1347 
1348 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1349 				   struct snd_pcm_hw_rule *rule)
1350 {
1351 	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1352 	unsigned int num = 0, den = 0;
1353 	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1354 				  r->nrats, r->rats, &num, &den);
1355 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1356 		params->rate_num = num;
1357 		params->rate_den = den;
1358 	}
1359 	return err;
1360 }
1361 
1362 /**
1363  * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1364  * @runtime: PCM runtime instance
1365  * @cond: condition bits
1366  * @var: hw_params variable to apply the ratdens constraint
1367  * @r: struct snd_pcm_hw_constraint_ratdens constraints
1368  *
1369  * Return: Zero if successful, or a negative error code on failure.
1370  */
1371 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1372 				  unsigned int cond,
1373 				  snd_pcm_hw_param_t var,
1374 				  const struct snd_pcm_hw_constraint_ratdens *r)
1375 {
1376 	return snd_pcm_hw_rule_add(runtime, cond, var,
1377 				   snd_pcm_hw_rule_ratdens, (void *)r,
1378 				   var, -1);
1379 }
1380 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1381 
1382 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1383 				  struct snd_pcm_hw_rule *rule)
1384 {
1385 	unsigned int l = (unsigned long) rule->private;
1386 	int width = l & 0xffff;
1387 	unsigned int msbits = l >> 16;
1388 	const struct snd_interval *i =
1389 		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1390 
1391 	if (!snd_interval_single(i))
1392 		return 0;
1393 
1394 	if ((snd_interval_value(i) == width) ||
1395 	    (width == 0 && snd_interval_value(i) > msbits))
1396 		params->msbits = min_not_zero(params->msbits, msbits);
1397 
1398 	return 0;
1399 }
1400 
1401 /**
1402  * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1403  * @runtime: PCM runtime instance
1404  * @cond: condition bits
1405  * @width: sample bits width
1406  * @msbits: msbits width
1407  *
1408  * This constraint will set the number of most significant bits (msbits) if a
1409  * sample format with the specified width has been selected. If width is set to 0
1410  * the msbits will be set for any sample format with a width larger than the
1411  * specified msbits.
1412  *
1413  * Return: Zero if successful, or a negative error code on failure.
1414  */
1415 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1416 				 unsigned int cond,
1417 				 unsigned int width,
1418 				 unsigned int msbits)
1419 {
1420 	unsigned long l = (msbits << 16) | width;
1421 	return snd_pcm_hw_rule_add(runtime, cond, -1,
1422 				    snd_pcm_hw_rule_msbits,
1423 				    (void*) l,
1424 				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1425 }
1426 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
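/*
 * Illustrative sketch (not part of this file): hardware that stores samples
 * in 32-bit slots but only resolves 24 bits would declare this from its
 * .open callback as:
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */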
1427 
1428 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1429 				struct snd_pcm_hw_rule *rule)
1430 {
1431 	unsigned long step = (unsigned long) rule->private;
1432 	return snd_interval_step(hw_param_interval(params, rule->var), step);
1433 }
1434 
1435 /**
1436  * snd_pcm_hw_constraint_step - add a hw constraint step rule
1437  * @runtime: PCM runtime instance
1438  * @cond: condition bits
1439  * @var: hw_params variable to apply the step constraint
1440  * @step: step size
1441  *
1442  * Return: Zero if successful, or a negative error code on failure.
1443  */
1444 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1445 			       unsigned int cond,
1446 			       snd_pcm_hw_param_t var,
1447 			       unsigned long step)
1448 {
1449 	return snd_pcm_hw_rule_add(runtime, cond, var,
1450 				   snd_pcm_hw_rule_step, (void *) step,
1451 				   var, -1);
1452 }
1453 EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
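/*
 * Illustrative sketch (not part of this file): hardware whose DMA engine
 * transfers in fixed-size bursts can require the period size in bytes to be
 * a multiple of that burst (128 bytes is an arbitrary example):
 *
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
 */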
1454 
1455 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1456 {
1457 	static const unsigned int pow2_sizes[] = {
1458 		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1459 		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1460 		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1461 		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1462 	};
1463 	return snd_interval_list(hw_param_interval(params, rule->var),
1464 				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1465 }
1466 
1467 /**
1468  * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1469  * @runtime: PCM runtime instance
1470  * @cond: condition bits
1471  * @var: hw_params variable to apply the power-of-2 constraint
1472  *
1473  * Return: Zero if successful, or a negative error code on failure.
1474  */
1475 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1476 			       unsigned int cond,
1477 			       snd_pcm_hw_param_t var)
1478 {
1479 	return snd_pcm_hw_rule_add(runtime, cond, var,
1480 				   snd_pcm_hw_rule_pow2, NULL,
1481 				   var, -1);
1482 }
1483 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1484 
1485 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1486 					   struct snd_pcm_hw_rule *rule)
1487 {
1488 	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1489 	struct snd_interval *rate;
1490 
1491 	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1492 	return snd_interval_list(rate, 1, &base_rate, 0);
1493 }
1494 
1495 /**
1496  * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1497  * @runtime: PCM runtime instance
1498  * @base_rate: the rate at which the hardware does not resample
1499  *
1500  * Return: Zero if successful, or a negative error code on failure.
1501  */
1502 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1503 			       unsigned int base_rate)
1504 {
1505 	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1506 				   SNDRV_PCM_HW_PARAM_RATE,
1507 				   snd_pcm_hw_rule_noresample_func,
1508 				   (void *)(uintptr_t)base_rate,
1509 				   SNDRV_PCM_HW_PARAM_RATE, -1);
1510 }
1511 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
1512 
1513 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1514 				  snd_pcm_hw_param_t var)
1515 {
1516 	if (hw_is_mask(var)) {
1517 		snd_mask_any(hw_param_mask(params, var));
1518 		params->cmask |= 1 << var;
1519 		params->rmask |= 1 << var;
1520 		return;
1521 	}
1522 	if (hw_is_interval(var)) {
1523 		snd_interval_any(hw_param_interval(params, var));
1524 		params->cmask |= 1 << var;
1525 		params->rmask |= 1 << var;
1526 		return;
1527 	}
1528 	snd_BUG();
1529 }
1530 
1531 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1532 {
1533 	unsigned int k;
1534 	memset(params, 0, sizeof(*params));
1535 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1536 		_snd_pcm_hw_param_any(params, k);
1537 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1538 		_snd_pcm_hw_param_any(params, k);
1539 	params->info = ~0U;
1540 }
1541 EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1542 
1543 /**
1544  * snd_pcm_hw_param_value - return @params field @var value
1545  * @params: the hw_params instance
1546  * @var: parameter to retrieve
1547  * @dir: pointer to the direction (-1,0,1) or %NULL
1548  *
1549  * Return: The value for field @var if it's fixed in configuration space
1550  * defined by @params. -%EINVAL otherwise.
1551  */
1552 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1553 			   snd_pcm_hw_param_t var, int *dir)
1554 {
1555 	if (hw_is_mask(var)) {
1556 		const struct snd_mask *mask = hw_param_mask_c(params, var);
1557 		if (!snd_mask_single(mask))
1558 			return -EINVAL;
1559 		if (dir)
1560 			*dir = 0;
1561 		return snd_mask_value(mask);
1562 	}
1563 	if (hw_is_interval(var)) {
1564 		const struct snd_interval *i = hw_param_interval_c(params, var);
1565 		if (!snd_interval_single(i))
1566 			return -EINVAL;
1567 		if (dir)
1568 			*dir = i->openmin;
1569 		return snd_interval_value(i);
1570 	}
1571 	return -EINVAL;
1572 }
1573 EXPORT_SYMBOL(snd_pcm_hw_param_value);
1574 
1575 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1576 				snd_pcm_hw_param_t var)
1577 {
1578 	if (hw_is_mask(var)) {
1579 		snd_mask_none(hw_param_mask(params, var));
1580 		params->cmask |= 1 << var;
1581 		params->rmask |= 1 << var;
1582 	} else if (hw_is_interval(var)) {
1583 		snd_interval_none(hw_param_interval(params, var));
1584 		params->cmask |= 1 << var;
1585 		params->rmask |= 1 << var;
1586 	} else {
1587 		snd_BUG();
1588 	}
1589 }
1590 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1591 
1592 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1593 				   snd_pcm_hw_param_t var)
1594 {
1595 	int changed;
1596 	if (hw_is_mask(var))
1597 		changed = snd_mask_refine_first(hw_param_mask(params, var));
1598 	else if (hw_is_interval(var))
1599 		changed = snd_interval_refine_first(hw_param_interval(params, var));
1600 	else
1601 		return -EINVAL;
1602 	if (changed > 0) {
1603 		params->cmask |= 1 << var;
1604 		params->rmask |= 1 << var;
1605 	}
1606 	return changed;
1607 }
1608 
1609 
1610 /**
1611  * snd_pcm_hw_param_first - refine config space and return minimum value
1612  * @pcm: PCM instance
1613  * @params: the hw_params instance
1614  * @var: parameter to retrieve
1615  * @dir: pointer to the direction (-1,0,1) or %NULL
1616  *
1617  * Inside configuration space defined by @params remove from @var all
1618  * values > minimum. Reduce configuration space accordingly.
1619  *
1620  * Return: The minimum, or a negative error code on failure.
1621  */
1622 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1623 			   struct snd_pcm_hw_params *params,
1624 			   snd_pcm_hw_param_t var, int *dir)
1625 {
1626 	int changed = _snd_pcm_hw_param_first(params, var);
1627 	if (changed < 0)
1628 		return changed;
1629 	if (params->rmask) {
1630 		int err = snd_pcm_hw_refine(pcm, params);
1631 		if (err < 0)
1632 			return err;
1633 	}
1634 	return snd_pcm_hw_param_value(params, var, dir);
1635 }
1636 EXPORT_SYMBOL(snd_pcm_hw_param_first);
1637 
1638 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1639 				  snd_pcm_hw_param_t var)
1640 {
1641 	int changed;
1642 	if (hw_is_mask(var))
1643 		changed = snd_mask_refine_last(hw_param_mask(params, var));
1644 	else if (hw_is_interval(var))
1645 		changed = snd_interval_refine_last(hw_param_interval(params, var));
1646 	else
1647 		return -EINVAL;
1648 	if (changed > 0) {
1649 		params->cmask |= 1 << var;
1650 		params->rmask |= 1 << var;
1651 	}
1652 	return changed;
1653 }
1654 
1655 
1656 /**
1657  * snd_pcm_hw_param_last - refine config space and return maximum value
1658  * @pcm: PCM instance
1659  * @params: the hw_params instance
1660  * @var: parameter to retrieve
1661  * @dir: pointer to the direction (-1,0,1) or %NULL
1662  *
1663  * Inside configuration space defined by @params remove from @var all
1664  * values < maximum. Reduce configuration space accordingly.
1665  *
1666  * Return: The maximum, or a negative error code on failure.
1667  */
1668 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1669 			  struct snd_pcm_hw_params *params,
1670 			  snd_pcm_hw_param_t var, int *dir)
1671 {
1672 	int changed = _snd_pcm_hw_param_last(params, var);
1673 	if (changed < 0)
1674 		return changed;
1675 	if (params->rmask) {
1676 		int err = snd_pcm_hw_refine(pcm, params);
1677 		if (err < 0)
1678 			return err;
1679 	}
1680 	return snd_pcm_hw_param_value(params, var, dir);
1681 }
1682 EXPORT_SYMBOL(snd_pcm_hw_param_last);
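/*
 * Illustrative sketch (not part of this file): picking the lowest rate the
 * configuration space still allows, when a caller wants a single fixed
 * value rather than a range:
 *
 *	struct snd_pcm_hw_params *params;
 *	int rate, dir;
 *
 *	params = kzalloc(sizeof(*params), GFP_KERNEL);
 *	if (!params)
 *		return -ENOMEM;
 *	_snd_pcm_hw_params_any(params);
 *	rate = snd_pcm_hw_param_first(substream, params,
 *				      SNDRV_PCM_HW_PARAM_RATE, &dir);
 *	kfree(params);
 */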
1683 
1684 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1685 				   void *arg)
1686 {
1687 	struct snd_pcm_runtime *runtime = substream->runtime;
1688 	unsigned long flags;
1689 	snd_pcm_stream_lock_irqsave(substream, flags);
1690 	if (snd_pcm_running(substream) &&
1691 	    snd_pcm_update_hw_ptr(substream) >= 0)
1692 		runtime->status->hw_ptr %= runtime->buffer_size;
1693 	else {
1694 		runtime->status->hw_ptr = 0;
1695 		runtime->hw_ptr_wrap = 0;
1696 	}
1697 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1698 	return 0;
1699 }
1700 
1701 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1702 					  void *arg)
1703 {
1704 	struct snd_pcm_channel_info *info = arg;
1705 	struct snd_pcm_runtime *runtime = substream->runtime;
1706 	int width;
1707 	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1708 		info->offset = -1;
1709 		return 0;
1710 	}
1711 	width = snd_pcm_format_physical_width(runtime->format);
1712 	if (width < 0)
1713 		return width;
1714 	info->offset = 0;
1715 	switch (runtime->access) {
1716 	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1717 	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1718 		info->first = info->channel * width;
1719 		info->step = runtime->channels * width;
1720 		break;
1721 	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1722 	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1723 	{
1724 		size_t size = runtime->dma_bytes / runtime->channels;
1725 		info->first = info->channel * size * 8;
1726 		info->step = width;
1727 		break;
1728 	}
1729 	default:
1730 		snd_BUG();
1731 		break;
1732 	}
1733 	return 0;
1734 }
1735 
1736 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1737 				       void *arg)
1738 {
1739 	struct snd_pcm_hw_params *params = arg;
1740 	snd_pcm_format_t format;
1741 	int channels;
1742 	ssize_t frame_size;
1743 
1744 	params->fifo_size = substream->runtime->hw.fifo_size;
1745 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1746 		format = params_format(params);
1747 		channels = params_channels(params);
1748 		frame_size = snd_pcm_format_size(format, channels);
1749 		if (frame_size > 0)
1750 			params->fifo_size /= frame_size;
1751 	}
1752 	return 0;
1753 }
1754 
1755 /**
1756  * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1757  * @substream: the pcm substream instance
1758  * @cmd: ioctl command
1759  * @arg: ioctl argument
1760  *
1761  * Processes the generic ioctl commands for PCM.
1762  * Can be passed as the ioctl callback for PCM ops.
1763  *
1764  * Return: Zero if successful, or a negative error code on failure.
1765  */
1766 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1767 		      unsigned int cmd, void *arg)
1768 {
1769 	switch (cmd) {
1770 	case SNDRV_PCM_IOCTL1_RESET:
1771 		return snd_pcm_lib_ioctl_reset(substream, arg);
1772 	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1773 		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1774 	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1775 		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1776 	}
1777 	return -ENXIO;
1778 }
1779 EXPORT_SYMBOL(snd_pcm_lib_ioctl);
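/*
 * A minimal sketch of how a driver typically wires this generic handler into
 * its PCM ops (illustrative only; the "my_*" callbacks are hypothetical
 * placeholders and the remaining snd_pcm_ops fields are omitted):
 *
 *	static const struct snd_pcm_ops my_pcm_ops = {
 *		.open		= my_pcm_open,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= my_pcm_hw_params,
 *		.prepare	= my_pcm_prepare,
 *		.trigger	= my_pcm_trigger,
 *		.pointer	= my_pcm_pointer,
 *	};
 */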
1780 
1781 /**
1782  * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1783  *						under the acquired lock of the PCM substream.
1784  * @substream: the instance of PCM substream.
1785  *
1786  * This function is called when a batch of audio data frames of the same size as the period of the
1787  * buffer has been processed during audio data transmission.
1788  *
1789  * The call updates the runtime status with the latest position of audio data transmission, checks
1790  * for overrun and underrun of the buffer, wakes user processes waiting for available audio data
1791  * frames, samples the audio timestamp, and stops or drains the PCM substream according to the
1792  * configured thresholds.
1793  *
1794  * The function is intended for the case that the PCM driver operates on audio data frames under
1795  * the acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops in
1796  * process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()`` instead,
1797  * since the lock of the PCM substream should be acquired in advance.
1798  *
1799  * Developers should be aware that the call of this function may invoke some callbacks in
1800  * &snd_pcm_ops:
1801  *
1802  * - .pointer - to retrieve the current position of audio data transmission by frame count or XRUN state.
1803  * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1804  * - .get_time_info - to retrieve the audio timestamp if needed.
1805  *
1806  * Even if more than one period has elapsed since the last call, you have to call this only once.
1807  */
1808 void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1809 {
1810 	struct snd_pcm_runtime *runtime;
1811 
1812 	if (PCM_RUNTIME_CHECK(substream))
1813 		return;
1814 	runtime = substream->runtime;
1815 
1816 	if (!snd_pcm_running(substream) ||
1817 	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1818 		goto _end;
1819 
1820 #ifdef CONFIG_SND_PCM_TIMER
1821 	if (substream->timer_running)
1822 		snd_timer_interrupt(substream->timer, 1);
1823 #endif
1824  _end:
1825 	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
1826 }
1827 EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1828 
1829 /**
1830  * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring the
1831  *			      lock of the PCM substream.
1832  * @substream: the instance of PCM substream.
1833  *
1834  * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that it
1835  * acquires the lock of the PCM substream by itself.
1836  *
1837  * It's typically called from an IRQ handler when a hardware IRQ occurs, to notify that a batch of
1838  * audio data frames of the same size as the period of the buffer has been processed during audio
1839  * data transmission.
1840  */
1841 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1842 {
1843 	unsigned long flags;
1844 
1845 	if (snd_BUG_ON(!substream))
1846 		return;
1847 
1848 	snd_pcm_stream_lock_irqsave(substream, flags);
1849 	snd_pcm_period_elapsed_under_stream_lock(substream);
1850 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1851 }
1852 EXPORT_SYMBOL(snd_pcm_period_elapsed);
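/*
 * A minimal sketch of the usual caller (illustrative only; my_chip and
 * my_chip_ack_period_irq() are hypothetical placeholders): a driver's
 * interrupt handler acknowledges the period interrupt and then calls
 * snd_pcm_period_elapsed() once for the running substream.
 *
 *	static irqreturn_t my_pcm_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_chip *chip = dev_id;
 *
 *		if (!my_chip_ack_period_irq(chip))
 *			return IRQ_NONE;
 *		if (chip->substream)
 *			snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */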
1853 
1854 /*
1855  * Wait until avail_min data becomes available
1856  * Returns a negative error code if any error occurs during operation.
1857  * The available space is stored in availp.  When err = 0 and avail = 0
1858  * on the capture stream, it indicates that the stream is in the DRAINING state.
1859  */
1860 static int wait_for_avail(struct snd_pcm_substream *substream,
1861 			      snd_pcm_uframes_t *availp)
1862 {
1863 	struct snd_pcm_runtime *runtime = substream->runtime;
1864 	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1865 	wait_queue_entry_t wait;
1866 	int err = 0;
1867 	snd_pcm_uframes_t avail = 0;
1868 	long wait_time, tout;
1869 
1870 	init_waitqueue_entry(&wait, current);
1871 	set_current_state(TASK_INTERRUPTIBLE);
1872 	add_wait_queue(&runtime->tsleep, &wait);
1873 
1874 	if (runtime->no_period_wakeup)
1875 		wait_time = MAX_SCHEDULE_TIMEOUT;
1876 	else {
1877 		/* use wait time from substream if available */
1878 		if (substream->wait_time) {
1879 			wait_time = substream->wait_time;
1880 		} else {
1881 			wait_time = 10;
1882 
1883 			if (runtime->rate) {
1884 				long t = runtime->period_size * 2 /
1885 					 runtime->rate;
1886 				wait_time = max(t, wait_time);
1887 			}
1888 			wait_time = msecs_to_jiffies(wait_time * 1000);
1889 		}
1890 	}
1891 
1892 	for (;;) {
1893 		if (signal_pending(current)) {
1894 			err = -ERESTARTSYS;
1895 			break;
1896 		}
1897 
1898 		/*
1899 		 * We need to check first whether space has already become
1900 		 * available (and thus the wakeup has already happened), to
1901 		 * close the race against a missed wakeup.
1902 		 * This check must happen after having been added to the waitqueue
1903 		 * and with the current state set to INTERRUPTIBLE.
1904 		 */
1905 		avail = snd_pcm_avail(substream);
1906 		if (avail >= runtime->twake)
1907 			break;
1908 		snd_pcm_stream_unlock_irq(substream);
1909 
1910 		tout = schedule_timeout(wait_time);
1911 
1912 		snd_pcm_stream_lock_irq(substream);
1913 		set_current_state(TASK_INTERRUPTIBLE);
1914 		switch (runtime->status->state) {
1915 		case SNDRV_PCM_STATE_SUSPENDED:
1916 			err = -ESTRPIPE;
1917 			goto _endloop;
1918 		case SNDRV_PCM_STATE_XRUN:
1919 			err = -EPIPE;
1920 			goto _endloop;
1921 		case SNDRV_PCM_STATE_DRAINING:
1922 			if (is_playback)
1923 				err = -EPIPE;
1924 			else
1925 				avail = 0; /* indicate draining */
1926 			goto _endloop;
1927 		case SNDRV_PCM_STATE_OPEN:
1928 		case SNDRV_PCM_STATE_SETUP:
1929 		case SNDRV_PCM_STATE_DISCONNECTED:
1930 			err = -EBADFD;
1931 			goto _endloop;
1932 		case SNDRV_PCM_STATE_PAUSED:
1933 			continue;
1934 		}
1935 		if (!tout) {
1936 			pcm_dbg(substream->pcm,
1937 				"%s write error (DMA or IRQ trouble?)\n",
1938 				is_playback ? "playback" : "capture");
1939 			err = -EIO;
1940 			break;
1941 		}
1942 	}
1943  _endloop:
1944 	set_current_state(TASK_RUNNING);
1945 	remove_wait_queue(&runtime->tsleep, &wait);
1946 	*availp = avail;
1947 	return err;
1948 }
1949 
1950 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1951 			      int channel, unsigned long hwoff,
1952 			      void *buf, unsigned long bytes);
1953 
1954 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1955 			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1956 
1957 /* calculate the target DMA-buffer position to be written/read */
1958 static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1959 			   int channel, unsigned long hwoff)
1960 {
1961 	return runtime->dma_area + hwoff +
1962 		channel * (runtime->dma_bytes / runtime->channels);
1963 }
1964 
1965 /* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1966 static int default_write_copy(struct snd_pcm_substream *substream,
1967 			      int channel, unsigned long hwoff,
1968 			      void *buf, unsigned long bytes)
1969 {
1970 	if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1971 			   (void __user *)buf, bytes))
1972 		return -EFAULT;
1973 	return 0;
1974 }
1975 
1976 /* default copy_kernel ops for write */
1977 static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1978 				     int channel, unsigned long hwoff,
1979 				     void *buf, unsigned long bytes)
1980 {
1981 	memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1982 	return 0;
1983 }
1984 
1985 /* fill silence instead of copying data; called as a transfer helper
1986  * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
1987  * a NULL buffer is passed
1988  */
1989 static int fill_silence(struct snd_pcm_substream *substream, int channel,
1990 			unsigned long hwoff, void *buf, unsigned long bytes)
1991 {
1992 	struct snd_pcm_runtime *runtime = substream->runtime;
1993 
1994 	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1995 		return 0;
1996 	if (substream->ops->fill_silence)
1997 		return substream->ops->fill_silence(substream, channel,
1998 						    hwoff, bytes);
1999 
2000 	snd_pcm_format_set_silence(runtime->format,
2001 				   get_dma_ptr(runtime, channel, hwoff),
2002 				   bytes_to_samples(runtime, bytes));
2003 	return 0;
2004 }
2005 
2006 /* default copy_user ops for read; used for both interleaved and non-interleaved modes */
2007 static int default_read_copy(struct snd_pcm_substream *substream,
2008 			     int channel, unsigned long hwoff,
2009 			     void *buf, unsigned long bytes)
2010 {
2011 	if (copy_to_user((void __user *)buf,
2012 			 get_dma_ptr(substream->runtime, channel, hwoff),
2013 			 bytes))
2014 		return -EFAULT;
2015 	return 0;
2016 }
2017 
2018 /* default copy_kernel ops for read */
2019 static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2020 				    int channel, unsigned long hwoff,
2021 				    void *buf, unsigned long bytes)
2022 {
2023 	memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
2024 	return 0;
2025 }
2026 
2027 /* call transfer function with the converted pointers and sizes;
2028  * for interleaved mode, it's one shot for all samples
2029  */
2030 static int interleaved_copy(struct snd_pcm_substream *substream,
2031 			    snd_pcm_uframes_t hwoff, void *data,
2032 			    snd_pcm_uframes_t off,
2033 			    snd_pcm_uframes_t frames,
2034 			    pcm_transfer_f transfer)
2035 {
2036 	struct snd_pcm_runtime *runtime = substream->runtime;
2037 
2038 	/* convert to bytes */
2039 	hwoff = frames_to_bytes(runtime, hwoff);
2040 	off = frames_to_bytes(runtime, off);
2041 	frames = frames_to_bytes(runtime, frames);
2042 	return transfer(substream, 0, hwoff, data + off, frames);
2043 }
2044 
2045 /* call transfer function with the converted pointers and sizes for each
2046  * non-interleaved channel; when buffer is NULL, silencing instead of copying
2047  */
2048 static int noninterleaved_copy(struct snd_pcm_substream *substream,
2049 			       snd_pcm_uframes_t hwoff, void *data,
2050 			       snd_pcm_uframes_t off,
2051 			       snd_pcm_uframes_t frames,
2052 			       pcm_transfer_f transfer)
2053 {
2054 	struct snd_pcm_runtime *runtime = substream->runtime;
2055 	int channels = runtime->channels;
2056 	void **bufs = data;
2057 	int c, err;
2058 
2059 	/* convert to bytes; note that it's not frames_to_bytes() here.
2060 	 * in non-interleaved mode, we copy each channel separately, thus
2061 	 * each copy covers n_samples bytes, and n_samples x channels = whole frames.
2062 	 */
2063 	off = samples_to_bytes(runtime, off);
2064 	frames = samples_to_bytes(runtime, frames);
2065 	hwoff = samples_to_bytes(runtime, hwoff);
2066 	for (c = 0; c < channels; ++c, ++bufs) {
2067 		if (!data || !*bufs)
2068 			err = fill_silence(substream, c, hwoff, NULL, frames);
2069 		else
2070 			err = transfer(substream, c, hwoff, *bufs + off,
2071 				       frames);
2072 		if (err < 0)
2073 			return err;
2074 	}
2075 	return 0;
2076 }
2077 
2078 /* fill silence on the given buffer position;
2079  * called from snd_pcm_playback_silence()
2080  */
2081 static int fill_silence_frames(struct snd_pcm_substream *substream,
2082 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2083 {
2084 	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2085 	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2086 		return interleaved_copy(substream, off, NULL, 0, frames,
2087 					fill_silence);
2088 	else
2089 		return noninterleaved_copy(substream, off, NULL, 0, frames,
2090 					   fill_silence);
2091 }
2092 
2093 /* sanity-check for read/write methods */
2094 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2095 {
2096 	struct snd_pcm_runtime *runtime;
2097 	if (PCM_RUNTIME_CHECK(substream))
2098 		return -ENXIO;
2099 	runtime = substream->runtime;
2100 	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2101 		return -EINVAL;
2102 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2103 		return -EBADFD;
2104 	return 0;
2105 }
2106 
2107 static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2108 {
2109 	switch (runtime->status->state) {
2110 	case SNDRV_PCM_STATE_PREPARED:
2111 	case SNDRV_PCM_STATE_RUNNING:
2112 	case SNDRV_PCM_STATE_PAUSED:
2113 		return 0;
2114 	case SNDRV_PCM_STATE_XRUN:
2115 		return -EPIPE;
2116 	case SNDRV_PCM_STATE_SUSPENDED:
2117 		return -ESTRPIPE;
2118 	default:
2119 		return -EBADFD;
2120 	}
2121 }
2122 
2123 /* update to the given appl_ptr and call ack callback if needed;
2124  * when an error is returned, roll back to the original value
2125  */
2126 int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2127 			   snd_pcm_uframes_t appl_ptr)
2128 {
2129 	struct snd_pcm_runtime *runtime = substream->runtime;
2130 	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2131 	snd_pcm_sframes_t diff;
2132 	int ret;
2133 
2134 	if (old_appl_ptr == appl_ptr)
2135 		return 0;
2136 
2137 	if (appl_ptr >= runtime->boundary)
2138 		return -EINVAL;
2139 	/*
2140 	 * check if a rewind is requested by the application
2141 	 */
2142 	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2143 		diff = appl_ptr - old_appl_ptr;
2144 		if (diff >= 0) {
2145 			if (diff > runtime->buffer_size)
2146 				return -EINVAL;
2147 		} else {
2148 			if (runtime->boundary + diff > runtime->buffer_size)
2149 				return -EINVAL;
2150 		}
2151 	}
2152 
2153 	runtime->control->appl_ptr = appl_ptr;
2154 	if (substream->ops->ack) {
2155 		ret = substream->ops->ack(substream);
2156 		if (ret < 0) {
2157 			runtime->control->appl_ptr = old_appl_ptr;
2158 			return ret;
2159 		}
2160 	}
2161 
2162 	trace_applptr(substream, old_appl_ptr, appl_ptr);
2163 
2164 	return 0;
2165 }
2166 
2167 /* the common loop for read/write data */
2168 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2169 				     void *data, bool interleaved,
2170 				     snd_pcm_uframes_t size, bool in_kernel)
2171 {
2172 	struct snd_pcm_runtime *runtime = substream->runtime;
2173 	snd_pcm_uframes_t xfer = 0;
2174 	snd_pcm_uframes_t offset = 0;
2175 	snd_pcm_uframes_t avail;
2176 	pcm_copy_f writer;
2177 	pcm_transfer_f transfer;
2178 	bool nonblock;
2179 	bool is_playback;
2180 	int err;
2181 
2182 	err = pcm_sanity_check(substream);
2183 	if (err < 0)
2184 		return err;
2185 
2186 	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2187 	if (interleaved) {
2188 		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2189 		    runtime->channels > 1)
2190 			return -EINVAL;
2191 		writer = interleaved_copy;
2192 	} else {
2193 		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2194 			return -EINVAL;
2195 		writer = noninterleaved_copy;
2196 	}
2197 
2198 	if (!data) {
2199 		if (is_playback)
2200 			transfer = fill_silence;
2201 		else
2202 			return -EINVAL;
2203 	} else if (in_kernel) {
2204 		if (substream->ops->copy_kernel)
2205 			transfer = substream->ops->copy_kernel;
2206 		else
2207 			transfer = is_playback ?
2208 				default_write_copy_kernel : default_read_copy_kernel;
2209 	} else {
2210 		if (substream->ops->copy_user)
2211 			transfer = (pcm_transfer_f)substream->ops->copy_user;
2212 		else
2213 			transfer = is_playback ?
2214 				default_write_copy : default_read_copy;
2215 	}
2216 
2217 	if (size == 0)
2218 		return 0;
2219 
2220 	nonblock = !!(substream->f_flags & O_NONBLOCK);
2221 
2222 	snd_pcm_stream_lock_irq(substream);
2223 	err = pcm_accessible_state(runtime);
2224 	if (err < 0)
2225 		goto _end_unlock;
2226 
2227 	runtime->twake = runtime->control->avail_min ? : 1;
2228 	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
2229 		snd_pcm_update_hw_ptr(substream);
2230 
2231 	/*
2232 	 * If size < start_threshold, wait indefinitely. Another
2233 	 * thread may start capture
2234 	 */
2235 	if (!is_playback &&
2236 	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2237 	    size >= runtime->start_threshold) {
2238 		err = snd_pcm_start(substream);
2239 		if (err < 0)
2240 			goto _end_unlock;
2241 	}
2242 
2243 	avail = snd_pcm_avail(substream);
2244 
2245 	while (size > 0) {
2246 		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2247 		snd_pcm_uframes_t cont;
2248 		if (!avail) {
2249 			if (!is_playback &&
2250 			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2251 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2252 				goto _end_unlock;
2253 			}
2254 			if (nonblock) {
2255 				err = -EAGAIN;
2256 				goto _end_unlock;
2257 			}
2258 			runtime->twake = min_t(snd_pcm_uframes_t, size,
2259 					runtime->control->avail_min ? : 1);
2260 			err = wait_for_avail(substream, &avail);
2261 			if (err < 0)
2262 				goto _end_unlock;
2263 			if (!avail)
2264 				continue; /* draining */
2265 		}
2266 		frames = size > avail ? avail : size;
2267 		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2268 		appl_ofs = appl_ptr % runtime->buffer_size;
2269 		cont = runtime->buffer_size - appl_ofs;
2270 		if (frames > cont)
2271 			frames = cont;
2272 		if (snd_BUG_ON(!frames)) {
2273 			err = -EINVAL;
2274 			goto _end_unlock;
2275 		}
2276 		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2277 			err = -EBUSY;
2278 			goto _end_unlock;
2279 		}
2280 		snd_pcm_stream_unlock_irq(substream);
2281 		if (!is_playback)
2282 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2283 		err = writer(substream, appl_ofs, data, offset, frames,
2284 			     transfer);
2285 		if (is_playback)
2286 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2287 		snd_pcm_stream_lock_irq(substream);
2288 		atomic_dec(&runtime->buffer_accessing);
2289 		if (err < 0)
2290 			goto _end_unlock;
2291 		err = pcm_accessible_state(runtime);
2292 		if (err < 0)
2293 			goto _end_unlock;
2294 		appl_ptr += frames;
2295 		if (appl_ptr >= runtime->boundary)
2296 			appl_ptr -= runtime->boundary;
2297 		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2298 		if (err < 0)
2299 			goto _end_unlock;
2300 
2301 		offset += frames;
2302 		size -= frames;
2303 		xfer += frames;
2304 		avail -= frames;
2305 		if (is_playback &&
2306 		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2307 		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2308 			err = snd_pcm_start(substream);
2309 			if (err < 0)
2310 				goto _end_unlock;
2311 		}
2312 	}
2313  _end_unlock:
2314 	runtime->twake = 0;
2315 	if (xfer > 0 && err >= 0)
2316 		snd_pcm_update_state(substream, runtime);
2317 	snd_pcm_stream_unlock_irq(substream);
2318 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2319 }
2320 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
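/*
 * Callers normally reach __snd_pcm_lib_xfer() through the thin inline wrappers
 * declared in <sound/pcm.h> (snd_pcm_lib_write()/snd_pcm_lib_read() for
 * user-space buffers, snd_pcm_kernel_write()/snd_pcm_kernel_read() for
 * in-kernel buffers), which only fill in the interleaved/in_kernel flags;
 * roughly, for the interleaved user-space write case:
 *
 *	return __snd_pcm_lib_xfer(substream, (void __force *)buf,
 *				  true, frames, false);
 */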
2321 
2322 /*
2323  * standard channel mapping helpers
2324  */
2325 
2326 /* default channel maps for multi-channel playback, up to 8 channels */
2327 const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2328 	{ .channels = 1,
2329 	  .map = { SNDRV_CHMAP_MONO } },
2330 	{ .channels = 2,
2331 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2332 	{ .channels = 4,
2333 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2334 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2335 	{ .channels = 6,
2336 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2337 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2338 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2339 	{ .channels = 8,
2340 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2341 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2342 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2343 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2344 	{ }
2345 };
2346 EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2347 
2348 /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2349 const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2350 	{ .channels = 1,
2351 	  .map = { SNDRV_CHMAP_MONO } },
2352 	{ .channels = 2,
2353 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2354 	{ .channels = 4,
2355 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2356 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2357 	{ .channels = 6,
2358 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2359 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2360 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2361 	{ .channels = 8,
2362 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2363 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2364 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2365 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2366 	{ }
2367 };
2368 EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2369 
2370 static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2371 {
2372 	if (ch > info->max_channels)
2373 		return false;
2374 	return !info->channel_mask || (info->channel_mask & (1U << ch));
2375 }
2376 
2377 static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2378 			      struct snd_ctl_elem_info *uinfo)
2379 {
2380 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2381 
2382 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2383 	uinfo->count = info->max_channels;
2384 	uinfo->value.integer.min = 0;
2385 	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2386 	return 0;
2387 }
2388 
2389 /* get callback for channel map ctl element
2390  * stores the channel positions of the first map entry matching the current channel count
2391  */
2392 static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2393 			     struct snd_ctl_elem_value *ucontrol)
2394 {
2395 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2396 	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2397 	struct snd_pcm_substream *substream;
2398 	const struct snd_pcm_chmap_elem *map;
2399 
2400 	if (!info->chmap)
2401 		return -EINVAL;
2402 	substream = snd_pcm_chmap_substream(info, idx);
2403 	if (!substream)
2404 		return -ENODEV;
2405 	memset(ucontrol->value.integer.value, 0,
2406 	       sizeof(long) * info->max_channels);
2407 	if (!substream->runtime)
2408 		return 0; /* no channels set */
2409 	for (map = info->chmap; map->channels; map++) {
2410 		int i;
2411 		if (map->channels == substream->runtime->channels &&
2412 		    valid_chmap_channels(info, map->channels)) {
2413 			for (i = 0; i < map->channels; i++)
2414 				ucontrol->value.integer.value[i] = map->map[i];
2415 			return 0;
2416 		}
2417 	}
2418 	return -EINVAL;
2419 }
2420 
2421 /* tlv callback for channel map ctl element
2422  * expands the pre-defined channel maps in the form of a TLV
2423  */
2424 static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2425 			     unsigned int size, unsigned int __user *tlv)
2426 {
2427 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2428 	const struct snd_pcm_chmap_elem *map;
2429 	unsigned int __user *dst;
2430 	int c, count = 0;
2431 
2432 	if (!info->chmap)
2433 		return -EINVAL;
2434 	if (size < 8)
2435 		return -ENOMEM;
2436 	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2437 		return -EFAULT;
2438 	size -= 8;
2439 	dst = tlv + 2;
2440 	for (map = info->chmap; map->channels; map++) {
2441 		int chs_bytes = map->channels * 4;
2442 		if (!valid_chmap_channels(info, map->channels))
2443 			continue;
2444 		if (size < 8)
2445 			return -ENOMEM;
2446 		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2447 		    put_user(chs_bytes, dst + 1))
2448 			return -EFAULT;
2449 		dst += 2;
2450 		size -= 8;
2451 		count += 8;
2452 		if (size < chs_bytes)
2453 			return -ENOMEM;
2454 		size -= chs_bytes;
2455 		count += chs_bytes;
2456 		for (c = 0; c < map->channels; c++) {
2457 			if (put_user(map->map[c], dst))
2458 				return -EFAULT;
2459 			dst++;
2460 		}
2461 	}
2462 	if (put_user(count, tlv + 1))
2463 		return -EFAULT;
2464 	return 0;
2465 }
2466 
2467 static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2468 {
2469 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2470 	info->pcm->streams[info->stream].chmap_kctl = NULL;
2471 	kfree(info);
2472 }
2473 
2474 /**
2475  * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2476  * @pcm: the assigned PCM instance
2477  * @stream: stream direction
2478  * @chmap: channel map elements (for query)
2479  * @max_channels: the max number of channels for the stream
2480  * @private_value: the value passed to each kcontrol's private_value field
2481  * @info_ret: where to store the allocated struct snd_pcm_chmap instance if non-NULL
2482  *
2483  * Create channel-mapping control elements assigned to the given PCM stream(s).
2484  * Return: Zero if successful, or a negative error value.
2485  */
2486 int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2487 			   const struct snd_pcm_chmap_elem *chmap,
2488 			   int max_channels,
2489 			   unsigned long private_value,
2490 			   struct snd_pcm_chmap **info_ret)
2491 {
2492 	struct snd_pcm_chmap *info;
2493 	struct snd_kcontrol_new knew = {
2494 		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2495 		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2496 			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2497 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2498 		.info = pcm_chmap_ctl_info,
2499 		.get = pcm_chmap_ctl_get,
2500 		.tlv.c = pcm_chmap_ctl_tlv,
2501 	};
2502 	int err;
2503 
2504 	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2505 		return -EBUSY;
2506 	info = kzalloc(sizeof(*info), GFP_KERNEL);
2507 	if (!info)
2508 		return -ENOMEM;
2509 	info->pcm = pcm;
2510 	info->stream = stream;
2511 	info->chmap = chmap;
2512 	info->max_channels = max_channels;
2513 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2514 		knew.name = "Playback Channel Map";
2515 	else
2516 		knew.name = "Capture Channel Map";
2517 	knew.device = pcm->device;
2518 	knew.count = pcm->streams[stream].substream_count;
2519 	knew.private_value = private_value;
2520 	info->kctl = snd_ctl_new1(&knew, info);
2521 	if (!info->kctl) {
2522 		kfree(info);
2523 		return -ENOMEM;
2524 	}
2525 	info->kctl->private_free = pcm_chmap_ctl_private_free;
2526 	err = snd_ctl_add(pcm->card, info->kctl);
2527 	if (err < 0)
2528 		return err;
2529 	pcm->streams[stream].chmap_kctl = info->kctl;
2530 	if (info_ret)
2531 		*info_ret = info;
2532 	return 0;
2533 }
2534 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
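/*
 * A minimal registration sketch (illustrative only): a driver with up to
 * 8 playback channels would typically add the standard maps at probe time,
 * here with no private_value and without keeping the returned info pointer.
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */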
2535