xref: /openbmc/linux/sound/core/pcm_native.c (revision ce7f93e2)
1 /*
2  *  Digital Audio (PCM) abstract layer
3  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4  *
5  *
6  *   This program is free software; you can redistribute it and/or modify
7  *   it under the terms of the GNU General Public License as published by
8  *   the Free Software Foundation; either version 2 of the License, or
9  *   (at your option) any later version.
10  *
11  *   This program is distributed in the hope that it will be useful,
12  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *   GNU General Public License for more details.
15  *
16  *   You should have received a copy of the GNU General Public License
17  *   along with this program; if not, write to the Free Software
18  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
19  *
20  */
21 
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/file.h>
25 #include <linux/slab.h>
26 #include <linux/sched/signal.h>
27 #include <linux/time.h>
28 #include <linux/pm_qos.h>
29 #include <linux/io.h>
30 #include <linux/dma-mapping.h>
31 #include <sound/core.h>
32 #include <sound/control.h>
33 #include <sound/info.h>
34 #include <sound/pcm.h>
35 #include <sound/pcm_params.h>
36 #include <sound/timer.h>
37 #include <sound/minors.h>
38 #include <linux/uio.h>
39 #include <linux/delay.h>
40 
41 #include "pcm_local.h"
42 
43 #ifdef CONFIG_SND_DEBUG
44 #define CREATE_TRACE_POINTS
45 #include "pcm_param_trace.h"
46 #else
47 #define trace_hw_mask_param_enabled()		0
48 #define trace_hw_interval_param_enabled()	0
49 #define trace_hw_mask_param(substream, type, index, prev, curr)
50 #define trace_hw_interval_param(substream, type, index, prev, curr)
51 #endif
52 
53 /*
54  *  Compatibility
55  */
56 
57 struct snd_pcm_hw_params_old {
58 	unsigned int flags;
59 	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
60 			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
61 	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
62 					SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
63 	unsigned int rmask;
64 	unsigned int cmask;
65 	unsigned int info;
66 	unsigned int msbits;
67 	unsigned int rate_num;
68 	unsigned int rate_den;
69 	snd_pcm_uframes_t fifo_size;
70 	unsigned char reserved[64];
71 };
72 
73 #ifdef CONFIG_SND_SUPPORT_OLD_API
74 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
75 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
76 
77 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
78 				      struct snd_pcm_hw_params_old __user * _oparams);
79 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
80 				      struct snd_pcm_hw_params_old __user * _oparams);
81 #endif
82 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
83 
84 /*
85  *
86  */
87 
88 static DEFINE_RWLOCK(snd_pcm_link_rwlock);
89 static DECLARE_RWSEM(snd_pcm_link_rwsem);
90 
91 /* A writer in the rwsem may block readers even while it is still waiting
92  * in the queue, and this may lead to a deadlock when the code path takes
93  * the read sem twice (e.g. one in snd_pcm_action_nonatomic() and another
94  * in snd_pcm_stream_lock()).  As a (suboptimal) workaround, let the writer
95  * sleep until all the readers have finished, instead of blocking them.
96  */
97 static inline void down_write_nonfifo(struct rw_semaphore *lock)
98 {
99 	while (!down_write_trylock(lock))
100 		msleep(1);
101 }
102 
103 #define PCM_LOCK_DEFAULT	0
104 #define PCM_LOCK_IRQ	1
105 #define PCM_LOCK_IRQSAVE	2
106 
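/*
 * Summary of the lock-mode mapping used by the two helpers below
 * (atomic, i.e. spinlock, case):
 *   PCM_LOCK_DEFAULT -> read_lock()          / read_unlock()
 *   PCM_LOCK_IRQ     -> read_lock_irq()      / read_unlock_irq()
 *   PCM_LOCK_IRQSAVE -> read_lock_irqsave()  / read_unlock_irqrestore()
 * For nonatomic PCMs, every mode falls back to the rwsem + per-stream mutex.
 */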
107 static unsigned long __snd_pcm_stream_lock_mode(struct snd_pcm_substream *substream,
108 						unsigned int mode)
109 {
110 	unsigned long flags = 0;
111 	if (substream->pcm->nonatomic) {
112 		down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
113 		mutex_lock(&substream->self_group.mutex);
114 	} else {
115 		switch (mode) {
116 		case PCM_LOCK_DEFAULT:
117 			read_lock(&snd_pcm_link_rwlock);
118 			break;
119 		case PCM_LOCK_IRQ:
120 			read_lock_irq(&snd_pcm_link_rwlock);
121 			break;
122 		case PCM_LOCK_IRQSAVE:
123 			read_lock_irqsave(&snd_pcm_link_rwlock, flags);
124 			break;
125 		}
126 		spin_lock(&substream->self_group.lock);
127 	}
128 	return flags;
129 }
130 
131 static void __snd_pcm_stream_unlock_mode(struct snd_pcm_substream *substream,
132 					 unsigned int mode, unsigned long flags)
133 {
134 	if (substream->pcm->nonatomic) {
135 		mutex_unlock(&substream->self_group.mutex);
136 		up_read(&snd_pcm_link_rwsem);
137 	} else {
138 		spin_unlock(&substream->self_group.lock);
139 
140 		switch (mode) {
141 		case PCM_LOCK_DEFAULT:
142 			read_unlock(&snd_pcm_link_rwlock);
143 			break;
144 		case PCM_LOCK_IRQ:
145 			read_unlock_irq(&snd_pcm_link_rwlock);
146 			break;
147 		case PCM_LOCK_IRQSAVE:
148 			read_unlock_irqrestore(&snd_pcm_link_rwlock, flags);
149 			break;
150 		}
151 	}
152 }
153 
154 /**
155  * snd_pcm_stream_lock - Lock the PCM stream
156  * @substream: PCM substream
157  *
158  * This locks the PCM stream's spinlock or mutex depending on the nonatomic
159  * flag of the given substream.  This also takes the global link rw lock
160  * (or rw sem), too, for avoiding the race with linked streams.
161  */
162 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
163 {
164 	__snd_pcm_stream_lock_mode(substream, PCM_LOCK_DEFAULT);
165 }
166 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
167 
168 /**
169  * snd_pcm_stream_unlock - Unlock the PCM stream
170  * @substream: PCM substream
171  *
172  * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
173  */
174 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
175 {
176 	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_DEFAULT, 0);
177 }
178 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
179 
180 /**
181  * snd_pcm_stream_lock_irq - Lock the PCM stream
182  * @substream: PCM substream
183  *
184  * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
185  * IRQ (only when nonatomic is false).  In the nonatomic case, this is
186  * identical to snd_pcm_stream_lock().
187  */
188 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
189 {
190 	__snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQ);
191 }
192 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
193 
194 /**
195  * snd_pcm_stream_unlock_irq - Unlock the PCM stream
196  * @substream: PCM substream
197  *
198  * This is a counter-part of snd_pcm_stream_lock_irq().
199  */
200 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
201 {
202 	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQ, 0);
203 }
204 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
205 
206 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
207 {
208 	return __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQSAVE);
209 }
210 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
211 
212 /**
213  * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
214  * @substream: PCM substream
215  * @flags: irq flags
216  *
217  * This is a counter-part of snd_pcm_stream_lock_irqsave().
218  */
219 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
220 				      unsigned long flags)
221 {
222 	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQSAVE, flags);
223 }
224 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
225 
226 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
227 {
228 	struct snd_pcm *pcm = substream->pcm;
229 	struct snd_pcm_str *pstr = substream->pstr;
230 
231 	memset(info, 0, sizeof(*info));
232 	info->card = pcm->card->number;
233 	info->device = pcm->device;
234 	info->stream = substream->stream;
235 	info->subdevice = substream->number;
236 	strlcpy(info->id, pcm->id, sizeof(info->id));
237 	strlcpy(info->name, pcm->name, sizeof(info->name));
238 	info->dev_class = pcm->dev_class;
239 	info->dev_subclass = pcm->dev_subclass;
240 	info->subdevices_count = pstr->substream_count;
241 	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
242 	strlcpy(info->subname, substream->name, sizeof(info->subname));
243 
244 	return 0;
245 }
246 
247 int snd_pcm_info_user(struct snd_pcm_substream *substream,
248 		      struct snd_pcm_info __user * _info)
249 {
250 	struct snd_pcm_info *info;
251 	int err;
252 
253 	info = kmalloc(sizeof(*info), GFP_KERNEL);
254 	if (! info)
255 		return -ENOMEM;
256 	err = snd_pcm_info(substream, info);
257 	if (err >= 0) {
258 		if (copy_to_user(_info, info, sizeof(*info)))
259 			err = -EFAULT;
260 	}
261 	kfree(info);
262 	return err;
263 }
264 
265 static bool hw_support_mmap(struct snd_pcm_substream *substream)
266 {
267 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
268 		return false;
269 	/* architecture supports dma_mmap_coherent()? */
270 #if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
271 	if (!substream->ops->mmap &&
272 	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
273 		return false;
274 #endif
275 	return true;
276 }
277 
278 static int constrain_mask_params(struct snd_pcm_substream *substream,
279 				 struct snd_pcm_hw_params *params)
280 {
281 	struct snd_pcm_hw_constraints *constrs =
282 					&substream->runtime->hw_constraints;
283 	struct snd_mask *m;
284 	unsigned int k;
285 	struct snd_mask old_mask;
286 	int changed;
287 
288 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
289 		m = hw_param_mask(params, k);
290 		if (snd_mask_empty(m))
291 			return -EINVAL;
292 
293 		/* The caller did not request a change to this parameter. */
294 		if (!(params->rmask & (1 << k)))
295 			continue;
296 
297 		if (trace_hw_mask_param_enabled())
298 			old_mask = *m;
299 
300 		changed = snd_mask_refine(m, constrs_mask(constrs, k));
301 		if (changed < 0)
302 			return changed;
303 		if (changed == 0)
304 			continue;
305 
306 		/* Set the corresponding flag so that the caller sees the change. */
307 		trace_hw_mask_param(substream, k, 0, &old_mask, m);
308 		params->cmask |= 1 << k;
309 	}
310 
311 	return 0;
312 }
313 
314 static int constrain_interval_params(struct snd_pcm_substream *substream,
315 				     struct snd_pcm_hw_params *params)
316 {
317 	struct snd_pcm_hw_constraints *constrs =
318 					&substream->runtime->hw_constraints;
319 	struct snd_interval *i;
320 	unsigned int k;
321 	struct snd_interval old_interval;
322 	int changed;
323 
324 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
325 		i = hw_param_interval(params, k);
326 		if (snd_interval_empty(i))
327 			return -EINVAL;
328 
329 		/* The caller did not request a change to this parameter. */
330 		if (!(params->rmask & (1 << k)))
331 			continue;
332 
333 		if (trace_hw_interval_param_enabled())
334 			old_interval = *i;
335 
336 		changed = snd_interval_refine(i, constrs_interval(constrs, k));
337 		if (changed < 0)
338 			return changed;
339 		if (changed == 0)
340 			continue;
341 
342 		/* Set the corresponding flag so that the caller sees the change. */
343 		trace_hw_interval_param(substream, k, 0, &old_interval, i);
344 		params->cmask |= 1 << k;
345 	}
346 
347 	return 0;
348 }
349 
350 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
351 				     struct snd_pcm_hw_params *params)
352 {
353 	struct snd_pcm_hw_constraints *constrs =
354 					&substream->runtime->hw_constraints;
355 	unsigned int k;
356 	unsigned int *rstamps;
357 	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
358 	unsigned int stamp;
359 	struct snd_pcm_hw_rule *r;
360 	unsigned int d;
361 	struct snd_mask old_mask;
362 	struct snd_interval old_interval;
363 	bool again;
364 	int changed, err = 0;
365 
366 	/*
367 	 * Each application of a rule has its own sequence number.
368 	 *
369 	 * Each member of the 'rstamps' array represents the sequence number of
370 	 * the most recent application of the corresponding rule.
371 	 */
372 	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
373 	if (!rstamps)
374 		return -ENOMEM;
375 
376 	/*
377 	 * Each member of the 'vstamps' array represents the sequence number of
378 	 * the most recent rule application in which the corresponding parameter
379 	 * was changed.
380 	 *
381 	 * In the initial state, the elements corresponding to parameters
382 	 * requested by the caller are set to 1. For unrequested parameters,
383 	 * the corresponding members are 0 so that they are never changed again.
384 	 */
385 	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
386 		vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
387 
388 	/* Due to the above design, actual sequence number starts at 2. */
389 	stamp = 2;
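	/*
	 * Example: if rule k depends on SNDRV_PCM_HW_PARAM_RATE and a later
	 * rule changes the rate interval at stamp 5, vstamps[RATE] becomes 5,
	 * which exceeds rstamps[k], so rule k is applied again on the next
	 * pass of the retry loop below.
	 */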
390 retry:
391 	/* Apply all rules in order. */
392 	again = false;
393 	for (k = 0; k < constrs->rules_num; k++) {
394 		r = &constrs->rules[k];
395 
396 		/*
397 		 * Check the condition bits of this rule. When the rule has
398 		 * condition bits and none of them is set in the request
399 		 * flags, the rule is skipped. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
400 		 * is an example of such a condition bit.
401 		 */
402 		if (r->cond && !(r->cond & params->flags))
403 			continue;
404 
405 		/*
406 		 * The 'deps' array includes at most three dependencies
407 		 * on SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
408 		 * member of this array is a sentinel and should be a
409 		 * negative value.
410 		 *
411 		 * This rule should be processed this time only when its
412 		 * dependent parameters were changed by earlier applications
413 		 * of the other rules.
414 		 */
415 		for (d = 0; r->deps[d] >= 0; d++) {
416 			if (vstamps[r->deps[d]] > rstamps[k])
417 				break;
418 		}
419 		if (r->deps[d] < 0)
420 			continue;
421 
422 		if (trace_hw_mask_param_enabled()) {
423 			if (hw_is_mask(r->var))
424 				old_mask = *hw_param_mask(params, r->var);
425 		}
426 		if (trace_hw_interval_param_enabled()) {
427 			if (hw_is_interval(r->var))
428 				old_interval = *hw_param_interval(params, r->var);
429 		}
430 
431 		changed = r->func(params, r);
432 		if (changed < 0) {
433 			err = changed;
434 			goto out;
435 		}
436 
437 		/*
438 		 * When the parameter is changed, notify the caller via the
439 		 * corresponding returned bit, then prepare for the next
440 		 * iteration.
441 		 */
442 		if (changed && r->var >= 0) {
443 			if (hw_is_mask(r->var)) {
444 				trace_hw_mask_param(substream, r->var,
445 					k + 1, &old_mask,
446 					hw_param_mask(params, r->var));
447 			}
448 			if (hw_is_interval(r->var)) {
449 				trace_hw_interval_param(substream, r->var,
450 					k + 1, &old_interval,
451 					hw_param_interval(params, r->var));
452 			}
453 
454 			params->cmask |= (1 << r->var);
455 			vstamps[r->var] = stamp;
456 			again = true;
457 		}
458 
459 		rstamps[k] = stamp++;
460 	}
461 
462 	/* Iterate to evaluate all rules till no parameters are changed. */
463 	if (again)
464 		goto retry;
465 
466  out:
467 	kfree(rstamps);
468 	return err;
469 }
470 
471 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
472 				     struct snd_pcm_hw_params *params)
473 {
474 	const struct snd_interval *i;
475 	const struct snd_mask *m;
476 	int err;
477 
478 	if (!params->msbits) {
479 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
480 		if (snd_interval_single(i))
481 			params->msbits = snd_interval_value(i);
482 	}
483 
484 	if (!params->rate_den) {
485 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
486 		if (snd_interval_single(i)) {
487 			params->rate_num = snd_interval_value(i);
488 			params->rate_den = 1;
489 		}
490 	}
491 
492 	if (!params->fifo_size) {
493 		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
494 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
495 		if (snd_mask_single(m) && snd_interval_single(i)) {
496 			err = substream->ops->ioctl(substream,
497 					SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
498 			if (err < 0)
499 				return err;
500 		}
501 	}
502 
503 	if (!params->info) {
504 		params->info = substream->runtime->hw.info;
505 		params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
506 				  SNDRV_PCM_INFO_DRAIN_TRIGGER);
507 		if (!hw_support_mmap(substream))
508 			params->info &= ~(SNDRV_PCM_INFO_MMAP |
509 					  SNDRV_PCM_INFO_MMAP_VALID);
510 	}
511 
512 	return 0;
513 }
514 
515 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
516 		      struct snd_pcm_hw_params *params)
517 {
518 	int err;
519 
520 	params->info = 0;
521 	params->fifo_size = 0;
522 	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
523 		params->msbits = 0;
524 	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
525 		params->rate_num = 0;
526 		params->rate_den = 0;
527 	}
528 
529 	err = constrain_mask_params(substream, params);
530 	if (err < 0)
531 		return err;
532 
533 	err = constrain_interval_params(substream, params);
534 	if (err < 0)
535 		return err;
536 
537 	err = constrain_params_by_rules(substream, params);
538 	if (err < 0)
539 		return err;
540 
541 	params->rmask = 0;
542 
543 	return 0;
544 }
545 EXPORT_SYMBOL(snd_pcm_hw_refine);
546 
547 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
548 				  struct snd_pcm_hw_params __user * _params)
549 {
550 	struct snd_pcm_hw_params *params;
551 	int err;
552 
553 	params = memdup_user(_params, sizeof(*params));
554 	if (IS_ERR(params))
555 		return PTR_ERR(params);
556 
557 	err = snd_pcm_hw_refine(substream, params);
558 	if (err < 0)
559 		goto end;
560 
561 	err = fixup_unreferenced_params(substream, params);
562 	if (err < 0)
563 		goto end;
564 
565 	if (copy_to_user(_params, params, sizeof(*params)))
566 		err = -EFAULT;
567 end:
568 	kfree(params);
569 	return err;
570 }
571 
572 static int period_to_usecs(struct snd_pcm_runtime *runtime)
573 {
574 	int usecs;
575 
576 	if (! runtime->rate)
577 		return -1; /* invalid */
578 
579 	/* take 75% of period time as the deadline */
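	/* e.g. rate 48000 Hz with period_size 1024 frames -> 16000 usecs */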
580 	usecs = (750000 / runtime->rate) * runtime->period_size;
581 	usecs += ((750000 % runtime->rate) * runtime->period_size) /
582 		runtime->rate;
583 
584 	return usecs;
585 }
586 
587 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
588 {
589 	snd_pcm_stream_lock_irq(substream);
590 	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
591 		substream->runtime->status->state = state;
592 	snd_pcm_stream_unlock_irq(substream);
593 }
594 
595 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
596 					int event)
597 {
598 #ifdef CONFIG_SND_PCM_TIMER
599 	if (substream->timer)
600 		snd_timer_notify(substream->timer, event,
601 					&substream->runtime->trigger_tstamp);
602 #endif
603 }
604 
605 /**
606  * snd_pcm_hw_params_choose - choose a configuration defined by @params
607  * @pcm: PCM substream instance
608  * @params: the hw_params instance
609  *
610  * Choose one configuration from configuration space defined by @params.
611  * The configuration chosen is the one obtained by fixing, in this order:
612  * first access, first format, first subformat, min channels,
613  * min rate, min period time, max buffer size, min tick time.
614  *
615  * Return: Zero if successful, or a negative error code on failure.
616  */
617 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
618 				    struct snd_pcm_hw_params *params)
619 {
620 	static const int vars[] = {
621 		SNDRV_PCM_HW_PARAM_ACCESS,
622 		SNDRV_PCM_HW_PARAM_FORMAT,
623 		SNDRV_PCM_HW_PARAM_SUBFORMAT,
624 		SNDRV_PCM_HW_PARAM_CHANNELS,
625 		SNDRV_PCM_HW_PARAM_RATE,
626 		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
627 		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
628 		SNDRV_PCM_HW_PARAM_TICK_TIME,
629 		-1
630 	};
631 	const int *v;
632 	struct snd_mask old_mask;
633 	struct snd_interval old_interval;
634 	int changed;
635 
636 	for (v = vars; *v != -1; v++) {
637 		/* Keep old parameter to trace. */
638 		if (trace_hw_mask_param_enabled()) {
639 			if (hw_is_mask(*v))
640 				old_mask = *hw_param_mask(params, *v);
641 		}
642 		if (trace_hw_interval_param_enabled()) {
643 			if (hw_is_interval(*v))
644 				old_interval = *hw_param_interval(params, *v);
645 		}
646 		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
647 			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
648 		else
649 			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
650 		if (changed < 0)
651 			return changed;
652 		if (changed == 0)
653 			continue;
654 
655 		/* Trace the changed parameter. */
656 		if (hw_is_mask(*v)) {
657 			trace_hw_mask_param(pcm, *v, 0, &old_mask,
658 					    hw_param_mask(params, *v));
659 		}
660 		if (hw_is_interval(*v)) {
661 			trace_hw_interval_param(pcm, *v, 0, &old_interval,
662 						hw_param_interval(params, *v));
663 		}
664 	}
665 
666 	return 0;
667 }
668 
669 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
670 			     struct snd_pcm_hw_params *params)
671 {
672 	struct snd_pcm_runtime *runtime;
673 	int err, usecs;
674 	unsigned int bits;
675 	snd_pcm_uframes_t frames;
676 
677 	if (PCM_RUNTIME_CHECK(substream))
678 		return -ENXIO;
679 	runtime = substream->runtime;
680 	snd_pcm_stream_lock_irq(substream);
681 	switch (runtime->status->state) {
682 	case SNDRV_PCM_STATE_OPEN:
683 	case SNDRV_PCM_STATE_SETUP:
684 	case SNDRV_PCM_STATE_PREPARED:
685 		break;
686 	default:
687 		snd_pcm_stream_unlock_irq(substream);
688 		return -EBADFD;
689 	}
690 	snd_pcm_stream_unlock_irq(substream);
691 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
692 	if (!substream->oss.oss)
693 #endif
694 		if (atomic_read(&substream->mmap_count))
695 			return -EBADFD;
696 
697 	params->rmask = ~0U;
698 	err = snd_pcm_hw_refine(substream, params);
699 	if (err < 0)
700 		goto _error;
701 
702 	err = snd_pcm_hw_params_choose(substream, params);
703 	if (err < 0)
704 		goto _error;
705 
706 	err = fixup_unreferenced_params(substream, params);
707 	if (err < 0)
708 		goto _error;
709 
710 	if (substream->ops->hw_params != NULL) {
711 		err = substream->ops->hw_params(substream, params);
712 		if (err < 0)
713 			goto _error;
714 	}
715 
716 	runtime->access = params_access(params);
717 	runtime->format = params_format(params);
718 	runtime->subformat = params_subformat(params);
719 	runtime->channels = params_channels(params);
720 	runtime->rate = params_rate(params);
721 	runtime->period_size = params_period_size(params);
722 	runtime->periods = params_periods(params);
723 	runtime->buffer_size = params_buffer_size(params);
724 	runtime->info = params->info;
725 	runtime->rate_num = params->rate_num;
726 	runtime->rate_den = params->rate_den;
727 	runtime->no_period_wakeup =
728 			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
729 			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
730 
731 	bits = snd_pcm_format_physical_width(runtime->format);
732 	runtime->sample_bits = bits;
733 	bits *= runtime->channels;
734 	runtime->frame_bits = bits;
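	/*
	 * The loop below computes the smallest byte-aligned frame count:
	 * e.g. a mono 4-bit format has frame_bits = 4, so byte_align = 1
	 * and min_align = 2 (two frames share one byte).
	 */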
735 	frames = 1;
736 	while (bits % 8 != 0) {
737 		bits *= 2;
738 		frames *= 2;
739 	}
740 	runtime->byte_align = bits / 8;
741 	runtime->min_align = frames;
742 
743 	/* Default sw params */
744 	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
745 	runtime->period_step = 1;
746 	runtime->control->avail_min = runtime->period_size;
747 	runtime->start_threshold = 1;
748 	runtime->stop_threshold = runtime->buffer_size;
749 	runtime->silence_threshold = 0;
750 	runtime->silence_size = 0;
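	/*
	 * boundary is the largest power-of-two multiple of buffer_size that
	 * still leaves room for one more buffer below LONG_MAX; it serves as
	 * the wrap-around base for hw_ptr/appl_ptr arithmetic.
	 */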
751 	runtime->boundary = runtime->buffer_size;
752 	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
753 		runtime->boundary *= 2;
754 
755 	snd_pcm_timer_resolution_change(substream);
756 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
757 
758 	if (pm_qos_request_active(&substream->latency_pm_qos_req))
759 		pm_qos_remove_request(&substream->latency_pm_qos_req);
760 	if ((usecs = period_to_usecs(runtime)) >= 0)
761 		pm_qos_add_request(&substream->latency_pm_qos_req,
762 				   PM_QOS_CPU_DMA_LATENCY, usecs);
763 	return 0;
764  _error:
765 	/* hardware might be unusable from this point on,
766 	   so force the application to retry with
767 	   correct hardware parameter settings */
768 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
769 	if (substream->ops->hw_free != NULL)
770 		substream->ops->hw_free(substream);
771 	return err;
772 }
773 
774 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
775 				  struct snd_pcm_hw_params __user * _params)
776 {
777 	struct snd_pcm_hw_params *params;
778 	int err;
779 
780 	params = memdup_user(_params, sizeof(*params));
781 	if (IS_ERR(params))
782 		return PTR_ERR(params);
783 
784 	err = snd_pcm_hw_params(substream, params);
785 	if (err < 0)
786 		goto end;
787 
788 	if (copy_to_user(_params, params, sizeof(*params)))
789 		err = -EFAULT;
790 end:
791 	kfree(params);
792 	return err;
793 }
794 
795 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
796 {
797 	struct snd_pcm_runtime *runtime;
798 	int result = 0;
799 
800 	if (PCM_RUNTIME_CHECK(substream))
801 		return -ENXIO;
802 	runtime = substream->runtime;
803 	snd_pcm_stream_lock_irq(substream);
804 	switch (runtime->status->state) {
805 	case SNDRV_PCM_STATE_SETUP:
806 	case SNDRV_PCM_STATE_PREPARED:
807 		break;
808 	default:
809 		snd_pcm_stream_unlock_irq(substream);
810 		return -EBADFD;
811 	}
812 	snd_pcm_stream_unlock_irq(substream);
813 	if (atomic_read(&substream->mmap_count))
814 		return -EBADFD;
815 	if (substream->ops->hw_free)
816 		result = substream->ops->hw_free(substream);
817 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
818 	pm_qos_remove_request(&substream->latency_pm_qos_req);
819 	return result;
820 }
821 
822 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
823 			     struct snd_pcm_sw_params *params)
824 {
825 	struct snd_pcm_runtime *runtime;
826 	int err;
827 
828 	if (PCM_RUNTIME_CHECK(substream))
829 		return -ENXIO;
830 	runtime = substream->runtime;
831 	snd_pcm_stream_lock_irq(substream);
832 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
833 		snd_pcm_stream_unlock_irq(substream);
834 		return -EBADFD;
835 	}
836 	snd_pcm_stream_unlock_irq(substream);
837 
838 	if (params->tstamp_mode < 0 ||
839 	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
840 		return -EINVAL;
841 	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
842 	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
843 		return -EINVAL;
844 	if (params->avail_min == 0)
845 		return -EINVAL;
846 	if (params->silence_size >= runtime->boundary) {
847 		if (params->silence_threshold != 0)
848 			return -EINVAL;
849 	} else {
850 		if (params->silence_size > params->silence_threshold)
851 			return -EINVAL;
852 		if (params->silence_threshold > runtime->buffer_size)
853 			return -EINVAL;
854 	}
855 	err = 0;
856 	snd_pcm_stream_lock_irq(substream);
857 	runtime->tstamp_mode = params->tstamp_mode;
858 	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
859 		runtime->tstamp_type = params->tstamp_type;
860 	runtime->period_step = params->period_step;
861 	runtime->control->avail_min = params->avail_min;
862 	runtime->start_threshold = params->start_threshold;
863 	runtime->stop_threshold = params->stop_threshold;
864 	runtime->silence_threshold = params->silence_threshold;
865 	runtime->silence_size = params->silence_size;
866 	params->boundary = runtime->boundary;
867 	if (snd_pcm_running(substream)) {
868 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
869 		    runtime->silence_size > 0)
870 			snd_pcm_playback_silence(substream, ULONG_MAX);
871 		err = snd_pcm_update_state(substream, runtime);
872 	}
873 	snd_pcm_stream_unlock_irq(substream);
874 	return err;
875 }
876 
877 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
878 				  struct snd_pcm_sw_params __user * _params)
879 {
880 	struct snd_pcm_sw_params params;
881 	int err;
882 	if (copy_from_user(&params, _params, sizeof(params)))
883 		return -EFAULT;
884 	err = snd_pcm_sw_params(substream, &params);
885 	if (copy_to_user(_params, &params, sizeof(params)))
886 		return -EFAULT;
887 	return err;
888 }
889 
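/*
 * Delay as reported to user space: for playback, the queued frames not yet
 * played (hw_avail); for capture, the captured frames not yet read (avail);
 * plus any extra delay reported by the driver in runtime->delay.
 */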
890 static inline snd_pcm_uframes_t
891 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
892 {
893 	snd_pcm_uframes_t delay;
894 
895 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
896 		delay = snd_pcm_playback_hw_avail(substream->runtime);
897 	else
898 		delay = snd_pcm_capture_avail(substream->runtime);
899 	return delay + substream->runtime->delay;
900 }
901 
902 int snd_pcm_status(struct snd_pcm_substream *substream,
903 		   struct snd_pcm_status *status)
904 {
905 	struct snd_pcm_runtime *runtime = substream->runtime;
906 
907 	snd_pcm_stream_lock_irq(substream);
908 
909 	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
910 					&runtime->audio_tstamp_config);
911 
912 	/* backwards compatible behavior */
913 	if (runtime->audio_tstamp_config.type_requested ==
914 		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
915 		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
916 			runtime->audio_tstamp_config.type_requested =
917 				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
918 		else
919 			runtime->audio_tstamp_config.type_requested =
920 				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
921 		runtime->audio_tstamp_report.valid = 0;
922 	} else
923 		runtime->audio_tstamp_report.valid = 1;
924 
925 	status->state = runtime->status->state;
926 	status->suspended_state = runtime->status->suspended_state;
927 	if (status->state == SNDRV_PCM_STATE_OPEN)
928 		goto _end;
929 	status->trigger_tstamp = runtime->trigger_tstamp;
930 	if (snd_pcm_running(substream)) {
931 		snd_pcm_update_hw_ptr(substream);
932 		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
933 			status->tstamp = runtime->status->tstamp;
934 			status->driver_tstamp = runtime->driver_tstamp;
935 			status->audio_tstamp =
936 				runtime->status->audio_tstamp;
937 			if (runtime->audio_tstamp_report.valid == 1)
938 				/* backwards compatibility, no report provided in COMPAT mode */
939 				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
940 								&status->audio_tstamp_accuracy,
941 								&runtime->audio_tstamp_report);
942 
943 			goto _tstamp_end;
944 		}
945 	} else {
946 		/* get tstamp only in fallback mode and only if enabled */
947 		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
948 			snd_pcm_gettime(runtime, &status->tstamp);
949 	}
950  _tstamp_end:
951 	status->appl_ptr = runtime->control->appl_ptr;
952 	status->hw_ptr = runtime->status->hw_ptr;
953 	status->avail = snd_pcm_avail(substream);
954 	status->delay = snd_pcm_running(substream) ?
955 		snd_pcm_calc_delay(substream) : 0;
956 	status->avail_max = runtime->avail_max;
957 	status->overrange = runtime->overrange;
958 	runtime->avail_max = 0;
959 	runtime->overrange = 0;
960  _end:
961  	snd_pcm_stream_unlock_irq(substream);
962 	return 0;
963 }
964 
965 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
966 			       struct snd_pcm_status __user * _status,
967 			       bool ext)
968 {
969 	struct snd_pcm_status status;
970 	int res;
971 
972 	memset(&status, 0, sizeof(status));
973 	/*
974 	 * with extension, parameters are read/write,
975 	 * get audio_tstamp_data from user,
976 	 * ignore rest of status structure
977 	 */
978 	if (ext && get_user(status.audio_tstamp_data,
979 				(u32 __user *)(&_status->audio_tstamp_data)))
980 		return -EFAULT;
981 	res = snd_pcm_status(substream, &status);
982 	if (res < 0)
983 		return res;
984 	if (copy_to_user(_status, &status, sizeof(status)))
985 		return -EFAULT;
986 	return 0;
987 }
988 
989 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
990 				struct snd_pcm_channel_info * info)
991 {
992 	struct snd_pcm_runtime *runtime;
993 	unsigned int channel;
994 
995 	channel = info->channel;
996 	runtime = substream->runtime;
997 	snd_pcm_stream_lock_irq(substream);
998 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
999 		snd_pcm_stream_unlock_irq(substream);
1000 		return -EBADFD;
1001 	}
1002 	snd_pcm_stream_unlock_irq(substream);
1003 	if (channel >= runtime->channels)
1004 		return -EINVAL;
1005 	memset(info, 0, sizeof(*info));
1006 	info->channel = channel;
1007 	return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
1008 }
1009 
1010 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
1011 				     struct snd_pcm_channel_info __user * _info)
1012 {
1013 	struct snd_pcm_channel_info info;
1014 	int res;
1015 
1016 	if (copy_from_user(&info, _info, sizeof(info)))
1017 		return -EFAULT;
1018 	res = snd_pcm_channel_info(substream, &info);
1019 	if (res < 0)
1020 		return res;
1021 	if (copy_to_user(_info, &info, sizeof(info)))
1022 		return -EFAULT;
1023 	return 0;
1024 }
1025 
1026 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
1027 {
1028 	struct snd_pcm_runtime *runtime = substream->runtime;
1029 	if (runtime->trigger_master == NULL)
1030 		return;
1031 	if (runtime->trigger_master == substream) {
1032 		if (!runtime->trigger_tstamp_latched)
1033 			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1034 	} else {
1035 		snd_pcm_trigger_tstamp(runtime->trigger_master);
1036 		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1037 	}
1038 	runtime->trigger_master = NULL;
1039 }
1040 
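/*
 * Callbacks used by the PCM action helpers below:
 *   pre_action  - validate the current state (and usually pick the
 *                 trigger master)
 *   do_action   - perform the actual trigger / state transition
 *   undo_action - roll back do_action when a later group member fails
 *   post_action - update the stream state and related bookkeeping
 *                 (timestamps, timer notification, wakeups)
 */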
1041 struct action_ops {
1042 	int (*pre_action)(struct snd_pcm_substream *substream, int state);
1043 	int (*do_action)(struct snd_pcm_substream *substream, int state);
1044 	void (*undo_action)(struct snd_pcm_substream *substream, int state);
1045 	void (*post_action)(struct snd_pcm_substream *substream, int state);
1046 };
1047 
1048 /*
1049  *  this function is the core for handling linked streams
1050  *  Note: the stream state might be changed even on failure
1051  *  Note2: call with calling stream lock + link lock
1052  */
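/*
 * Locks of the other group members are taken with an increasing nesting
 * depth (spin_lock_nested()/mutex_lock_nested()) so that lockdep does not
 * flag the recursive per-stream locking as a self-deadlock.
 */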
1053 static int snd_pcm_action_group(const struct action_ops *ops,
1054 				struct snd_pcm_substream *substream,
1055 				int state, int do_lock)
1056 {
1057 	struct snd_pcm_substream *s = NULL;
1058 	struct snd_pcm_substream *s1;
1059 	int res = 0, depth = 1;
1060 
1061 	snd_pcm_group_for_each_entry(s, substream) {
1062 		if (do_lock && s != substream) {
1063 			if (s->pcm->nonatomic)
1064 				mutex_lock_nested(&s->self_group.mutex, depth);
1065 			else
1066 				spin_lock_nested(&s->self_group.lock, depth);
1067 			depth++;
1068 		}
1069 		res = ops->pre_action(s, state);
1070 		if (res < 0)
1071 			goto _unlock;
1072 	}
1073 	snd_pcm_group_for_each_entry(s, substream) {
1074 		res = ops->do_action(s, state);
1075 		if (res < 0) {
1076 			if (ops->undo_action) {
1077 				snd_pcm_group_for_each_entry(s1, substream) {
1078 					if (s1 == s) /* failed stream */
1079 						break;
1080 					ops->undo_action(s1, state);
1081 				}
1082 			}
1083 			s = NULL; /* unlock all */
1084 			goto _unlock;
1085 		}
1086 	}
1087 	snd_pcm_group_for_each_entry(s, substream) {
1088 		ops->post_action(s, state);
1089 	}
1090  _unlock:
1091 	if (do_lock) {
1092 		/* unlock streams */
1093 		snd_pcm_group_for_each_entry(s1, substream) {
1094 			if (s1 != substream) {
1095 				if (s1->pcm->nonatomic)
1096 					mutex_unlock(&s1->self_group.mutex);
1097 				else
1098 					spin_unlock(&s1->self_group.lock);
1099 			}
1100 			if (s1 == s)	/* end */
1101 				break;
1102 		}
1103 	}
1104 	return res;
1105 }
1106 
1107 /*
1108  *  Note: call with stream lock
1109  */
1110 static int snd_pcm_action_single(const struct action_ops *ops,
1111 				 struct snd_pcm_substream *substream,
1112 				 int state)
1113 {
1114 	int res;
1115 
1116 	res = ops->pre_action(substream, state);
1117 	if (res < 0)
1118 		return res;
1119 	res = ops->do_action(substream, state);
1120 	if (res == 0)
1121 		ops->post_action(substream, state);
1122 	else if (ops->undo_action)
1123 		ops->undo_action(substream, state);
1124 	return res;
1125 }
1126 
1127 /*
1128  *  Note: call with stream lock
1129  */
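/*
 * For a linked stream the group lock is needed in addition to the already
 * held self_group lock.  A trylock is attempted first; if it fails, the
 * self lock is dropped and both locks are re-taken in group -> self order
 * to avoid an ABBA deadlock against another linked substream.
 */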
1130 static int snd_pcm_action(const struct action_ops *ops,
1131 			  struct snd_pcm_substream *substream,
1132 			  int state)
1133 {
1134 	int res;
1135 
1136 	if (!snd_pcm_stream_linked(substream))
1137 		return snd_pcm_action_single(ops, substream, state);
1138 
1139 	if (substream->pcm->nonatomic) {
1140 		if (!mutex_trylock(&substream->group->mutex)) {
1141 			mutex_unlock(&substream->self_group.mutex);
1142 			mutex_lock(&substream->group->mutex);
1143 			mutex_lock(&substream->self_group.mutex);
1144 		}
1145 		res = snd_pcm_action_group(ops, substream, state, 1);
1146 		mutex_unlock(&substream->group->mutex);
1147 	} else {
1148 		if (!spin_trylock(&substream->group->lock)) {
1149 			spin_unlock(&substream->self_group.lock);
1150 			spin_lock(&substream->group->lock);
1151 			spin_lock(&substream->self_group.lock);
1152 		}
1153 		res = snd_pcm_action_group(ops, substream, state, 1);
1154 		spin_unlock(&substream->group->lock);
1155 	}
1156 	return res;
1157 }
1158 
1159 /*
1160  *  Note: don't use any locks before
1161  */
1162 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1163 				   struct snd_pcm_substream *substream,
1164 				   int state)
1165 {
1166 	int res;
1167 
1168 	snd_pcm_stream_lock_irq(substream);
1169 	res = snd_pcm_action(ops, substream, state);
1170 	snd_pcm_stream_unlock_irq(substream);
1171 	return res;
1172 }
1173 
1174 /*
1175  */
1176 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1177 				    struct snd_pcm_substream *substream,
1178 				    int state)
1179 {
1180 	int res;
1181 
1182 	down_read(&snd_pcm_link_rwsem);
1183 	if (snd_pcm_stream_linked(substream))
1184 		res = snd_pcm_action_group(ops, substream, state, 0);
1185 	else
1186 		res = snd_pcm_action_single(ops, substream, state);
1187 	up_read(&snd_pcm_link_rwsem);
1188 	return res;
1189 }
1190 
1191 /*
1192  * start callbacks
1193  */
1194 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1195 {
1196 	struct snd_pcm_runtime *runtime = substream->runtime;
1197 	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1198 		return -EBADFD;
1199 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1200 	    !snd_pcm_playback_data(substream))
1201 		return -EPIPE;
1202 	runtime->trigger_tstamp_latched = false;
1203 	runtime->trigger_master = substream;
1204 	return 0;
1205 }
1206 
1207 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1208 {
1209 	if (substream->runtime->trigger_master != substream)
1210 		return 0;
1211 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1212 }
1213 
1214 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1215 {
1216 	if (substream->runtime->trigger_master == substream)
1217 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1218 }
1219 
1220 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1221 {
1222 	struct snd_pcm_runtime *runtime = substream->runtime;
1223 	snd_pcm_trigger_tstamp(substream);
1224 	runtime->hw_ptr_jiffies = jiffies;
1225 	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1226 							    runtime->rate;
1227 	runtime->status->state = state;
1228 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1229 	    runtime->silence_size > 0)
1230 		snd_pcm_playback_silence(substream, ULONG_MAX);
1231 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1232 }
1233 
1234 static const struct action_ops snd_pcm_action_start = {
1235 	.pre_action = snd_pcm_pre_start,
1236 	.do_action = snd_pcm_do_start,
1237 	.undo_action = snd_pcm_undo_start,
1238 	.post_action = snd_pcm_post_start
1239 };
1240 
1241 /**
1242  * snd_pcm_start - start all linked streams
1243  * @substream: the PCM substream instance
1244  *
1245  * Return: Zero if successful, or a negative error code.
1246  * The stream lock must be acquired before calling this function.
1247  */
1248 int snd_pcm_start(struct snd_pcm_substream *substream)
1249 {
1250 	return snd_pcm_action(&snd_pcm_action_start, substream,
1251 			      SNDRV_PCM_STATE_RUNNING);
1252 }
1253 
1254 /* take the stream lock and start the streams */
1255 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1256 {
1257 	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1258 				       SNDRV_PCM_STATE_RUNNING);
1259 }
1260 
1261 /*
1262  * stop callbacks
1263  */
1264 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1265 {
1266 	struct snd_pcm_runtime *runtime = substream->runtime;
1267 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1268 		return -EBADFD;
1269 	runtime->trigger_master = substream;
1270 	return 0;
1271 }
1272 
1273 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1274 {
1275 	if (substream->runtime->trigger_master == substream &&
1276 	    snd_pcm_running(substream))
1277 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1278 	return 0; /* unconditionally stop all substreams */
1279 }
1280 
1281 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1282 {
1283 	struct snd_pcm_runtime *runtime = substream->runtime;
1284 	if (runtime->status->state != state) {
1285 		snd_pcm_trigger_tstamp(substream);
1286 		runtime->status->state = state;
1287 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1288 	}
1289 	wake_up(&runtime->sleep);
1290 	wake_up(&runtime->tsleep);
1291 }
1292 
1293 static const struct action_ops snd_pcm_action_stop = {
1294 	.pre_action = snd_pcm_pre_stop,
1295 	.do_action = snd_pcm_do_stop,
1296 	.post_action = snd_pcm_post_stop
1297 };
1298 
1299 /**
1300  * snd_pcm_stop - try to stop all running streams in the substream group
1301  * @substream: the PCM substream instance
1302  * @state: PCM state after stopping the stream
1303  *
1304  * The state of each stream is then changed to the given state unconditionally.
1305  *
1306  * Return: Zero if successful, or a negative error code.
1307  */
1308 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1309 {
1310 	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1311 }
1312 EXPORT_SYMBOL(snd_pcm_stop);
1313 
1314 /**
1315  * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1316  * @substream: the PCM substream
1317  *
1318  * After stopping, the state is changed to SETUP.
1319  * Unlike snd_pcm_stop(), this affects only the given stream.
1320  *
1321  * Return: Zero if successful, or a negative error code.
1322  */
1323 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1324 {
1325 	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1326 				     SNDRV_PCM_STATE_SETUP);
1327 }
1328 
1329 /**
1330  * snd_pcm_stop_xrun - stop the running streams as XRUN
1331  * @substream: the PCM substream instance
1332  *
1333  * This stops the given running substream (and all linked substreams) as XRUN.
1334  * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1335  *
1336  * Return: Zero if successful, or a negative error code.
1337  */
1338 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1339 {
1340 	unsigned long flags;
1341 
1342 	snd_pcm_stream_lock_irqsave(substream, flags);
1343 	if (substream->runtime && snd_pcm_running(substream))
1344 		__snd_pcm_xrun(substream);
1345 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1346 	return 0;
1347 }
1348 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
1349 
1350 /*
1351  * pause callbacks
1352  */
1353 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1354 {
1355 	struct snd_pcm_runtime *runtime = substream->runtime;
1356 	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1357 		return -ENOSYS;
1358 	if (push) {
1359 		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1360 			return -EBADFD;
1361 	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1362 		return -EBADFD;
1363 	runtime->trigger_master = substream;
1364 	return 0;
1365 }
1366 
1367 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1368 {
1369 	if (substream->runtime->trigger_master != substream)
1370 		return 0;
1371 	/* some drivers might use hw_ptr to recover from the pause -
1372 	   update the hw_ptr now */
1373 	if (push)
1374 		snd_pcm_update_hw_ptr(substream);
1375 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done via a delta
1376 	 * against the current jiffies; setting hw_ptr_jiffies far in the past
1377 	 * gives a large enough delta to effectively skip the check once.
1378 	 */
1379 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1380 	return substream->ops->trigger(substream,
1381 				       push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1382 					      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1383 }
1384 
1385 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1386 {
1387 	if (substream->runtime->trigger_master == substream)
1388 		substream->ops->trigger(substream,
1389 					push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1390 					SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1391 }
1392 
1393 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1394 {
1395 	struct snd_pcm_runtime *runtime = substream->runtime;
1396 	snd_pcm_trigger_tstamp(substream);
1397 	if (push) {
1398 		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1399 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1400 		wake_up(&runtime->sleep);
1401 		wake_up(&runtime->tsleep);
1402 	} else {
1403 		runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1404 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1405 	}
1406 }
1407 
1408 static const struct action_ops snd_pcm_action_pause = {
1409 	.pre_action = snd_pcm_pre_pause,
1410 	.do_action = snd_pcm_do_pause,
1411 	.undo_action = snd_pcm_undo_pause,
1412 	.post_action = snd_pcm_post_pause
1413 };
1414 
1415 /*
1416  * Push/release the pause for all linked streams.
1417  */
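/* push = 1 pauses the stream (PAUSE_PUSH), push = 0 resumes it (PAUSE_RELEASE) */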
1418 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1419 {
1420 	return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1421 }
1422 
1423 #ifdef CONFIG_PM
1424 /* suspend */
1425 
1426 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1427 {
1428 	struct snd_pcm_runtime *runtime = substream->runtime;
1429 	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1430 		return -EBUSY;
1431 	runtime->trigger_master = substream;
1432 	return 0;
1433 }
1434 
1435 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1436 {
1437 	struct snd_pcm_runtime *runtime = substream->runtime;
1438 	if (runtime->trigger_master != substream)
1439 		return 0;
1440 	if (! snd_pcm_running(substream))
1441 		return 0;
1442 	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1443 	return 0; /* suspend unconditionally */
1444 }
1445 
1446 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1447 {
1448 	struct snd_pcm_runtime *runtime = substream->runtime;
1449 	snd_pcm_trigger_tstamp(substream);
1450 	runtime->status->suspended_state = runtime->status->state;
1451 	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1452 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1453 	wake_up(&runtime->sleep);
1454 	wake_up(&runtime->tsleep);
1455 }
1456 
1457 static const struct action_ops snd_pcm_action_suspend = {
1458 	.pre_action = snd_pcm_pre_suspend,
1459 	.do_action = snd_pcm_do_suspend,
1460 	.post_action = snd_pcm_post_suspend
1461 };
1462 
1463 /*
1464  * snd_pcm_suspend - trigger SUSPEND to all linked streams
1465  * @substream: the PCM substream
1466  *
1467  * After this call, all streams are changed to SUSPENDED state.
1468  *
1469  * Return: Zero if successful, or a negative error code.
1470  */
1471 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1472 {
1473 	int err;
1474 	unsigned long flags;
1475 
1476 	snd_pcm_stream_lock_irqsave(substream, flags);
1477 	err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1478 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1479 	return err;
1480 }
1481 
1482 /**
1483  * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1484  * @pcm: the PCM instance
1485  *
1486  * After this call, all streams are changed to SUSPENDED state.
1487  *
1488  * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1489  */
1490 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1491 {
1492 	struct snd_pcm_substream *substream;
1493 	int stream, err = 0;
1494 
1495 	if (! pcm)
1496 		return 0;
1497 
1498 	for (stream = 0; stream < 2; stream++) {
1499 		for (substream = pcm->streams[stream].substream;
1500 		     substream; substream = substream->next) {
1501 			/* FIXME: the open/close code should lock this as well */
1502 			if (substream->runtime == NULL)
1503 				continue;
1504 			err = snd_pcm_suspend(substream);
1505 			if (err < 0 && err != -EBUSY)
1506 				return err;
1507 		}
1508 	}
1509 	return 0;
1510 }
1511 EXPORT_SYMBOL(snd_pcm_suspend_all);
1512 
1513 /* resume */
1514 
1515 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1516 {
1517 	struct snd_pcm_runtime *runtime = substream->runtime;
1518 	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1519 		return -ENOSYS;
1520 	runtime->trigger_master = substream;
1521 	return 0;
1522 }
1523 
1524 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1525 {
1526 	struct snd_pcm_runtime *runtime = substream->runtime;
1527 	if (runtime->trigger_master != substream)
1528 		return 0;
1529 	/* DMA not running previously? */
1530 	if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1531 	    (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1532 	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1533 		return 0;
1534 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1535 }
1536 
1537 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1538 {
1539 	if (substream->runtime->trigger_master == substream &&
1540 	    snd_pcm_running(substream))
1541 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1542 }
1543 
1544 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1545 {
1546 	struct snd_pcm_runtime *runtime = substream->runtime;
1547 	snd_pcm_trigger_tstamp(substream);
1548 	runtime->status->state = runtime->status->suspended_state;
1549 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1550 }
1551 
1552 static const struct action_ops snd_pcm_action_resume = {
1553 	.pre_action = snd_pcm_pre_resume,
1554 	.do_action = snd_pcm_do_resume,
1555 	.undo_action = snd_pcm_undo_resume,
1556 	.post_action = snd_pcm_post_resume
1557 };
1558 
1559 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1560 {
1561 	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1562 }
1563 
1564 #else
1565 
1566 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1567 {
1568 	return -ENOSYS;
1569 }
1570 
1571 #endif /* CONFIG_PM */
1572 
1573 /*
1574  * xrun ioctl
1575  *
1576  * Change the RUNNING stream(s) to XRUN state.
1577  */
1578 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1579 {
1580 	struct snd_pcm_runtime *runtime = substream->runtime;
1581 	int result;
1582 
1583 	snd_pcm_stream_lock_irq(substream);
1584 	switch (runtime->status->state) {
1585 	case SNDRV_PCM_STATE_XRUN:
1586 		result = 0;	/* already there */
1587 		break;
1588 	case SNDRV_PCM_STATE_RUNNING:
1589 		__snd_pcm_xrun(substream);
1590 		result = 0;
1591 		break;
1592 	default:
1593 		result = -EBADFD;
1594 	}
1595 	snd_pcm_stream_unlock_irq(substream);
1596 	return result;
1597 }
1598 
1599 /*
1600  * reset ioctl
1601  */
1602 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1603 {
1604 	struct snd_pcm_runtime *runtime = substream->runtime;
1605 	switch (runtime->status->state) {
1606 	case SNDRV_PCM_STATE_RUNNING:
1607 	case SNDRV_PCM_STATE_PREPARED:
1608 	case SNDRV_PCM_STATE_PAUSED:
1609 	case SNDRV_PCM_STATE_SUSPENDED:
1610 		return 0;
1611 	default:
1612 		return -EBADFD;
1613 	}
1614 }
1615 
1616 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1617 {
1618 	struct snd_pcm_runtime *runtime = substream->runtime;
1619 	int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1620 	if (err < 0)
1621 		return err;
1622 	runtime->hw_ptr_base = 0;
1623 	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1624 		runtime->status->hw_ptr % runtime->period_size;
1625 	runtime->silence_start = runtime->status->hw_ptr;
1626 	runtime->silence_filled = 0;
1627 	return 0;
1628 }
1629 
1630 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1631 {
1632 	struct snd_pcm_runtime *runtime = substream->runtime;
1633 	runtime->control->appl_ptr = runtime->status->hw_ptr;
1634 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1635 	    runtime->silence_size > 0)
1636 		snd_pcm_playback_silence(substream, ULONG_MAX);
1637 }
1638 
1639 static const struct action_ops snd_pcm_action_reset = {
1640 	.pre_action = snd_pcm_pre_reset,
1641 	.do_action = snd_pcm_do_reset,
1642 	.post_action = snd_pcm_post_reset
1643 };
1644 
1645 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1646 {
1647 	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1648 }
1649 
1650 /*
1651  * prepare ioctl
1652  */
1653 /* we use the second argument for updating f_flags */
1654 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1655 			       int f_flags)
1656 {
1657 	struct snd_pcm_runtime *runtime = substream->runtime;
1658 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1659 	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1660 		return -EBADFD;
1661 	if (snd_pcm_running(substream))
1662 		return -EBUSY;
1663 	substream->f_flags = f_flags;
1664 	return 0;
1665 }
1666 
1667 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1668 {
1669 	int err;
1670 	err = substream->ops->prepare(substream);
1671 	if (err < 0)
1672 		return err;
1673 	return snd_pcm_do_reset(substream, 0);
1674 }
1675 
1676 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1677 {
1678 	struct snd_pcm_runtime *runtime = substream->runtime;
1679 	runtime->control->appl_ptr = runtime->status->hw_ptr;
1680 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1681 }
1682 
1683 static const struct action_ops snd_pcm_action_prepare = {
1684 	.pre_action = snd_pcm_pre_prepare,
1685 	.do_action = snd_pcm_do_prepare,
1686 	.post_action = snd_pcm_post_prepare
1687 };
1688 
1689 /**
1690  * snd_pcm_prepare - prepare the PCM substream to be triggerable
1691  * @substream: the PCM substream instance
1692  * @file: file to take f_flags from; may be NULL
1693  *
1694  * Return: Zero if successful, or a negative error code.
1695  */
1696 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1697 			   struct file *file)
1698 {
1699 	int f_flags;
1700 
1701 	if (file)
1702 		f_flags = file->f_flags;
1703 	else
1704 		f_flags = substream->f_flags;
1705 
1706 	snd_pcm_stream_lock_irq(substream);
1707 	switch (substream->runtime->status->state) {
1708 	case SNDRV_PCM_STATE_PAUSED:
1709 		snd_pcm_pause(substream, 0);
1710 		/* fallthru */
1711 	case SNDRV_PCM_STATE_SUSPENDED:
1712 		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1713 		break;
1714 	}
1715 	snd_pcm_stream_unlock_irq(substream);
1716 
1717 	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1718 					substream, f_flags);
1719 }
1720 
1721 /*
1722  * drain ioctl
1723  */
1724 
1725 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1726 {
1727 	struct snd_pcm_runtime *runtime = substream->runtime;
1728 	switch (runtime->status->state) {
1729 	case SNDRV_PCM_STATE_OPEN:
1730 	case SNDRV_PCM_STATE_DISCONNECTED:
1731 	case SNDRV_PCM_STATE_SUSPENDED:
1732 		return -EBADFD;
1733 	}
1734 	runtime->trigger_master = substream;
1735 	return 0;
1736 }
1737 
1738 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1739 {
1740 	struct snd_pcm_runtime *runtime = substream->runtime;
1741 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1742 		switch (runtime->status->state) {
1743 		case SNDRV_PCM_STATE_PREPARED:
1744 			/* start playback stream if possible */
1745 			if (! snd_pcm_playback_empty(substream)) {
1746 				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1747 				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1748 			} else {
1749 				runtime->status->state = SNDRV_PCM_STATE_SETUP;
1750 			}
1751 			break;
1752 		case SNDRV_PCM_STATE_RUNNING:
1753 			runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1754 			break;
1755 		case SNDRV_PCM_STATE_XRUN:
1756 			runtime->status->state = SNDRV_PCM_STATE_SETUP;
1757 			break;
1758 		default:
1759 			break;
1760 		}
1761 	} else {
1762 		/* stop running stream */
1763 		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1764 			int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1765 				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1766 			snd_pcm_do_stop(substream, new_state);
1767 			snd_pcm_post_stop(substream, new_state);
1768 		}
1769 	}
1770 
1771 	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1772 	    runtime->trigger_master == substream &&
1773 	    (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1774 		return substream->ops->trigger(substream,
1775 					       SNDRV_PCM_TRIGGER_DRAIN);
1776 
1777 	return 0;
1778 }
1779 
1780 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1781 {
1782 }
1783 
1784 static const struct action_ops snd_pcm_action_drain_init = {
1785 	.pre_action = snd_pcm_pre_drain_init,
1786 	.do_action = snd_pcm_do_drain_init,
1787 	.post_action = snd_pcm_post_drain_init
1788 };
1789 
1790 static int snd_pcm_drop(struct snd_pcm_substream *substream);
1791 
1792 /*
1793  * Drain the stream(s).
1794  * When the substream is linked, sync until the draining of all playback streams
1795  * is finished.
1796  * After this call, all streams are supposed to be in either SETUP or
1797  * DRAINING (capture only) state.
1798  */
1799 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1800 			 struct file *file)
1801 {
1802 	struct snd_card *card;
1803 	struct snd_pcm_runtime *runtime;
1804 	struct snd_pcm_substream *s;
1805 	wait_queue_entry_t wait;
1806 	int result = 0;
1807 	int nonblock = 0;
1808 
1809 	card = substream->pcm->card;
1810 	runtime = substream->runtime;
1811 
1812 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1813 		return -EBADFD;
1814 
1815 	if (file) {
1816 		if (file->f_flags & O_NONBLOCK)
1817 			nonblock = 1;
1818 	} else if (substream->f_flags & O_NONBLOCK)
1819 		nonblock = 1;
1820 
1821 	down_read(&snd_pcm_link_rwsem);
1822 	snd_pcm_stream_lock_irq(substream);
1823 	/* resume pause */
1824 	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1825 		snd_pcm_pause(substream, 0);
1826 
1827 	/* pre-start/stop - all running streams are changed to DRAINING state */
1828 	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1829 	if (result < 0)
1830 		goto unlock;
1831 	/* in non-blocking mode, we don't wait in the ioctl but let the caller poll */
1832 	if (nonblock) {
1833 		result = -EAGAIN;
1834 		goto unlock;
1835 	}
1836 
1837 	for (;;) {
1838 		long tout;
1839 		struct snd_pcm_runtime *to_check;
1840 		if (signal_pending(current)) {
1841 			result = -ERESTARTSYS;
1842 			break;
1843 		}
1844 		/* find a substream to drain */
1845 		to_check = NULL;
1846 		snd_pcm_group_for_each_entry(s, substream) {
1847 			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1848 				continue;
1849 			runtime = s->runtime;
1850 			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1851 				to_check = runtime;
1852 				break;
1853 			}
1854 		}
1855 		if (!to_check)
1856 			break; /* all drained */
1857 		init_waitqueue_entry(&wait, current);
1858 		add_wait_queue(&to_check->sleep, &wait);
1859 		snd_pcm_stream_unlock_irq(substream);
1860 		up_read(&snd_pcm_link_rwsem);
1861 		if (runtime->no_period_wakeup)
1862 			tout = MAX_SCHEDULE_TIMEOUT;
1863 		else {
1864 			tout = 10;
1865 			if (runtime->rate) {
1866 				long t = runtime->period_size * 2 / runtime->rate;
1867 				tout = max(t, tout);
1868 			}
1869 			tout = msecs_to_jiffies(tout * 1000);
1870 		}
1871 		tout = schedule_timeout_interruptible(tout);
1872 		down_read(&snd_pcm_link_rwsem);
1873 		snd_pcm_stream_lock_irq(substream);
1874 		remove_wait_queue(&to_check->sleep, &wait);
1875 		if (card->shutdown) {
1876 			result = -ENODEV;
1877 			break;
1878 		}
1879 		if (tout == 0) {
1880 			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1881 				result = -ESTRPIPE;
1882 			else {
1883 				dev_dbg(substream->pcm->card->dev,
1884 					"playback drain error (DMA or IRQ trouble?)\n");
1885 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1886 				result = -EIO;
1887 			}
1888 			break;
1889 		}
1890 	}
1891 
1892  unlock:
1893 	snd_pcm_stream_unlock_irq(substream);
1894 	up_read(&snd_pcm_link_rwsem);
1895 
1896 	return result;
1897 }
1898 
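/*
 * Illustrative sketch (not part of the original file): user-space use of the
 * drain ioctl on a playback stream opened with O_NONBLOCK.  As the code above
 * shows, the ioctl then only switches the stream(s) to DRAINING and returns
 * -EAGAIN, so the caller has to poll until draining has finished.  "pcm_fd"
 * and "pfd" are placeholder names.
 *
 *	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_DRAIN) < 0 && errno == EAGAIN) {
 *		struct pollfd pfd = { .fd = pcm_fd, .events = POLLOUT };
 *
 *		// wakes up once the stream has left the DRAINING state
 *		poll(&pfd, 1, -1);
 *	}
 */
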
1899 /*
1900  * drop ioctl
1901  *
1902  * Immediately put all linked substreams into SETUP state.
1903  */
1904 static int snd_pcm_drop(struct snd_pcm_substream *substream)
1905 {
1906 	struct snd_pcm_runtime *runtime;
1907 	int result = 0;
1908 
1909 	if (PCM_RUNTIME_CHECK(substream))
1910 		return -ENXIO;
1911 	runtime = substream->runtime;
1912 
1913 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1914 	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1915 		return -EBADFD;
1916 
1917 	snd_pcm_stream_lock_irq(substream);
1918 	/* resume pause */
1919 	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1920 		snd_pcm_pause(substream, 0);
1921 
1922 	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1923 	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
1924 	snd_pcm_stream_unlock_irq(substream);
1925 
1926 	return result;
1927 }
1928 
1929 
1930 static bool is_pcm_file(struct file *file)
1931 {
1932 	struct inode *inode = file_inode(file);
1933 	unsigned int minor;
1934 
1935 	if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
1936 		return false;
1937 	minor = iminor(inode);
1938 	return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
1939 		snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
1940 }
1941 
1942 /*
1943  * PCM link handling
1944  */
1945 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1946 {
1947 	int res = 0;
1948 	struct snd_pcm_file *pcm_file;
1949 	struct snd_pcm_substream *substream1;
1950 	struct snd_pcm_group *group;
1951 	struct fd f = fdget(fd);
1952 
1953 	if (!f.file)
1954 		return -EBADFD;
1955 	if (!is_pcm_file(f.file)) {
1956 		res = -EBADFD;
1957 		goto _badf;
1958 	}
1959 	pcm_file = f.file->private_data;
1960 	substream1 = pcm_file->substream;
1961 	group = kmalloc(sizeof(*group), GFP_KERNEL);
1962 	if (!group) {
1963 		res = -ENOMEM;
1964 		goto _nolock;
1965 	}
1966 	down_write_nonfifo(&snd_pcm_link_rwsem);
1967 	write_lock_irq(&snd_pcm_link_rwlock);
1968 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1969 	    substream->runtime->status->state != substream1->runtime->status->state ||
1970 	    substream->pcm->nonatomic != substream1->pcm->nonatomic) {
1971 		res = -EBADFD;
1972 		goto _end;
1973 	}
1974 	if (snd_pcm_stream_linked(substream1)) {
1975 		res = -EALREADY;
1976 		goto _end;
1977 	}
1978 	if (!snd_pcm_stream_linked(substream)) {
1979 		substream->group = group;
1980 		group = NULL;
1981 		spin_lock_init(&substream->group->lock);
1982 		mutex_init(&substream->group->mutex);
1983 		INIT_LIST_HEAD(&substream->group->substreams);
1984 		list_add_tail(&substream->link_list, &substream->group->substreams);
1985 		substream->group->count = 1;
1986 	}
1987 	list_add_tail(&substream1->link_list, &substream->group->substreams);
1988 	substream->group->count++;
1989 	substream1->group = substream->group;
1990  _end:
1991 	write_unlock_irq(&snd_pcm_link_rwlock);
1992 	up_write(&snd_pcm_link_rwsem);
1993  _nolock:
1994 	snd_card_unref(substream1->pcm->card);
1995 	kfree(group);
1996  _badf:
1997 	fdput(f);
1998 	return res;
1999 }
2000 
2001 static void relink_to_local(struct snd_pcm_substream *substream)
2002 {
2003 	substream->group = &substream->self_group;
2004 	INIT_LIST_HEAD(&substream->self_group.substreams);
2005 	list_add_tail(&substream->link_list, &substream->self_group.substreams);
2006 }
2007 
2008 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2009 {
2010 	struct snd_pcm_substream *s;
2011 	int res = 0;
2012 
2013 	down_write_nonfifo(&snd_pcm_link_rwsem);
2014 	write_lock_irq(&snd_pcm_link_rwlock);
2015 	if (!snd_pcm_stream_linked(substream)) {
2016 		res = -EALREADY;
2017 		goto _end;
2018 	}
2019 	list_del(&substream->link_list);
2020 	substream->group->count--;
2021 	if (substream->group->count == 1) {	/* detach the last stream, too */
2022 		snd_pcm_group_for_each_entry(s, substream) {
2023 			relink_to_local(s);
2024 			break;
2025 		}
2026 		kfree(substream->group);
2027 	}
2028 	relink_to_local(substream);
2029  _end:
2030 	write_unlock_irq(&snd_pcm_link_rwlock);
2031 	up_write(&snd_pcm_link_rwsem);
2032 	return res;
2033 }
2034 
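/*
 * Illustrative sketch (not part of the original file): linking two PCM
 * streams from user space so that trigger operations act on the whole group.
 * SNDRV_PCM_IOCTL_LINK takes the file descriptor of the other PCM device as
 * its argument, matching snd_pcm_link() above; "play_fd" and "capt_fd" are
 * placeholder names.
 *
 *	// both streams must be in the same (non-OPEN) state, e.g. PREPARED
 *	if (ioctl(play_fd, SNDRV_PCM_IOCTL_LINK, capt_fd) < 0)
 *		perror("PCM link");
 *	...
 *	ioctl(play_fd, SNDRV_PCM_IOCTL_UNLINK);
 */
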
2035 /*
2036  * hw configurator
2037  */
2038 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2039 			       struct snd_pcm_hw_rule *rule)
2040 {
2041 	struct snd_interval t;
2042 	snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2043 		     hw_param_interval_c(params, rule->deps[1]), &t);
2044 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2045 }
2046 
2047 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2048 			       struct snd_pcm_hw_rule *rule)
2049 {
2050 	struct snd_interval t;
2051 	snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2052 		     hw_param_interval_c(params, rule->deps[1]), &t);
2053 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2054 }
2055 
2056 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2057 				   struct snd_pcm_hw_rule *rule)
2058 {
2059 	struct snd_interval t;
2060 	snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2061 			 hw_param_interval_c(params, rule->deps[1]),
2062 			 (unsigned long) rule->private, &t);
2063 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2064 }
2065 
2066 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2067 				   struct snd_pcm_hw_rule *rule)
2068 {
2069 	struct snd_interval t;
2070 	snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2071 			 (unsigned long) rule->private,
2072 			 hw_param_interval_c(params, rule->deps[1]), &t);
2073 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2074 }
2075 
2076 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2077 				  struct snd_pcm_hw_rule *rule)
2078 {
2079 	unsigned int k;
2080 	const struct snd_interval *i =
2081 				hw_param_interval_c(params, rule->deps[0]);
2082 	struct snd_mask m;
2083 	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2084 	snd_mask_any(&m);
2085 	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2086 		int bits;
2087 		if (! snd_mask_test(mask, k))
2088 			continue;
2089 		bits = snd_pcm_format_physical_width(k);
2090 		if (bits <= 0)
2091 			continue; /* ignore invalid formats */
2092 		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2093 			snd_mask_reset(&m, k);
2094 	}
2095 	return snd_mask_refine(mask, &m);
2096 }
2097 
2098 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2099 				       struct snd_pcm_hw_rule *rule)
2100 {
2101 	struct snd_interval t;
2102 	unsigned int k;
2103 	t.min = UINT_MAX;
2104 	t.max = 0;
2105 	t.openmin = 0;
2106 	t.openmax = 0;
2107 	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2108 		int bits;
2109 		if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2110 			continue;
2111 		bits = snd_pcm_format_physical_width(k);
2112 		if (bits <= 0)
2113 			continue; /* ignore invalid formats */
2114 		if (t.min > (unsigned)bits)
2115 			t.min = bits;
2116 		if (t.max < (unsigned)bits)
2117 			t.max = bits;
2118 	}
2119 	t.integer = 1;
2120 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2121 }
2122 
2123 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2124 #error "Change this table"
2125 #endif
2126 
2127 static const unsigned int rates[] = {
2128 	5512, 8000, 11025, 16000, 22050, 32000, 44100,
2129 	48000, 64000, 88200, 96000, 176400, 192000
2130 };
2131 
2132 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2133 	.count = ARRAY_SIZE(rates),
2134 	.list = rates,
2135 };
2136 
2137 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2138 				struct snd_pcm_hw_rule *rule)
2139 {
2140 	struct snd_pcm_hardware *hw = rule->private;
2141 	return snd_interval_list(hw_param_interval(params, rule->var),
2142 				 snd_pcm_known_rates.count,
2143 				 snd_pcm_known_rates.list, hw->rates);
2144 }
2145 
2146 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2147 					    struct snd_pcm_hw_rule *rule)
2148 {
2149 	struct snd_interval t;
2150 	struct snd_pcm_substream *substream = rule->private;
2151 	t.min = 0;
2152 	t.max = substream->buffer_bytes_max;
2153 	t.openmin = 0;
2154 	t.openmax = 0;
2155 	t.integer = 1;
2156 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2157 }
2158 
2159 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2160 {
2161 	struct snd_pcm_runtime *runtime = substream->runtime;
2162 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2163 	int k, err;
2164 
2165 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2166 		snd_mask_any(constrs_mask(constrs, k));
2167 	}
2168 
2169 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2170 		snd_interval_any(constrs_interval(constrs, k));
2171 	}
2172 
2173 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2174 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2175 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2176 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2177 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2178 
2179 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2180 				   snd_pcm_hw_rule_format, NULL,
2181 				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2182 	if (err < 0)
2183 		return err;
2184 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2185 				  snd_pcm_hw_rule_sample_bits, NULL,
2186 				  SNDRV_PCM_HW_PARAM_FORMAT,
2187 				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2188 	if (err < 0)
2189 		return err;
2190 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2191 				  snd_pcm_hw_rule_div, NULL,
2192 				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2193 	if (err < 0)
2194 		return err;
2195 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2196 				  snd_pcm_hw_rule_mul, NULL,
2197 				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2198 	if (err < 0)
2199 		return err;
2200 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2201 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2202 				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2203 	if (err < 0)
2204 		return err;
2205 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2206 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2207 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2208 	if (err < 0)
2209 		return err;
2210 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2211 				  snd_pcm_hw_rule_div, NULL,
2212 				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2213 	if (err < 0)
2214 		return err;
2215 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2216 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2217 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2218 	if (err < 0)
2219 		return err;
2220 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2221 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2222 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2223 	if (err < 0)
2224 		return err;
2225 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2226 				  snd_pcm_hw_rule_div, NULL,
2227 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2228 	if (err < 0)
2229 		return err;
2230 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2231 				  snd_pcm_hw_rule_div, NULL,
2232 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2233 	if (err < 0)
2234 		return err;
2235 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2236 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2237 				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2238 	if (err < 0)
2239 		return err;
2240 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2241 				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2242 				  SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2243 	if (err < 0)
2244 		return err;
2245 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2246 				  snd_pcm_hw_rule_mul, NULL,
2247 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2248 	if (err < 0)
2249 		return err;
2250 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2251 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2252 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2253 	if (err < 0)
2254 		return err;
2255 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2256 				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2257 				  SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2258 	if (err < 0)
2259 		return err;
2260 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2261 				  snd_pcm_hw_rule_muldivk, (void*) 8,
2262 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2263 	if (err < 0)
2264 		return err;
2265 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2266 				  snd_pcm_hw_rule_muldivk, (void*) 8,
2267 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2268 	if (err < 0)
2269 		return err;
2270 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2271 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2272 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2273 	if (err < 0)
2274 		return err;
2275 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2276 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2277 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2278 	if (err < 0)
2279 		return err;
2280 	return 0;
2281 }
2282 
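/*
 * Minimal sketch (not part of the original file): a driver can install its
 * own dependency rules with snd_pcm_hw_rule_add() from its open callback, on
 * the model of the generic rules registered above.  The callback below and
 * the constraint it implements (mono only above 96 kHz) are hypothetical.
 *
 *	static int hw_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					    struct snd_pcm_hw_rule *rule)
 *	{
 *		const struct snd_interval *rate =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval ch = { .min = 1, .max = 2, .integer = 1 };
 *
 *		if (rate->min > 96000)
 *			ch.max = 1;
 *		return snd_interval_refine(hw_param_interval(params, rule->var),
 *					   &ch);
 *	}
 *
 *	// in the driver's .open callback:
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  hw_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */
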
2283 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2284 {
2285 	struct snd_pcm_runtime *runtime = substream->runtime;
2286 	struct snd_pcm_hardware *hw = &runtime->hw;
2287 	int err;
2288 	unsigned int mask = 0;
2289 
2290 	if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2291 		mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2292 	if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2293 		mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2294 	if (hw_support_mmap(substream)) {
2295 		if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2296 			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2297 		if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2298 			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2299 		if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2300 			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2301 	}
2302 	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2303 	if (err < 0)
2304 		return err;
2305 
2306 	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2307 	if (err < 0)
2308 		return err;
2309 
2310 	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2311 	if (err < 0)
2312 		return err;
2313 
2314 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2315 					   hw->channels_min, hw->channels_max);
2316 	if (err < 0)
2317 		return err;
2318 
2319 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2320 					   hw->rate_min, hw->rate_max);
2321 	if (err < 0)
2322 		return err;
2323 
2324 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2325 					   hw->period_bytes_min, hw->period_bytes_max);
2326 	if (err < 0)
2327 		return err;
2328 
2329 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2330 					   hw->periods_min, hw->periods_max);
2331 	if (err < 0)
2332 		return err;
2333 
2334 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2335 					   hw->period_bytes_min, hw->buffer_bytes_max);
2336 	if (err < 0)
2337 		return err;
2338 
2339 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2340 				  snd_pcm_hw_rule_buffer_bytes_max, substream,
2341 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2342 	if (err < 0)
2343 		return err;
2344 
2345 	/* FIXME: remove */
2346 	if (runtime->dma_bytes) {
2347 		err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2348 		if (err < 0)
2349 			return err;
2350 	}
2351 
2352 	if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2353 		err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2354 					  snd_pcm_hw_rule_rate, hw,
2355 					  SNDRV_PCM_HW_PARAM_RATE, -1);
2356 		if (err < 0)
2357 			return err;
2358 	}
2359 
2360 	/* FIXME: this belong to lowlevel */
2361 	/* FIXME: this belongs to the lowlevel driver */
2362 
2363 	return 0;
2364 }
2365 
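/*
 * Illustrative sketch (not part of the original file): the constraints
 * installed above are seeded from the struct snd_pcm_hardware that a driver
 * assigns to runtime->hw in its open callback.  All values below are
 * hypothetical.
 *
 *	static const struct snd_pcm_hardware example_pcm_hw = {
 *		.info			= SNDRV_PCM_INFO_INTERLEAVED |
 *					  SNDRV_PCM_INFO_MMAP |
 *					  SNDRV_PCM_INFO_MMAP_VALID,
 *		.formats		= SNDRV_PCM_FMTBIT_S16_LE,
 *		.rates			= SNDRV_PCM_RATE_44100 |
 *					  SNDRV_PCM_RATE_48000,
 *		.rate_min		= 44100,
 *		.rate_max		= 48000,
 *		.channels_min		= 2,
 *		.channels_max		= 2,
 *		.buffer_bytes_max	= 64 * 1024,
 *		.period_bytes_min	= 1024,
 *		.period_bytes_max	= 16 * 1024,
 *		.periods_min		= 2,
 *		.periods_max		= 32,
 *	};
 *
 *	// in the driver's .open callback:
 *	substream->runtime->hw = example_pcm_hw;
 */
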
2366 static void pcm_release_private(struct snd_pcm_substream *substream)
2367 {
2368 	if (snd_pcm_stream_linked(substream))
2369 		snd_pcm_unlink(substream);
2370 }
2371 
2372 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2373 {
2374 	substream->ref_count--;
2375 	if (substream->ref_count > 0)
2376 		return;
2377 
2378 	snd_pcm_drop(substream);
2379 	if (substream->hw_opened) {
2380 		if (substream->ops->hw_free &&
2381 		    substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2382 			substream->ops->hw_free(substream);
2383 		substream->ops->close(substream);
2384 		substream->hw_opened = 0;
2385 	}
2386 	if (pm_qos_request_active(&substream->latency_pm_qos_req))
2387 		pm_qos_remove_request(&substream->latency_pm_qos_req);
2388 	if (substream->pcm_release) {
2389 		substream->pcm_release(substream);
2390 		substream->pcm_release = NULL;
2391 	}
2392 	snd_pcm_detach_substream(substream);
2393 }
2394 EXPORT_SYMBOL(snd_pcm_release_substream);
2395 
2396 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2397 			   struct file *file,
2398 			   struct snd_pcm_substream **rsubstream)
2399 {
2400 	struct snd_pcm_substream *substream;
2401 	int err;
2402 
2403 	err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2404 	if (err < 0)
2405 		return err;
2406 	if (substream->ref_count > 1) {
2407 		*rsubstream = substream;
2408 		return 0;
2409 	}
2410 
2411 	err = snd_pcm_hw_constraints_init(substream);
2412 	if (err < 0) {
2413 		pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2414 		goto error;
2415 	}
2416 
2417 	if ((err = substream->ops->open(substream)) < 0)
2418 		goto error;
2419 
2420 	substream->hw_opened = 1;
2421 
2422 	err = snd_pcm_hw_constraints_complete(substream);
2423 	if (err < 0) {
2424 		pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2425 		goto error;
2426 	}
2427 
2428 	*rsubstream = substream;
2429 	return 0;
2430 
2431  error:
2432 	snd_pcm_release_substream(substream);
2433 	return err;
2434 }
2435 EXPORT_SYMBOL(snd_pcm_open_substream);
2436 
2437 static int snd_pcm_open_file(struct file *file,
2438 			     struct snd_pcm *pcm,
2439 			     int stream)
2440 {
2441 	struct snd_pcm_file *pcm_file;
2442 	struct snd_pcm_substream *substream;
2443 	int err;
2444 
2445 	err = snd_pcm_open_substream(pcm, stream, file, &substream);
2446 	if (err < 0)
2447 		return err;
2448 
2449 	pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2450 	if (pcm_file == NULL) {
2451 		snd_pcm_release_substream(substream);
2452 		return -ENOMEM;
2453 	}
2454 	pcm_file->substream = substream;
2455 	if (substream->ref_count == 1) {
2456 		substream->file = pcm_file;
2457 		substream->pcm_release = pcm_release_private;
2458 	}
2459 	file->private_data = pcm_file;
2460 
2461 	return 0;
2462 }
2463 
2464 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2465 {
2466 	struct snd_pcm *pcm;
2467 	int err = nonseekable_open(inode, file);
2468 	if (err < 0)
2469 		return err;
2470 	pcm = snd_lookup_minor_data(iminor(inode),
2471 				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2472 	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2473 	if (pcm)
2474 		snd_card_unref(pcm->card);
2475 	return err;
2476 }
2477 
2478 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2479 {
2480 	struct snd_pcm *pcm;
2481 	int err = nonseekable_open(inode, file);
2482 	if (err < 0)
2483 		return err;
2484 	pcm = snd_lookup_minor_data(iminor(inode),
2485 				    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2486 	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2487 	if (pcm)
2488 		snd_card_unref(pcm->card);
2489 	return err;
2490 }
2491 
2492 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2493 {
2494 	int err;
2495 	wait_queue_entry_t wait;
2496 
2497 	if (pcm == NULL) {
2498 		err = -ENODEV;
2499 		goto __error1;
2500 	}
2501 	err = snd_card_file_add(pcm->card, file);
2502 	if (err < 0)
2503 		goto __error1;
2504 	if (!try_module_get(pcm->card->module)) {
2505 		err = -EFAULT;
2506 		goto __error2;
2507 	}
2508 	init_waitqueue_entry(&wait, current);
2509 	add_wait_queue(&pcm->open_wait, &wait);
2510 	mutex_lock(&pcm->open_mutex);
2511 	while (1) {
2512 		err = snd_pcm_open_file(file, pcm, stream);
2513 		if (err >= 0)
2514 			break;
2515 		if (err == -EAGAIN) {
2516 			if (file->f_flags & O_NONBLOCK) {
2517 				err = -EBUSY;
2518 				break;
2519 			}
2520 		} else
2521 			break;
2522 		set_current_state(TASK_INTERRUPTIBLE);
2523 		mutex_unlock(&pcm->open_mutex);
2524 		schedule();
2525 		mutex_lock(&pcm->open_mutex);
2526 		if (pcm->card->shutdown) {
2527 			err = -ENODEV;
2528 			break;
2529 		}
2530 		if (signal_pending(current)) {
2531 			err = -ERESTARTSYS;
2532 			break;
2533 		}
2534 	}
2535 	remove_wait_queue(&pcm->open_wait, &wait);
2536 	mutex_unlock(&pcm->open_mutex);
2537 	if (err < 0)
2538 		goto __error;
2539 	return err;
2540 
2541       __error:
2542 	module_put(pcm->card->module);
2543       __error2:
2544 	snd_card_file_remove(pcm->card, file);
2545       __error1:
2546 	return err;
2547 }
2548 
2549 static int snd_pcm_release(struct inode *inode, struct file *file)
2550 {
2551 	struct snd_pcm *pcm;
2552 	struct snd_pcm_substream *substream;
2553 	struct snd_pcm_file *pcm_file;
2554 
2555 	pcm_file = file->private_data;
2556 	substream = pcm_file->substream;
2557 	if (snd_BUG_ON(!substream))
2558 		return -ENXIO;
2559 	pcm = substream->pcm;
2560 	mutex_lock(&pcm->open_mutex);
2561 	snd_pcm_release_substream(substream);
2562 	kfree(pcm_file);
2563 	mutex_unlock(&pcm->open_mutex);
2564 	wake_up(&pcm->open_wait);
2565 	module_put(pcm->card->module);
2566 	snd_card_file_remove(pcm->card, file);
2567 	return 0;
2568 }
2569 
2570 /* check and update PCM state; return 0 or a negative error
2571  * call this inside the PCM stream lock
2572  */
2573 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2574 {
2575 	switch (substream->runtime->status->state) {
2576 	case SNDRV_PCM_STATE_DRAINING:
2577 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2578 			return -EBADFD;
2579 		/* Fall through */
2580 	case SNDRV_PCM_STATE_RUNNING:
2581 		return snd_pcm_update_hw_ptr(substream);
2582 	case SNDRV_PCM_STATE_PREPARED:
2583 	case SNDRV_PCM_STATE_PAUSED:
2584 		return 0;
2585 	case SNDRV_PCM_STATE_SUSPENDED:
2586 		return -ESTRPIPE;
2587 	case SNDRV_PCM_STATE_XRUN:
2588 		return -EPIPE;
2589 	default:
2590 		return -EBADFD;
2591 	}
2592 }
2593 
2594 /* increase the appl_ptr; returns the processed frames or a negative error */
2595 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2596 					  snd_pcm_uframes_t frames,
2597 					   snd_pcm_sframes_t avail)
2598 {
2599 	struct snd_pcm_runtime *runtime = substream->runtime;
2600 	snd_pcm_sframes_t appl_ptr;
2601 	int ret;
2602 
2603 	if (avail <= 0)
2604 		return 0;
2605 	if (frames > (snd_pcm_uframes_t)avail)
2606 		frames = avail;
2607 	appl_ptr = runtime->control->appl_ptr + frames;
2608 	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2609 		appl_ptr -= runtime->boundary;
2610 	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2611 	return ret < 0 ? ret : frames;
2612 }
2613 
2614 /* decrease the appl_ptr; returns the processed frames or zero for error */
2615 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2616 					 snd_pcm_uframes_t frames,
2617 					 snd_pcm_sframes_t avail)
2618 {
2619 	struct snd_pcm_runtime *runtime = substream->runtime;
2620 	snd_pcm_sframes_t appl_ptr;
2621 	int ret;
2622 
2623 	if (avail <= 0)
2624 		return 0;
2625 	if (frames > (snd_pcm_uframes_t)avail)
2626 		frames = avail;
2627 	appl_ptr = runtime->control->appl_ptr - frames;
2628 	if (appl_ptr < 0)
2629 		appl_ptr += runtime->boundary;
2630 	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2631 	/* NOTE: we return zero for errors because PulseAudio gets depressed
2632 	 * upon receiving an error from the rewind ioctl and stops any
2633 	 * further processing.  Returning zero means that no rewind was done,
2634 	 * so it's not absolutely wrong to answer like that.
2635 	 */
2636 	return ret < 0 ? 0 : frames;
2637 }
2638 
2639 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2640 					snd_pcm_uframes_t frames)
2641 {
2642 	snd_pcm_sframes_t ret;
2643 
2644 	if (frames == 0)
2645 		return 0;
2646 
2647 	snd_pcm_stream_lock_irq(substream);
2648 	ret = do_pcm_hwsync(substream);
2649 	if (!ret)
2650 		ret = rewind_appl_ptr(substream, frames,
2651 				      snd_pcm_hw_avail(substream));
2652 	snd_pcm_stream_unlock_irq(substream);
2653 	return ret;
2654 }
2655 
2656 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
2657 					 snd_pcm_uframes_t frames)
2658 {
2659 	snd_pcm_sframes_t ret;
2660 
2661 	if (frames == 0)
2662 		return 0;
2663 
2664 	snd_pcm_stream_lock_irq(substream);
2665 	ret = do_pcm_hwsync(substream);
2666 	if (!ret)
2667 		ret = forward_appl_ptr(substream, frames,
2668 				       snd_pcm_avail(substream));
2669 	snd_pcm_stream_unlock_irq(substream);
2670 	return ret;
2671 }
2672 
2673 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2674 {
2675 	int err;
2676 
2677 	snd_pcm_stream_lock_irq(substream);
2678 	err = do_pcm_hwsync(substream);
2679 	snd_pcm_stream_unlock_irq(substream);
2680 	return err;
2681 }
2682 
2683 static int snd_pcm_delay(struct snd_pcm_substream *substream,
2684 			 snd_pcm_sframes_t *delay)
2685 {
2686 	int err;
2687 	snd_pcm_sframes_t n = 0;
2688 
2689 	snd_pcm_stream_lock_irq(substream);
2690 	err = do_pcm_hwsync(substream);
2691 	if (!err)
2692 		n = snd_pcm_calc_delay(substream);
2693 	snd_pcm_stream_unlock_irq(substream);
2694 	if (!err)
2695 		*delay = n;
2696 	return err;
2697 }
2698 
2699 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2700 			    struct snd_pcm_sync_ptr __user *_sync_ptr)
2701 {
2702 	struct snd_pcm_runtime *runtime = substream->runtime;
2703 	struct snd_pcm_sync_ptr sync_ptr;
2704 	volatile struct snd_pcm_mmap_status *status;
2705 	volatile struct snd_pcm_mmap_control *control;
2706 	int err;
2707 
2708 	memset(&sync_ptr, 0, sizeof(sync_ptr));
2709 	if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2710 		return -EFAULT;
2711 	if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2712 		return -EFAULT;
2713 	status = runtime->status;
2714 	control = runtime->control;
2715 	if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2716 		err = snd_pcm_hwsync(substream);
2717 		if (err < 0)
2718 			return err;
2719 	}
2720 	snd_pcm_stream_lock_irq(substream);
2721 	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2722 		err = pcm_lib_apply_appl_ptr(substream,
2723 					     sync_ptr.c.control.appl_ptr);
2724 		if (err < 0) {
2725 			snd_pcm_stream_unlock_irq(substream);
2726 			return err;
2727 		}
2728 	} else {
2729 		sync_ptr.c.control.appl_ptr = control->appl_ptr;
2730 	}
2731 	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2732 		control->avail_min = sync_ptr.c.control.avail_min;
2733 	else
2734 		sync_ptr.c.control.avail_min = control->avail_min;
2735 	sync_ptr.s.status.state = status->state;
2736 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
2737 	sync_ptr.s.status.tstamp = status->tstamp;
2738 	sync_ptr.s.status.suspended_state = status->suspended_state;
2739 	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2740 	snd_pcm_stream_unlock_irq(substream);
2741 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2742 		return -EFAULT;
2743 	return 0;
2744 }
2745 
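/*
 * Illustrative sketch (not part of the original file): how user space
 * typically uses the SYNC_PTR ioctl handled above when the status/control
 * pages are not mmapped.  Setting all three flags requests a hwsync and asks
 * the kernel to report its own appl_ptr/avail_min instead of taking the
 * values supplied by user space; "pcm_fd" is a placeholder name.
 *
 *	struct snd_pcm_sync_ptr sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC | SNDRV_PCM_SYNC_PTR_APPL |
 *		   SNDRV_PCM_SYNC_PTR_AVAIL_MIN;
 *	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp) == 0)
 *		printf("hw_ptr=%lu state=%d\n",
 *		       (unsigned long)sp.s.status.hw_ptr, sp.s.status.state);
 */
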
2746 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2747 {
2748 	struct snd_pcm_runtime *runtime = substream->runtime;
2749 	int arg;
2750 
2751 	if (get_user(arg, _arg))
2752 		return -EFAULT;
2753 	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2754 		return -EINVAL;
2755 	runtime->tstamp_type = arg;
2756 	return 0;
2757 }
2758 
2759 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
2760 				      struct snd_xferi __user *_xferi)
2761 {
2762 	struct snd_xferi xferi;
2763 	struct snd_pcm_runtime *runtime = substream->runtime;
2764 	snd_pcm_sframes_t result;
2765 
2766 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2767 		return -EBADFD;
2768 	if (put_user(0, &_xferi->result))
2769 		return -EFAULT;
2770 	if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2771 		return -EFAULT;
2772 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2773 		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2774 	else
2775 		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2776 	__put_user(result, &_xferi->result);
2777 	return result < 0 ? result : 0;
2778 }
2779 
2780 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
2781 				      struct snd_xfern __user *_xfern)
2782 {
2783 	struct snd_xfern xfern;
2784 	struct snd_pcm_runtime *runtime = substream->runtime;
2785 	void *bufs;
2786 	snd_pcm_sframes_t result;
2787 
2788 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2789 		return -EBADFD;
2790 	if (runtime->channels > 128)
2791 		return -EINVAL;
2792 	if (put_user(0, &_xfern->result))
2793 		return -EFAULT;
2794 	if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2795 		return -EFAULT;
2796 
2797 	bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
2798 	if (IS_ERR(bufs))
2799 		return PTR_ERR(bufs);
2800 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2801 		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2802 	else
2803 		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2804 	kfree(bufs);
2805 	__put_user(result, &_xfern->result);
2806 	return result < 0 ? result : 0;
2807 }
2808 
2809 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
2810 				snd_pcm_uframes_t __user *_frames)
2811 {
2812 	snd_pcm_uframes_t frames;
2813 	snd_pcm_sframes_t result;
2814 
2815 	if (get_user(frames, _frames))
2816 		return -EFAULT;
2817 	if (put_user(0, _frames))
2818 		return -EFAULT;
2819 	result = snd_pcm_rewind(substream, frames);
2820 	__put_user(result, _frames);
2821 	return result < 0 ? result : 0;
2822 }
2823 
2824 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
2825 				 snd_pcm_uframes_t __user *_frames)
2826 {
2827 	snd_pcm_uframes_t frames;
2828 	snd_pcm_sframes_t result;
2829 
2830 	if (get_user(frames, _frames))
2831 		return -EFAULT;
2832 	if (put_user(0, _frames))
2833 		return -EFAULT;
2834 	result = snd_pcm_forward(substream, frames);
2835 	__put_user(result, _frames);
2836 	return result < 0 ? result : 0;
2837 }
2838 
2839 static int snd_pcm_common_ioctl(struct file *file,
2840 				 struct snd_pcm_substream *substream,
2841 				 unsigned int cmd, void __user *arg)
2842 {
2843 	struct snd_pcm_file *pcm_file = file->private_data;
2844 	int res;
2845 
2846 	if (PCM_RUNTIME_CHECK(substream))
2847 		return -ENXIO;
2848 
2849 	res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
2850 	if (res < 0)
2851 		return res;
2852 
2853 	switch (cmd) {
2854 	case SNDRV_PCM_IOCTL_PVERSION:
2855 		return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2856 	case SNDRV_PCM_IOCTL_INFO:
2857 		return snd_pcm_info_user(substream, arg);
2858 	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
2859 		return 0;
2860 	case SNDRV_PCM_IOCTL_TTSTAMP:
2861 		return snd_pcm_tstamp(substream, arg);
2862 	case SNDRV_PCM_IOCTL_USER_PVERSION:
2863 		if (get_user(pcm_file->user_pversion,
2864 			     (unsigned int __user *)arg))
2865 			return -EFAULT;
2866 		return 0;
2867 	case SNDRV_PCM_IOCTL_HW_REFINE:
2868 		return snd_pcm_hw_refine_user(substream, arg);
2869 	case SNDRV_PCM_IOCTL_HW_PARAMS:
2870 		return snd_pcm_hw_params_user(substream, arg);
2871 	case SNDRV_PCM_IOCTL_HW_FREE:
2872 		return snd_pcm_hw_free(substream);
2873 	case SNDRV_PCM_IOCTL_SW_PARAMS:
2874 		return snd_pcm_sw_params_user(substream, arg);
2875 	case SNDRV_PCM_IOCTL_STATUS:
2876 		return snd_pcm_status_user(substream, arg, false);
2877 	case SNDRV_PCM_IOCTL_STATUS_EXT:
2878 		return snd_pcm_status_user(substream, arg, true);
2879 	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2880 		return snd_pcm_channel_info_user(substream, arg);
2881 	case SNDRV_PCM_IOCTL_PREPARE:
2882 		return snd_pcm_prepare(substream, file);
2883 	case SNDRV_PCM_IOCTL_RESET:
2884 		return snd_pcm_reset(substream);
2885 	case SNDRV_PCM_IOCTL_START:
2886 		return snd_pcm_start_lock_irq(substream);
2887 	case SNDRV_PCM_IOCTL_LINK:
2888 		return snd_pcm_link(substream, (int)(unsigned long) arg);
2889 	case SNDRV_PCM_IOCTL_UNLINK:
2890 		return snd_pcm_unlink(substream);
2891 	case SNDRV_PCM_IOCTL_RESUME:
2892 		return snd_pcm_resume(substream);
2893 	case SNDRV_PCM_IOCTL_XRUN:
2894 		return snd_pcm_xrun(substream);
2895 	case SNDRV_PCM_IOCTL_HWSYNC:
2896 		return snd_pcm_hwsync(substream);
2897 	case SNDRV_PCM_IOCTL_DELAY:
2898 	{
2899 		snd_pcm_sframes_t delay;
2900 		snd_pcm_sframes_t __user *res = arg;
2901 		int err;
2902 
2903 		err = snd_pcm_delay(substream, &delay);
2904 		if (err)
2905 			return err;
2906 		if (put_user(delay, res))
2907 			return -EFAULT;
2908 		return 0;
2909 	}
2910 	case SNDRV_PCM_IOCTL_SYNC_PTR:
2911 		return snd_pcm_sync_ptr(substream, arg);
2912 #ifdef CONFIG_SND_SUPPORT_OLD_API
2913 	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
2914 		return snd_pcm_hw_refine_old_user(substream, arg);
2915 	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
2916 		return snd_pcm_hw_params_old_user(substream, arg);
2917 #endif
2918 	case SNDRV_PCM_IOCTL_DRAIN:
2919 		return snd_pcm_drain(substream, file);
2920 	case SNDRV_PCM_IOCTL_DROP:
2921 		return snd_pcm_drop(substream);
2922 	case SNDRV_PCM_IOCTL_PAUSE:
2923 		return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
2924 					       substream,
2925 					       (int)(unsigned long)arg);
2926 	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
2927 	case SNDRV_PCM_IOCTL_READI_FRAMES:
2928 		return snd_pcm_xferi_frames_ioctl(substream, arg);
2929 	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
2930 	case SNDRV_PCM_IOCTL_READN_FRAMES:
2931 		return snd_pcm_xfern_frames_ioctl(substream, arg);
2932 	case SNDRV_PCM_IOCTL_REWIND:
2933 		return snd_pcm_rewind_ioctl(substream, arg);
2934 	case SNDRV_PCM_IOCTL_FORWARD:
2935 		return snd_pcm_forward_ioctl(substream, arg);
2936 	}
2937 	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
2938 	return -ENOTTY;
2939 }
2940 
2941 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
2942 			  unsigned long arg)
2943 {
2944 	struct snd_pcm_file *pcm_file;
2945 
2946 	pcm_file = file->private_data;
2947 
2948 	if (((cmd >> 8) & 0xff) != 'A')
2949 		return -ENOTTY;
2950 
2951 	return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
2952 				     (void __user *)arg);
2953 }
2954 
2955 /**
2956  * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
2957  * @substream: PCM substream
2958  * @cmd: IOCTL cmd
2959  * @arg: IOCTL argument
2960  *
2961  * The function is provided primarily for the OSS layer and USB gadget drivers,
2962  * and it allows only a limited set of ioctls (hw_params, sw_params,
2963  * prepare, start, drain, drop, forward, delay).
2964  */
2965 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
2966 			 unsigned int cmd, void *arg)
2967 {
2968 	snd_pcm_uframes_t *frames = arg;
2969 	snd_pcm_sframes_t result;
2970 
2971 	switch (cmd) {
2972 	case SNDRV_PCM_IOCTL_FORWARD:
2973 	{
2974 		/* provided only for OSS; capture-only and no value returned */
2975 		if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
2976 			return -EINVAL;
2977 		result = snd_pcm_forward(substream, *frames);
2978 		return result < 0 ? result : 0;
2979 	}
2980 	case SNDRV_PCM_IOCTL_HW_PARAMS:
2981 		return snd_pcm_hw_params(substream, arg);
2982 	case SNDRV_PCM_IOCTL_SW_PARAMS:
2983 		return snd_pcm_sw_params(substream, arg);
2984 	case SNDRV_PCM_IOCTL_PREPARE:
2985 		return snd_pcm_prepare(substream, NULL);
2986 	case SNDRV_PCM_IOCTL_START:
2987 		return snd_pcm_start_lock_irq(substream);
2988 	case SNDRV_PCM_IOCTL_DRAIN:
2989 		return snd_pcm_drain(substream, NULL);
2990 	case SNDRV_PCM_IOCTL_DROP:
2991 		return snd_pcm_drop(substream);
2992 	case SNDRV_PCM_IOCTL_DELAY:
2993 		return snd_pcm_delay(substream, frames);
2994 	default:
2995 		return -EINVAL;
2996 	}
2997 }
2998 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
2999 
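/*
 * Illustrative sketch (not part of the original file): an in-kernel user such
 * as the OSS emulation layer drives a substream it already owns through
 * snd_pcm_kernel_ioctl() instead of the file-based ioctl path.
 *
 *	// let the playback stream drain (blocking unless the substream's
 *	// f_flags carries O_NONBLOCK) ...
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
 *
 *	// ... or stop it immediately
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
 */
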
3000 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3001 			    loff_t * offset)
3002 {
3003 	struct snd_pcm_file *pcm_file;
3004 	struct snd_pcm_substream *substream;
3005 	struct snd_pcm_runtime *runtime;
3006 	snd_pcm_sframes_t result;
3007 
3008 	pcm_file = file->private_data;
3009 	substream = pcm_file->substream;
3010 	if (PCM_RUNTIME_CHECK(substream))
3011 		return -ENXIO;
3012 	runtime = substream->runtime;
3013 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3014 		return -EBADFD;
3015 	if (!frame_aligned(runtime, count))
3016 		return -EINVAL;
3017 	count = bytes_to_frames(runtime, count);
3018 	result = snd_pcm_lib_read(substream, buf, count);
3019 	if (result > 0)
3020 		result = frames_to_bytes(runtime, result);
3021 	return result;
3022 }
3023 
3024 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3025 			     size_t count, loff_t * offset)
3026 {
3027 	struct snd_pcm_file *pcm_file;
3028 	struct snd_pcm_substream *substream;
3029 	struct snd_pcm_runtime *runtime;
3030 	snd_pcm_sframes_t result;
3031 
3032 	pcm_file = file->private_data;
3033 	substream = pcm_file->substream;
3034 	if (PCM_RUNTIME_CHECK(substream))
3035 		return -ENXIO;
3036 	runtime = substream->runtime;
3037 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3038 		return -EBADFD;
3039 	if (!frame_aligned(runtime, count))
3040 		return -EINVAL;
3041 	count = bytes_to_frames(runtime, count);
3042 	result = snd_pcm_lib_write(substream, buf, count);
3043 	if (result > 0)
3044 		result = frames_to_bytes(runtime, result);
3045 	return result;
3046 }
3047 
3048 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3049 {
3050 	struct snd_pcm_file *pcm_file;
3051 	struct snd_pcm_substream *substream;
3052 	struct snd_pcm_runtime *runtime;
3053 	snd_pcm_sframes_t result;
3054 	unsigned long i;
3055 	void __user **bufs;
3056 	snd_pcm_uframes_t frames;
3057 
3058 	pcm_file = iocb->ki_filp->private_data;
3059 	substream = pcm_file->substream;
3060 	if (PCM_RUNTIME_CHECK(substream))
3061 		return -ENXIO;
3062 	runtime = substream->runtime;
3063 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3064 		return -EBADFD;
3065 	if (!iter_is_iovec(to))
3066 		return -EINVAL;
3067 	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3068 		return -EINVAL;
3069 	if (!frame_aligned(runtime, to->iov->iov_len))
3070 		return -EINVAL;
3071 	frames = bytes_to_samples(runtime, to->iov->iov_len);
3072 	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3073 	if (bufs == NULL)
3074 		return -ENOMEM;
3075 	for (i = 0; i < to->nr_segs; ++i)
3076 		bufs[i] = to->iov[i].iov_base;
3077 	result = snd_pcm_lib_readv(substream, bufs, frames);
3078 	if (result > 0)
3079 		result = frames_to_bytes(runtime, result);
3080 	kfree(bufs);
3081 	return result;
3082 }
3083 
3084 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3085 {
3086 	struct snd_pcm_file *pcm_file;
3087 	struct snd_pcm_substream *substream;
3088 	struct snd_pcm_runtime *runtime;
3089 	snd_pcm_sframes_t result;
3090 	unsigned long i;
3091 	void __user **bufs;
3092 	snd_pcm_uframes_t frames;
3093 
3094 	pcm_file = iocb->ki_filp->private_data;
3095 	substream = pcm_file->substream;
3096 	if (PCM_RUNTIME_CHECK(substream))
3097 		return -ENXIO;
3098 	runtime = substream->runtime;
3099 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3100 		return -EBADFD;
3101 	if (!iter_is_iovec(from))
3102 		return -EINVAL;
3103 	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3104 	    !frame_aligned(runtime, from->iov->iov_len))
3105 		return -EINVAL;
3106 	frames = bytes_to_samples(runtime, from->iov->iov_len);
3107 	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3108 	if (bufs == NULL)
3109 		return -ENOMEM;
3110 	for (i = 0; i < from->nr_segs; ++i)
3111 		bufs[i] = from->iov[i].iov_base;
3112 	result = snd_pcm_lib_writev(substream, bufs, frames);
3113 	if (result > 0)
3114 		result = frames_to_bytes(runtime, result);
3115 	kfree(bufs);
3116 	return result;
3117 }
3118 
3119 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3120 {
3121 	struct snd_pcm_file *pcm_file;
3122 	struct snd_pcm_substream *substream;
3123 	struct snd_pcm_runtime *runtime;
3124 	__poll_t mask, ok;
3125 	snd_pcm_uframes_t avail;
3126 
3127 	pcm_file = file->private_data;
3128 
3129 	substream = pcm_file->substream;
3130 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3131 		ok = EPOLLOUT | EPOLLWRNORM;
3132 	else
3133 		ok = EPOLLIN | EPOLLRDNORM;
3134 	if (PCM_RUNTIME_CHECK(substream))
3135 		return ok | EPOLLERR;
3136 
3137 	runtime = substream->runtime;
3138 	poll_wait(file, &runtime->sleep, wait);
3139 
3140 	mask = 0;
3141 	snd_pcm_stream_lock_irq(substream);
3142 	avail = snd_pcm_avail(substream);
3143 	switch (runtime->status->state) {
3144 	case SNDRV_PCM_STATE_RUNNING:
3145 	case SNDRV_PCM_STATE_PREPARED:
3146 	case SNDRV_PCM_STATE_PAUSED:
3147 		if (avail >= runtime->control->avail_min)
3148 			mask = ok;
3149 		break;
3150 	case SNDRV_PCM_STATE_DRAINING:
3151 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3152 			mask = ok;
3153 			if (!avail)
3154 				mask |= EPOLLERR;
3155 		}
3156 		break;
3157 	default:
3158 		mask = ok | EPOLLERR;
3159 		break;
3160 	}
3161 	snd_pcm_stream_unlock_irq(substream);
3162 	return mask;
3163 }
3164 
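/*
 * Illustrative sketch (not part of the original file): waiting for room in a
 * playback stream from user space.  snd_pcm_poll() above reports
 * EPOLLOUT|EPOLLWRNORM once at least avail_min frames are free, and adds
 * EPOLLERR in abnormal states such as XRUN; "pcm_fd" is a placeholder name
 * and write_period() a hypothetical helper.
 *
 *	struct pollfd pfd = { .fd = pcm_fd, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT)) {
 *		// at least avail_min frames can be written now
 *		write_period(pcm_fd);
 *	}
 */
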
3165 /*
3166  * mmap support
3167  */
3168 
3169 /*
3170  * Only on coherent architectures can we mmap the status and control records
3171  * for efficient data transfer.  On others, the HWSYNC ioctl has to be used.
3172  */
3173 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3174 /*
3175  * mmap status record
3176  */
3177 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3178 {
3179 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3180 	struct snd_pcm_runtime *runtime;
3181 
3182 	if (substream == NULL)
3183 		return VM_FAULT_SIGBUS;
3184 	runtime = substream->runtime;
3185 	vmf->page = virt_to_page(runtime->status);
3186 	get_page(vmf->page);
3187 	return 0;
3188 }
3189 
3190 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3191 {
3192 	.fault =	snd_pcm_mmap_status_fault,
3193 };
3194 
3195 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3196 			       struct vm_area_struct *area)
3197 {
3198 	long size;
3199 	if (!(area->vm_flags & VM_READ))
3200 		return -EINVAL;
3201 	size = area->vm_end - area->vm_start;
3202 	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3203 		return -EINVAL;
3204 	area->vm_ops = &snd_pcm_vm_ops_status;
3205 	area->vm_private_data = substream;
3206 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3207 	return 0;
3208 }
3209 
3210 /*
3211  * mmap control record
3212  */
3213 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3214 {
3215 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3216 	struct snd_pcm_runtime *runtime;
3217 
3218 	if (substream == NULL)
3219 		return VM_FAULT_SIGBUS;
3220 	runtime = substream->runtime;
3221 	vmf->page = virt_to_page(runtime->control);
3222 	get_page(vmf->page);
3223 	return 0;
3224 }
3225 
3226 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3227 {
3228 	.fault =	snd_pcm_mmap_control_fault,
3229 };
3230 
3231 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3232 				struct vm_area_struct *area)
3233 {
3234 	long size;
3235 	if (!(area->vm_flags & VM_READ))
3236 		return -EINVAL;
3237 	size = area->vm_end - area->vm_start;
3238 	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3239 		return -EINVAL;
3240 	area->vm_ops = &snd_pcm_vm_ops_control;
3241 	area->vm_private_data = substream;
3242 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3243 	return 0;
3244 }
3245 
3246 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3247 {
3248 	if (pcm_file->no_compat_mmap)
3249 		return false;
3250 	/* See pcm_control_mmap_allowed() below.
3251 	 * Since older alsa-lib requires both status and control mmaps to be
3252 	 * coupled, we have to disable the status mmap for old alsa-lib, too.
3253 	 */
3254 	if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3255 	    (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3256 		return false;
3257 	return true;
3258 }
3259 
3260 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3261 {
3262 	if (pcm_file->no_compat_mmap)
3263 		return false;
3264 	/* Disallow the control mmap when SYNC_APPLPTR flag is set;
3265 	 * it forces user-space to fall back to snd_pcm_sync_ptr(),
3266 	 * which effectively assures the manual update of appl_ptr.
3267 	 */
3268 	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3269 		return false;
3270 	return true;
3271 }
3272 
3273 #else /* ! coherent mmap */
3274 /*
3275  * don't support mmap for status and control records.
3276  */
3277 #define pcm_status_mmap_allowed(pcm_file)	false
3278 #define pcm_control_mmap_allowed(pcm_file)	false
3279 
3280 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3281 			       struct vm_area_struct *area)
3282 {
3283 	return -ENXIO;
3284 }
3285 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3286 				struct vm_area_struct *area)
3287 {
3288 	return -ENXIO;
3289 }
3290 #endif /* coherent mmap */
3291 
3292 static inline struct page *
3293 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3294 {
3295 	void *vaddr = substream->runtime->dma_area + ofs;
3296 	return virt_to_page(vaddr);
3297 }
3298 
3299 /*
3300  * fault callback for mmapping a RAM page
3301  */
3302 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3303 {
3304 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3305 	struct snd_pcm_runtime *runtime;
3306 	unsigned long offset;
3307 	struct page * page;
3308 	size_t dma_bytes;
3309 
3310 	if (substream == NULL)
3311 		return VM_FAULT_SIGBUS;
3312 	runtime = substream->runtime;
3313 	offset = vmf->pgoff << PAGE_SHIFT;
3314 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3315 	if (offset > dma_bytes - PAGE_SIZE)
3316 		return VM_FAULT_SIGBUS;
3317 	if (substream->ops->page)
3318 		page = substream->ops->page(substream, offset);
3319 	else
3320 		page = snd_pcm_default_page_ops(substream, offset);
3321 	if (!page)
3322 		return VM_FAULT_SIGBUS;
3323 	get_page(page);
3324 	vmf->page = page;
3325 	return 0;
3326 }
3327 
3328 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3329 	.open =		snd_pcm_mmap_data_open,
3330 	.close =	snd_pcm_mmap_data_close,
3331 };
3332 
3333 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3334 	.open =		snd_pcm_mmap_data_open,
3335 	.close =	snd_pcm_mmap_data_close,
3336 	.fault =	snd_pcm_mmap_data_fault,
3337 };
3338 
3339 /*
3340  * mmap the DMA buffer on RAM
3341  */
3342 
3343 /**
3344  * snd_pcm_lib_default_mmap - Default PCM data mmap function
3345  * @substream: PCM substream
3346  * @area: VMA
3347  *
3348  * This is the default mmap handler for PCM data.  When the mmap pcm_ops
3349  * callback is NULL, this function is invoked implicitly.
3350  */
3351 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3352 			     struct vm_area_struct *area)
3353 {
3354 	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3355 #ifdef CONFIG_GENERIC_ALLOCATOR
3356 	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3357 		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3358 		return remap_pfn_range(area, area->vm_start,
3359 				substream->dma_buffer.addr >> PAGE_SHIFT,
3360 				area->vm_end - area->vm_start, area->vm_page_prot);
3361 	}
3362 #endif /* CONFIG_GENERIC_ALLOCATOR */
3363 #ifndef CONFIG_X86 /* to avoid warnings from arch/x86/mm/pat.c */
3364 	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3365 	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
3366 		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3367 					 area,
3368 					 substream->runtime->dma_area,
3369 					 substream->runtime->dma_addr,
3370 					 substream->runtime->dma_bytes);
3371 #endif /* CONFIG_X86 */
3372 	/* mmap with fault handler */
3373 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
3374 	return 0;
3375 }
3376 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
3377 
3378 /*
3379  * mmap the DMA buffer on I/O memory area
3380  */
3381 #if SNDRV_PCM_INFO_MMAP_IOMEM
3382 /**
3383  * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3384  * @substream: PCM substream
3385  * @area: VMA
3386  *
3387  * When your hardware uses the iomapped pages as the hardware buffer and
3388  * wants to mmap it, pass this function as the mmap pcm_ops callback.  Note
3389  * that this is supposed to work only on a limited set of architectures.
3390  */
3391 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3392 			   struct vm_area_struct *area)
3393 {
3394 	struct snd_pcm_runtime *runtime = substream->runtime;
3395 
3396 	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3397 	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3398 }
3399 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
3400 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
3401 
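/*
 * Illustrative sketch (not part of the original file): a driver whose ring
 * buffer lives in device I/O memory would hook the helper above into its PCM
 * callbacks and advertise SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_IOMEM in
 * its snd_pcm_hardware.info.  The ops table below is hypothetical and
 * abbreviated; the example_*() callbacks are driver specific.
 *
 *	static const struct snd_pcm_ops example_iomem_ops = {
 *		.open		= example_open,
 *		.close		= example_close,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.prepare	= example_prepare,
 *		.trigger	= example_trigger,
 *		.pointer	= example_pointer,
 *		.mmap		= snd_pcm_lib_mmap_iomem,
 *	};
 */
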
3402 /*
3403  * mmap DMA buffer
3404  */
3405 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3406 		      struct vm_area_struct *area)
3407 {
3408 	struct snd_pcm_runtime *runtime;
3409 	long size;
3410 	unsigned long offset;
3411 	size_t dma_bytes;
3412 	int err;
3413 
3414 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3415 		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3416 			return -EINVAL;
3417 	} else {
3418 		if (!(area->vm_flags & VM_READ))
3419 			return -EINVAL;
3420 	}
3421 	runtime = substream->runtime;
3422 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3423 		return -EBADFD;
3424 	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3425 		return -ENXIO;
3426 	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3427 	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3428 		return -EINVAL;
3429 	size = area->vm_end - area->vm_start;
3430 	offset = area->vm_pgoff << PAGE_SHIFT;
3431 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3432 	if ((size_t)size > dma_bytes)
3433 		return -EINVAL;
3434 	if (offset > dma_bytes - size)
3435 		return -EINVAL;
3436 
3437 	area->vm_ops = &snd_pcm_vm_ops_data;
3438 	area->vm_private_data = substream;
3439 	if (substream->ops->mmap)
3440 		err = substream->ops->mmap(substream, area);
3441 	else
3442 		err = snd_pcm_lib_default_mmap(substream, area);
3443 	if (!err)
3444 		atomic_inc(&substream->mmap_count);
3445 	return err;
3446 }
3447 EXPORT_SYMBOL(snd_pcm_mmap_data);
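
/*
 * Illustrative sketch, not part of the original source: from user space the
 * data area served by this function is reached by mmap()ing the PCM device
 * node at offset 0, with a length no larger than the page-aligned buffer
 * size negotiated via hw_params.  "pcm_fd" and "buffer_bytes" are
 * hypothetical.
 *
 *	void *buf = mmap(NULL, buffer_bytes, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, pcm_fd, 0);
 *	if (buf == MAP_FAILED)
 *		return -errno;
 */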
3448 
3449 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3450 {
3451 	struct snd_pcm_file * pcm_file;
3452 	struct snd_pcm_substream *substream;
3453 	unsigned long offset;
3454 
3455 	pcm_file = file->private_data;
3456 	substream = pcm_file->substream;
3457 	if (PCM_RUNTIME_CHECK(substream))
3458 		return -ENXIO;
3459 
3460 	offset = area->vm_pgoff << PAGE_SHIFT;
3461 	switch (offset) {
3462 	case SNDRV_PCM_MMAP_OFFSET_STATUS:
3463 		if (!pcm_status_mmap_allowed(pcm_file))
3464 			return -ENXIO;
3465 		return snd_pcm_mmap_status(substream, file, area);
3466 	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3467 		if (!pcm_control_mmap_allowed(pcm_file))
3468 			return -ENXIO;
3469 		return snd_pcm_mmap_control(substream, file, area);
3470 	default:
3471 		return snd_pcm_mmap_data(substream, file, area);
3472 	}
3473 	return 0;	/* not reached: every case above returns */
3474 }
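
/*
 * Illustrative sketch, not part of the original source: user space (this is
 * essentially what alsa-lib does internally) reaches the status and control
 * records through the dedicated offsets handled above; the status record is
 * mapped read-only, the control record read-write.  "pcm_fd" and "page_size"
 * (e.g. from sysconf(_SC_PAGESIZE)) are hypothetical.
 *
 *	struct snd_pcm_mmap_status *status =
 *		mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *		     pcm_fd, SNDRV_PCM_MMAP_OFFSET_STATUS);
 *	struct snd_pcm_mmap_control *control =
 *		mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		     pcm_fd, SNDRV_PCM_MMAP_OFFSET_CONTROL);
 */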
3475 
3476 static int snd_pcm_fasync(int fd, struct file * file, int on)
3477 {
3478 	struct snd_pcm_file * pcm_file;
3479 	struct snd_pcm_substream *substream;
3480 	struct snd_pcm_runtime *runtime;
3481 
3482 	pcm_file = file->private_data;
3483 	substream = pcm_file->substream;
3484 	if (PCM_RUNTIME_CHECK(substream))
3485 		return -ENXIO;
3486 	runtime = substream->runtime;
3487 	return fasync_helper(fd, file, on, &runtime->fasync);
3488 }
3489 
3490 /*
3491  * ioctl32 compat
3492  */
3493 #ifdef CONFIG_COMPAT
3494 #include "pcm_compat.c"
3495 #else
3496 #define snd_pcm_ioctl_compat	NULL
3497 #endif
3498 
3499 /*
3500  *  Helpers kept only for binary compatibility with the old API (to be removed)
3501  */
3502 
3503 #ifdef CONFIG_SND_SUPPORT_OLD_API
3504 #define __OLD_TO_NEW_MASK(x) (((x)&7)|(((x)&0x07fffff8)<<5))
3505 #define __NEW_TO_OLD_MASK(x) (((x)&7)|(((x)&0xffffff00)>>5))
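
/*
 * Bit layout of the conversion: bits 0-2 stay in place, while bits 3-26 of
 * the old mask move up to bits 8-31 of the new one (and back in the other
 * direction).  This matches the index shift between the two layouts: the
 * interval parameters start right after the masks at index 3 in the old API,
 * but at index 8 (SNDRV_PCM_HW_PARAM_SAMPLE_BITS) in the current one.
 */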
3506 
3507 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3508 					       struct snd_pcm_hw_params_old *oparams)
3509 {
3510 	unsigned int i;
3511 
3512 	memset(params, 0, sizeof(*params));
3513 	params->flags = oparams->flags;
3514 	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3515 		params->masks[i].bits[0] = oparams->masks[i];
3516 	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3517 	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3518 	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3519 	params->info = oparams->info;
3520 	params->msbits = oparams->msbits;
3521 	params->rate_num = oparams->rate_num;
3522 	params->rate_den = oparams->rate_den;
3523 	params->fifo_size = oparams->fifo_size;
3524 }
3525 
3526 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3527 					     struct snd_pcm_hw_params *params)
3528 {
3529 	unsigned int i;
3530 
3531 	memset(oparams, 0, sizeof(*oparams));
3532 	oparams->flags = params->flags;
3533 	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3534 		oparams->masks[i] = params->masks[i].bits[0];
3535 	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3536 	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3537 	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3538 	oparams->info = params->info;
3539 	oparams->msbits = params->msbits;
3540 	oparams->rate_num = params->rate_num;
3541 	oparams->rate_den = params->rate_den;
3542 	oparams->fifo_size = params->fifo_size;
3543 }
3544 
3545 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3546 				      struct snd_pcm_hw_params_old __user * _oparams)
3547 {
3548 	struct snd_pcm_hw_params *params;
3549 	struct snd_pcm_hw_params_old *oparams = NULL;
3550 	int err;
3551 
3552 	params = kmalloc(sizeof(*params), GFP_KERNEL);
3553 	if (!params)
3554 		return -ENOMEM;
3555 
3556 	oparams = memdup_user(_oparams, sizeof(*oparams));
3557 	if (IS_ERR(oparams)) {
3558 		err = PTR_ERR(oparams);
3559 		goto out;
3560 	}
3561 	snd_pcm_hw_convert_from_old_params(params, oparams);
3562 	err = snd_pcm_hw_refine(substream, params);
3563 	if (err < 0)
3564 		goto out_old;
3565 
3566 	err = fixup_unreferenced_params(substream, params);
3567 	if (err < 0)
3568 		goto out_old;
3569 
3570 	snd_pcm_hw_convert_to_old_params(oparams, params);
3571 	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3572 		err = -EFAULT;
3573 out_old:
3574 	kfree(oparams);
3575 out:
3576 	kfree(params);
3577 	return err;
3578 }
3579 
3580 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3581 				      struct snd_pcm_hw_params_old __user * _oparams)
3582 {
3583 	struct snd_pcm_hw_params *params;
3584 	struct snd_pcm_hw_params_old *oparams = NULL;
3585 	int err;
3586 
3587 	params = kmalloc(sizeof(*params), GFP_KERNEL);
3588 	if (!params)
3589 		return -ENOMEM;
3590 
3591 	oparams = memdup_user(_oparams, sizeof(*oparams));
3592 	if (IS_ERR(oparams)) {
3593 		err = PTR_ERR(oparams);
3594 		goto out;
3595 	}
3596 
3597 	snd_pcm_hw_convert_from_old_params(params, oparams);
3598 	err = snd_pcm_hw_params(substream, params);
3599 	if (err < 0)
3600 		goto out_old;
3601 
3602 	snd_pcm_hw_convert_to_old_params(oparams, params);
3603 	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3604 		err = -EFAULT;
3605 out_old:
3606 	kfree(oparams);
3607 out:
3608 	kfree(params);
3609 	return err;
3610 }
3611 #endif /* CONFIG_SND_SUPPORT_OLD_API */
3612 
3613 #ifndef CONFIG_MMU
3614 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3615 					       unsigned long addr,
3616 					       unsigned long len,
3617 					       unsigned long pgoff,
3618 					       unsigned long flags)
3619 {
3620 	struct snd_pcm_file *pcm_file = file->private_data;
3621 	struct snd_pcm_substream *substream = pcm_file->substream;
3622 	struct snd_pcm_runtime *runtime = substream->runtime;
3623 	unsigned long offset = pgoff << PAGE_SHIFT;
3624 
3625 	switch (offset) {
3626 	case SNDRV_PCM_MMAP_OFFSET_STATUS:
3627 		return (unsigned long)runtime->status;
3628 	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3629 		return (unsigned long)runtime->control;
3630 	default:
3631 		return (unsigned long)runtime->dma_area + offset;
3632 	}
3633 }
3634 #else
3635 # define snd_pcm_get_unmapped_area NULL
3636 #endif
3637 
3638 /*
3639  *  Register section
3640  */
3641 
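/*
 * One set of file operations per stream direction: entry 0 serves playback
 * (the write-side ops), entry 1 serves capture (the read-side ops).
 */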
3642 const struct file_operations snd_pcm_f_ops[2] = {
3643 	{
3644 		.owner =		THIS_MODULE,
3645 		.write =		snd_pcm_write,
3646 		.write_iter =		snd_pcm_writev,
3647 		.open =			snd_pcm_playback_open,
3648 		.release =		snd_pcm_release,
3649 		.llseek =		no_llseek,
3650 		.poll =			snd_pcm_poll,
3651 		.unlocked_ioctl =	snd_pcm_ioctl,
3652 		.compat_ioctl =		snd_pcm_ioctl_compat,
3653 		.mmap =			snd_pcm_mmap,
3654 		.fasync =		snd_pcm_fasync,
3655 		.get_unmapped_area =	snd_pcm_get_unmapped_area,
3656 	},
3657 	{
3658 		.owner =		THIS_MODULE,
3659 		.read =			snd_pcm_read,
3660 		.read_iter =		snd_pcm_readv,
3661 		.open =			snd_pcm_capture_open,
3662 		.release =		snd_pcm_release,
3663 		.llseek =		no_llseek,
3664 		.poll =			snd_pcm_poll,
3665 		.unlocked_ioctl =	snd_pcm_ioctl,
3666 		.compat_ioctl =		snd_pcm_ioctl_compat,
3667 		.mmap =			snd_pcm_mmap,
3668 		.fasync =		snd_pcm_fasync,
3669 		.get_unmapped_area =	snd_pcm_get_unmapped_area,
3670 	}
3671 };
3672