/*
 * Digital Audio (PCM) abstract layer
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/pm_qos.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/minors.h>
#include <linux/uio.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_param_trace.h"
#else
#define trace_hw_mask_param_enabled()		0
#define trace_hw_interval_param_enabled()	0
#define trace_hw_mask_param(substream, type, index, prev, curr)
#define trace_hw_interval_param(substream, type, index, prev, curr)
#endif

/*
 * Compatibility
 */

struct snd_pcm_hw_params_old {
	unsigned int flags;
	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
				      SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
	unsigned int rmask;
	unsigned int cmask;
	unsigned int info;
	unsigned int msbits;
	unsigned int rate_num;
	unsigned int rate_den;
	snd_pcm_uframes_t fifo_size;
	unsigned char reserved[64];
};

#ifdef CONFIG_SND_SUPPORT_OLD_API
#define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
#define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)

static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
#endif
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);

/*
 *
 */

static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);

/* A writer on the rwsem may block readers even while it is still waiting in
 * the queue, and this may lead to a deadlock when a code path takes the read
 * sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
 * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
 * spin until it gets the lock.
 */
static inline void down_write_nonblock(struct rw_semaphore *lock)
{
	while (!down_write_trylock(lock))
		cond_resched();
}

#define PCM_LOCK_DEFAULT	0
#define PCM_LOCK_IRQ		1
#define PCM_LOCK_IRQSAVE	2

static unsigned long __snd_pcm_stream_lock_mode(struct snd_pcm_substream *substream,
						unsigned int mode)
{
	unsigned long flags = 0;
	if (substream->pcm->nonatomic) {
		down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
		mutex_lock(&substream->self_group.mutex);
	} else {
		switch (mode) {
		case PCM_LOCK_DEFAULT:
			read_lock(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQ:
			read_lock_irq(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQSAVE:
			read_lock_irqsave(&snd_pcm_link_rwlock, flags);
			break;
		}
		spin_lock(&substream->self_group.lock);
	}
	return flags;
}

static void __snd_pcm_stream_unlock_mode(struct snd_pcm_substream *substream,
					 unsigned int mode, unsigned long flags)
{
	if (substream->pcm->nonatomic) {
		mutex_unlock(&substream->self_group.mutex);
		up_read(&snd_pcm_link_rwsem);
	} else {
		spin_unlock(&substream->self_group.lock);

		switch (mode) {
		case PCM_LOCK_DEFAULT:
			read_unlock(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQ:
			read_unlock_irq(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQSAVE:
			read_unlock_irqrestore(&snd_pcm_link_rwlock, flags);
			break;
		}
	}
}

/**
 * snd_pcm_stream_lock - Lock the PCM stream
 * @substream: PCM substream
 *
 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
 * flag of the given substream. It also takes the global link rwlock (or
 * rwsem) to avoid races with linked streams.
 */
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_lock_mode(substream, PCM_LOCK_DEFAULT);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);

/**
 * snd_pcm_stream_unlock - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
 */
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_DEFAULT, 0);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);

/**
 * snd_pcm_stream_lock_irq - Lock the PCM stream
 * @substream: PCM substream
 *
 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
 * IRQs (only when nonatomic is false). In the nonatomic case, this is
 * identical to snd_pcm_stream_lock().
 */
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQ);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
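
/*
 * Illustrative sketch (not part of the original file): a typical caller-side
 * pattern for the stream-lock API above, mirroring what snd_pcm_stop_xrun()
 * does further below. The function name is hypothetical.
 */
#if 0
static void example_force_xrun(struct snd_pcm_substream *substream)
{
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream))
		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
#endif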

/**
 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This is a counterpart of snd_pcm_stream_lock_irq().
 */
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQ, 0);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);

unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
	return __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQSAVE);
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);

/**
 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
 * @substream: PCM substream
 * @flags: irq flags
 *
 * This is a counterpart of snd_pcm_stream_lock_irqsave().
 */
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
				      unsigned long flags)
{
	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQSAVE, flags);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);

int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
	struct snd_pcm *pcm = substream->pcm;
	struct snd_pcm_str *pstr = substream->pstr;

	memset(info, 0, sizeof(*info));
	info->card = pcm->card->number;
	info->device = pcm->device;
	info->stream = substream->stream;
	info->subdevice = substream->number;
	strlcpy(info->id, pcm->id, sizeof(info->id));
	strlcpy(info->name, pcm->name, sizeof(info->name));
	info->dev_class = pcm->dev_class;
	info->dev_subclass = pcm->dev_subclass;
	info->subdevices_count = pstr->substream_count;
	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
	strlcpy(info->subname, substream->name, sizeof(info->subname));

	return 0;
}

int snd_pcm_info_user(struct snd_pcm_substream *substream,
		      struct snd_pcm_info __user * _info)
{
	struct snd_pcm_info *info;
	int err;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (! info)
		return -ENOMEM;
	err = snd_pcm_info(substream, info);
	if (err >= 0) {
		if (copy_to_user(_info, info, sizeof(*info)))
			err = -EFAULT;
	}
	kfree(info);
	return err;
}

static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
		return false;
	/* architecture supports dma_mmap_coherent()? */
#if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
	if (!substream->ops->mmap &&
	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
		return false;
#endif
	return true;
}

static int constrain_mask_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	struct snd_mask *m;
	unsigned int k;
	struct snd_mask old_mask;
	int changed;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
		m = hw_param_mask(params, k);
		if (snd_mask_empty(m))
			return -EINVAL;

		/* The caller did not request a change of this parameter. */
		if (!(params->rmask & (1 << k)))
			continue;

		if (trace_hw_mask_param_enabled())
			old_mask = *m;

		changed = snd_mask_refine(m, constrs_mask(constrs, k));
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Set the corresponding flag so that the caller gets it. */
		trace_hw_mask_param(substream, k, 0, &old_mask, m);
		params->cmask |= 1 << k;
	}

	return 0;
}

static int constrain_interval_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	struct snd_interval *i;
	unsigned int k;
	struct snd_interval old_interval;
	int changed;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
		i = hw_param_interval(params, k);
		if (snd_interval_empty(i))
			return -EINVAL;

		/* The caller did not request a change of this parameter. */
		if (!(params->rmask & (1 << k)))
			continue;

		if (trace_hw_interval_param_enabled())
			old_interval = *i;

		changed = snd_interval_refine(i, constrs_interval(constrs, k));
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Set the corresponding flag so that the caller gets it. */
		trace_hw_interval_param(substream, k, 0, &old_interval, i);
		params->cmask |= 1 << k;
	}

	return 0;
}

static int constrain_params_by_rules(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	unsigned int k;
	unsigned int *rstamps;
	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
	unsigned int stamp;
	struct snd_pcm_hw_rule *r;
	unsigned int d;
	struct snd_mask old_mask;
	struct snd_interval old_interval;
	bool again;
	int changed, err = 0;

	/*
	 * Each application of a rule has its own sequence number.
	 *
	 * Each member of the 'rstamps' array holds the sequence number of
	 * the most recent application of the corresponding rule.
	 */
	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
	if (!rstamps)
		return -ENOMEM;

	/*
	 * Each member of the 'vstamps' array holds the sequence number of
	 * the most recent rule application that changed the corresponding
	 * parameter.
	 *
	 * In the initial state, the elements corresponding to parameters
	 * requested by the caller are set to 1. For unrequested parameters,
	 * the corresponding members stay 0 so that those parameters are
	 * never changed.
	 */
	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;

	/* Due to the above design, actual sequence numbers start at 2. */
	stamp = 2;
retry:
	/* Apply all rules in order. */
	again = false;
	for (k = 0; k < constrs->rules_num; k++) {
		r = &constrs->rules[k];

		/*
		 * Check the condition bits of this rule. When the rule has
		 * condition bits, it is processed only for parameter sets
		 * carrying those bits; SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
		 * is an example of such a condition bit.
		 */
		if (r->cond && !(r->cond & params->flags))
			continue;

		/*
		 * The 'deps' array includes at most three dependencies on
		 * SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth member
		 * of the array is a sentinel and should be a negative value.
		 *
		 * The rule needs to be processed this time only when the
		 * parameters it depends on were changed by earlier
		 * applications of the other rules.
		 */
		for (d = 0; r->deps[d] >= 0; d++) {
			if (vstamps[r->deps[d]] > rstamps[k])
				break;
		}
		if (r->deps[d] < 0)
			continue;

		if (trace_hw_mask_param_enabled()) {
			if (hw_is_mask(r->var))
				old_mask = *hw_param_mask(params, r->var);
		}
		if (trace_hw_interval_param_enabled()) {
			if (hw_is_interval(r->var))
				old_interval = *hw_param_interval(params, r->var);
		}

		changed = r->func(params, r);
		if (changed < 0) {
			err = changed;
			goto out;
		}

		/*
		 * When the parameter is changed, notify the caller via the
		 * corresponding returned bit, then prepare for the next
		 * iteration.
		 */
		if (changed && r->var >= 0) {
			if (hw_is_mask(r->var)) {
				trace_hw_mask_param(substream, r->var,
						    k + 1, &old_mask,
						    hw_param_mask(params, r->var));
			}
			if (hw_is_interval(r->var)) {
				trace_hw_interval_param(substream, r->var,
							k + 1, &old_interval,
							hw_param_interval(params, r->var));
			}

			params->cmask |= (1 << r->var);
			vstamps[r->var] = stamp;
			again = true;
		}

		rstamps[k] = stamp++;
	}

	/* Iterate to evaluate all rules till no parameters are changed. */
	if (again)
		goto retry;

 out:
	kfree(rstamps);
	return err;
}

static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	const struct snd_interval *i;
	const struct snd_mask *m;
	int err;

	if (!params->msbits) {
		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
		if (snd_interval_single(i))
			params->msbits = snd_interval_value(i);
	}

	if (!params->rate_den) {
		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
		if (snd_interval_single(i)) {
			params->rate_num = snd_interval_value(i);
			params->rate_den = 1;
		}
	}

	if (!params->fifo_size) {
		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
		if (snd_mask_single(m) && snd_interval_single(i)) {
			err = substream->ops->ioctl(substream,
					SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
			if (err < 0)
				return err;
		}
	}

	if (!params->info) {
		params->info = substream->runtime->hw.info;
		params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
				  SNDRV_PCM_INFO_DRAIN_TRIGGER);
		if (!hw_support_mmap(substream))
			params->info &= ~(SNDRV_PCM_INFO_MMAP |
					  SNDRV_PCM_INFO_MMAP_VALID);
	}

	return 0;
}

int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
		      struct snd_pcm_hw_params *params)
{
	int err;

	params->info = 0;
	params->fifo_size = 0;
	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
		params->msbits = 0;
	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
		params->rate_num = 0;
		params->rate_den = 0;
	}

	err = constrain_mask_params(substream, params);
	if (err < 0)
		return err;

	err = constrain_interval_params(substream, params);
	if (err < 0)
		return err;

	err = constrain_params_by_rules(substream, params);
	if (err < 0)
		return err;

	params->rmask = 0;

	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_refine);
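
/*
 * Illustrative sketch (not part of the original file): refining the whole
 * configuration space at once, the same way the hw_params path below does
 * before choosing a single configuration. The function name is
 * hypothetical; the params structure is assumed to describe the full space.
 */
#if 0
static int example_refine_all(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	/* ask for every mask/interval to be (re)refined at once */
	params->rmask = ~0U;
	return snd_pcm_hw_refine(substream, params);
}
#endif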

static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto end;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto end;

	if (copy_to_user(_params, params, sizeof(*params)))
		err = -EFAULT;
end:
	kfree(params);
	return err;
}

static int period_to_usecs(struct snd_pcm_runtime *runtime)
{
	int usecs;

	if (! runtime->rate)
		return -1; /* invalid */

	/* take 75% of period time as the deadline */
	usecs = (750000 / runtime->rate) * runtime->period_size;
	usecs += ((750000 % runtime->rate) * runtime->period_size) /
		 runtime->rate;

	return usecs;
}
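
/*
 * Worked example for period_to_usecs() above (illustrative comment, not in
 * the original file): with rate = 48000 Hz and period_size = 1024 frames,
 * the period time is ~21333 us; 75% of that is 16000 us, which matches
 * (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 *   = 15 * 1024 + (30000 * 1024) / 48000 = 15360 + 640 = 16000.
 */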

static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
{
	snd_pcm_stream_lock_irq(substream);
	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
		substream->runtime->status->state = state;
	snd_pcm_stream_unlock_irq(substream);
}

static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
					int event)
{
#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer)
		snd_timer_notify(substream->timer, event,
					&substream->runtime->trigger_tstamp);
#endif
}

/**
 * snd_pcm_hw_params_choose - choose a configuration defined by @params
 * @pcm: PCM instance
 * @params: the hw_params instance
 *
 * Choose one configuration from the configuration space defined by @params.
 * The configuration chosen is that obtained fixing in this order:
 * first access, first format, first subformat, min channels,
 * min rate, min period time, max buffer size, min tick time
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
				    struct snd_pcm_hw_params *params)
{
	static const int vars[] = {
		SNDRV_PCM_HW_PARAM_ACCESS,
		SNDRV_PCM_HW_PARAM_FORMAT,
		SNDRV_PCM_HW_PARAM_SUBFORMAT,
		SNDRV_PCM_HW_PARAM_CHANNELS,
		SNDRV_PCM_HW_PARAM_RATE,
		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
		SNDRV_PCM_HW_PARAM_TICK_TIME,
		-1
	};
	const int *v;
	struct snd_mask old_mask;
	struct snd_interval old_interval;
	int changed;

	for (v = vars; *v != -1; v++) {
		/* Keep the old parameter for tracing. */
		if (trace_hw_mask_param_enabled()) {
			if (hw_is_mask(*v))
				old_mask = *hw_param_mask(params, *v);
		}
		if (trace_hw_interval_param_enabled()) {
			if (hw_is_interval(*v))
				old_interval = *hw_param_interval(params, *v);
		}
		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
		else
			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Trace the changed parameter. */
		if (hw_is_mask(*v)) {
			trace_hw_mask_param(pcm, *v, 0, &old_mask,
					    hw_param_mask(params, *v));
		}
		if (hw_is_interval(*v)) {
			trace_hw_interval_param(pcm, *v, 0, &old_interval,
						hw_param_interval(params, *v));
		}
	}

	return 0;
}

static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
	if (!substream->oss.oss)
#endif
		if (atomic_read(&substream->mmap_count))
			return -EBADFD;

	params->rmask = ~0U;
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto _error;

	err = snd_pcm_hw_params_choose(substream, params);
	if (err < 0)
		goto _error;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto _error;

	if (substream->ops->hw_params != NULL) {
		err = substream->ops->hw_params(substream, params);
		if (err < 0)
			goto _error;
	}

	runtime->access = params_access(params);
	runtime->format = params_format(params);
	runtime->subformat = params_subformat(params);
	runtime->channels = params_channels(params);
	runtime->rate = params_rate(params);
	runtime->period_size = params_period_size(params);
	runtime->periods = params_periods(params);
	runtime->buffer_size = params_buffer_size(params);
	runtime->info = params->info;
	runtime->rate_num = params->rate_num;
	runtime->rate_den = params->rate_den;
	runtime->no_period_wakeup =
			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);

	bits = snd_pcm_format_physical_width(runtime->format);
	runtime->sample_bits = bits;
	bits *= runtime->channels;
	runtime->frame_bits = bits;
	frames = 1;
	while (bits % 8 != 0) {
		bits *= 2;
		frames *= 2;
	}
	runtime->byte_align = bits / 8;
	runtime->min_align = frames;

	/* Default sw params */
	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	runtime->period_step = 1;
	runtime->control->avail_min = runtime->period_size;
	runtime->start_threshold = 1;
	runtime->stop_threshold = runtime->buffer_size;
	runtime->silence_threshold = 0;
	runtime->silence_size = 0;
	runtime->boundary = runtime->buffer_size;
	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;

	snd_pcm_timer_resolution_change(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);

	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if ((usecs = period_to_usecs(runtime)) >= 0)
		pm_qos_add_request(&substream->latency_pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY, usecs);
	return 0;
 _error:
	/* hardware might be unusable from this time, so we force the
	 * application to retry setting the correct hardware parameters
	 */
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	if (substream->ops->hw_free != NULL)
		substream->ops->hw_free(substream);
	return err;
}
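
/*
 * Worked example for the frame/byte alignment set up in snd_pcm_hw_params()
 * above (illustrative comment, not in the original file): for
 * SNDRV_PCM_FORMAT_S16_LE with 2 channels, the physical sample width is 16,
 * so sample_bits = 16, frame_bits = 32, byte_align = 4 and min_align = 1;
 * with 3 channels, frame_bits = 48 and byte_align = 6, and min_align stays 1
 * since 48 is already a multiple of 8.
 */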

static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto end;

	if (copy_to_user(_params, params, sizeof(*params)))
		err = -EFAULT;
end:
	kfree(params);
	return err;
}

static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (atomic_read(&substream->mmap_count))
		return -EBADFD;
	if (substream->ops->hw_free)
		result = substream->ops->hw_free(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	pm_qos_remove_request(&substream->latency_pm_qos_req);
	return result;
}

static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_sw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);

	if (params->tstamp_mode < 0 ||
	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
		return -EINVAL;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	if (params->avail_min == 0)
		return -EINVAL;
	if (params->silence_size >= runtime->boundary) {
		if (params->silence_threshold != 0)
			return -EINVAL;
	} else {
		if (params->silence_size > params->silence_threshold)
			return -EINVAL;
		if (params->silence_threshold > runtime->buffer_size)
			return -EINVAL;
	}
	err = 0;
	snd_pcm_stream_lock_irq(substream);
	runtime->tstamp_mode = params->tstamp_mode;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
		runtime->tstamp_type = params->tstamp_type;
	runtime->period_step = params->period_step;
	runtime->control->avail_min = params->avail_min;
	runtime->start_threshold = params->start_threshold;
	runtime->stop_threshold = params->stop_threshold;
	runtime->silence_threshold = params->silence_threshold;
	runtime->silence_size = params->silence_size;
	params->boundary = runtime->boundary;
	if (snd_pcm_running(substream)) {
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    runtime->silence_size > 0)
			snd_pcm_playback_silence(substream, ULONG_MAX);
		err = snd_pcm_update_state(substream, runtime);
	}
	snd_pcm_stream_unlock_irq(substream);
	return err;
}

static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_sw_params __user * _params)
{
	struct snd_pcm_sw_params params;
	int err;
	if (copy_from_user(&params, _params, sizeof(params)))
		return -EFAULT;
	err = snd_pcm_sw_params(substream, &params);
	if (copy_to_user(_params, &params, sizeof(params)))
		return -EFAULT;
	return err;
}

static inline snd_pcm_uframes_t
snd_pcm_calc_delay(struct snd_pcm_substream *substream)
{
	snd_pcm_uframes_t delay;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		delay = snd_pcm_playback_hw_avail(substream->runtime);
	else
		delay = snd_pcm_capture_avail(substream->runtime);
	return delay + substream->runtime->delay;
}

int snd_pcm_status(struct snd_pcm_substream *substream,
		   struct snd_pcm_status *status)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_stream_lock_irq(substream);

	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
					&runtime->audio_tstamp_config);

	/* backwards compatible behavior */
	if (runtime->audio_tstamp_config.type_requested ==
		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
			runtime->audio_tstamp_config.type_requested =
				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		else
			runtime->audio_tstamp_config.type_requested =
				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
		runtime->audio_tstamp_report.valid = 0;
	} else
		runtime->audio_tstamp_report.valid = 1;

	status->state = runtime->status->state;
	status->suspended_state = runtime->status->suspended_state;
	if (status->state == SNDRV_PCM_STATE_OPEN)
		goto _end;
	status->trigger_tstamp = runtime->trigger_tstamp;
	if (snd_pcm_running(substream)) {
		snd_pcm_update_hw_ptr(substream);
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
			status->tstamp = runtime->status->tstamp;
			status->driver_tstamp = runtime->driver_tstamp;
			status->audio_tstamp =
				runtime->status->audio_tstamp;
			if (runtime->audio_tstamp_report.valid == 1)
				/* backwards compatibility, no report provided in COMPAT mode */
				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
								&status->audio_tstamp_accuracy,
								&runtime->audio_tstamp_report);

			goto _tstamp_end;
		}
	} else {
		/* get tstamp only in fallback mode and only if enabled */
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
			snd_pcm_gettime(runtime, &status->tstamp);
	}
 _tstamp_end:
	status->appl_ptr = runtime->control->appl_ptr;
	status->hw_ptr = runtime->status->hw_ptr;
	status->avail = snd_pcm_avail(substream);
	status->delay = snd_pcm_running(substream) ?
		snd_pcm_calc_delay(substream) : 0;
	status->avail_max = runtime->avail_max;
	status->overrange = runtime->overrange;
	runtime->avail_max = 0;
	runtime->overrange = 0;
 _end:
	snd_pcm_stream_unlock_irq(substream);
	return 0;
}
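
/*
 * Illustrative note (not in the original file): the delay reported above is
 * the number of frames still queued in the ring buffer (hw_avail for
 * playback, avail for capture) plus any additional latency the driver
 * reports in runtime->delay, e.g. a codec or FIFO delay.
 */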

static int snd_pcm_status_user(struct snd_pcm_substream *substream,
			       struct snd_pcm_status __user * _status,
			       bool ext)
{
	struct snd_pcm_status status;
	int res;

	memset(&status, 0, sizeof(status));
	/*
	 * with extension, parameters are read/write,
	 * get audio_tstamp_data from user,
	 * ignore rest of status structure
	 */
	if (ext && get_user(status.audio_tstamp_data,
				(u32 __user *)(&_status->audio_tstamp_data)))
		return -EFAULT;
	res = snd_pcm_status(substream, &status);
	if (res < 0)
		return res;
	if (copy_to_user(_status, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
				struct snd_pcm_channel_info * info)
{
	struct snd_pcm_runtime *runtime;
	unsigned int channel;

	channel = info->channel;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (channel >= runtime->channels)
		return -EINVAL;
	memset(info, 0, sizeof(*info));
	info->channel = channel;
	return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}

static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
				     struct snd_pcm_channel_info __user * _info)
{
	struct snd_pcm_channel_info info;
	int res;

	if (copy_from_user(&info, _info, sizeof(info)))
		return -EFAULT;
	res = snd_pcm_channel_info(substream, &info);
	if (res < 0)
		return res;
	if (copy_to_user(_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master == NULL)
		return;
	if (runtime->trigger_master == substream) {
		if (!runtime->trigger_tstamp_latched)
			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
	} else {
		snd_pcm_trigger_tstamp(runtime->trigger_master);
		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
	}
	runtime->trigger_master = NULL;
}

struct action_ops {
	int (*pre_action)(struct snd_pcm_substream *substream, int state);
	int (*do_action)(struct snd_pcm_substream *substream, int state);
	void (*undo_action)(struct snd_pcm_substream *substream, int state);
	void (*post_action)(struct snd_pcm_substream *substream, int state);
};

/*
 * This function is the core for handling linked streams.
 * Note: the stream state might be changed also on failure
 * Note2: call with calling stream lock + link lock
 */
static int snd_pcm_action_group(const struct action_ops *ops,
				struct snd_pcm_substream *substream,
				int state, int do_lock)
{
	struct snd_pcm_substream *s = NULL;
	struct snd_pcm_substream *s1;
	int res = 0, depth = 1;

	snd_pcm_group_for_each_entry(s, substream) {
		if (do_lock && s != substream) {
			if (s->pcm->nonatomic)
				mutex_lock_nested(&s->self_group.mutex, depth);
			else
				spin_lock_nested(&s->self_group.lock, depth);
			depth++;
		}
		res = ops->pre_action(s, state);
		if (res < 0)
			goto _unlock;
	}
	snd_pcm_group_for_each_entry(s, substream) {
		res = ops->do_action(s, state);
		if (res < 0) {
			if (ops->undo_action) {
				snd_pcm_group_for_each_entry(s1, substream) {
					if (s1 == s) /* failed stream */
						break;
					ops->undo_action(s1, state);
				}
			}
			s = NULL; /* unlock all */
			goto _unlock;
		}
	}
	snd_pcm_group_for_each_entry(s, substream) {
		ops->post_action(s, state);
	}
 _unlock:
	if (do_lock) {
		/* unlock streams */
		snd_pcm_group_for_each_entry(s1, substream) {
			if (s1 != substream) {
				if (s1->pcm->nonatomic)
					mutex_unlock(&s1->self_group.mutex);
				else
					spin_unlock(&s1->self_group.lock);
			}
			if (s1 == s) /* end */
				break;
		}
	}
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action_single(const struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 int state)
{
	int res;

	res = ops->pre_action(substream, state);
	if (res < 0)
		return res;
	res = ops->do_action(substream, state);
	if (res == 0)
		ops->post_action(substream, state);
	else if (ops->undo_action)
		ops->undo_action(substream, state);
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action(const struct action_ops *ops,
			  struct snd_pcm_substream *substream,
			  int state)
{
	int res;

	if (!snd_pcm_stream_linked(substream))
		return snd_pcm_action_single(ops, substream, state);

	if (substream->pcm->nonatomic) {
		if (!mutex_trylock(&substream->group->mutex)) {
			mutex_unlock(&substream->self_group.mutex);
			mutex_lock(&substream->group->mutex);
			mutex_lock(&substream->self_group.mutex);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		mutex_unlock(&substream->group->mutex);
	} else {
		if (!spin_trylock(&substream->group->lock)) {
			spin_unlock(&substream->self_group.lock);
			spin_lock(&substream->group->lock);
			spin_lock(&substream->self_group.lock);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		spin_unlock(&substream->group->lock);
	}
	return res;
}

/*
 * Note: don't use any locks before
 */
static int snd_pcm_action_lock_irq(const struct action_ops *ops,
				   struct snd_pcm_substream *substream,
				   int state)
{
	int res;

	snd_pcm_stream_lock_irq(substream);
	res = snd_pcm_action(ops, substream, state);
	snd_pcm_stream_unlock_irq(substream);
	return res;
}

/*
 */
static int snd_pcm_action_nonatomic(const struct action_ops *ops,
				    struct snd_pcm_substream *substream,
				    int state)
{
	int res;

	down_read(&snd_pcm_link_rwsem);
	if (snd_pcm_stream_linked(substream))
		res = snd_pcm_action_group(ops, substream, state, 0);
	else
		res = snd_pcm_action_single(ops, substream, state);
	up_read(&snd_pcm_link_rwsem);
	return res;
}
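
/*
 * Illustrative sketch (not part of the original file): how a state change is
 * wired through the pre/do/undo/post callbacks of the action framework
 * above. The callback and ops names are hypothetical; the real triggers
 * (start, stop, pause, ...) follow below.
 */
#if 0
static int example_pre(struct snd_pcm_substream *substream, int state)
{
	return 0;	/* validate the current state, -EBADFD otherwise */
}

static int example_do(struct snd_pcm_substream *substream, int state)
{
	return 0;	/* issue the actual trigger to the driver */
}

static void example_post(struct snd_pcm_substream *substream, int state)
{
	/* commit the new state and wake up waiters */
}

static const struct action_ops example_action = {
	.pre_action = example_pre,
	.do_action = example_do,
	.post_action = example_post,
};

/* applied to the whole linked group, under the stream lock:
 * res = snd_pcm_action(&example_action, substream, SNDRV_PCM_STATE_RUNNING);
 */
#endif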

/*
 * start callbacks
 */
static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
		return -EBADFD;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    !snd_pcm_playback_data(substream))
		return -EPIPE;
	runtime->trigger_tstamp_latched = false;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
}

static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
}

static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->hw_ptr_jiffies = jiffies;
	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
							    runtime->rate;
	runtime->status->state = state;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
}

static const struct action_ops snd_pcm_action_start = {
	.pre_action = snd_pcm_pre_start,
	.do_action = snd_pcm_do_start,
	.undo_action = snd_pcm_undo_start,
	.post_action = snd_pcm_post_start
};

/**
 * snd_pcm_start - start all linked streams
 * @substream: the PCM substream instance
 *
 * Return: Zero if successful, or a negative error code.
 * The stream lock must be acquired before calling this function.
 */
int snd_pcm_start(struct snd_pcm_substream *substream)
{
	return snd_pcm_action(&snd_pcm_action_start, substream,
			      SNDRV_PCM_STATE_RUNNING);
}

/* take the stream lock and start the streams */
static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
				       SNDRV_PCM_STATE_RUNNING);
}

/*
 * stop callbacks
 */
static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
	return 0; /* unconditionally stop all substreams */
}

static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != state) {
		snd_pcm_trigger_tstamp(substream);
		runtime->status->state = state;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
	}
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static const struct action_ops snd_pcm_action_stop = {
	.pre_action = snd_pcm_pre_stop,
	.do_action = snd_pcm_do_stop,
	.post_action = snd_pcm_post_stop
};

/**
 * snd_pcm_stop - try to stop all running streams in the substream group
 * @substream: the PCM substream instance
 * @state: PCM state after stopping the stream
 *
 * The state of each stream is then changed to the given state unconditionally.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
EXPORT_SYMBOL(snd_pcm_stop);

/**
 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
 * @substream: the PCM substream
 *
 * After stopping, the state is changed to SETUP.
 * Unlike snd_pcm_stop(), this affects only the given stream.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_drain_done(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
				     SNDRV_PCM_STATE_SETUP);
}

/**
 * snd_pcm_stop_xrun - stop the running streams as XRUN
 * @substream: the PCM substream instance
 *
 * This stops the given running substream (and all linked substreams) as XRUN.
 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
{
	unsigned long flags;
	int ret = 0;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream))
		ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);

/*
 * pause callbacks
 */
static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
		return -ENOSYS;
	if (push) {
		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
			return -EBADFD;
	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	/* some drivers might use hw_ptr to recover from the pause -
	   update the hw_ptr now */
	if (push)
		snd_pcm_update_hw_ptr(substream);
	/* The jiffies check in snd_pcm_update_hw_ptr*() is based on a delta
	 * from the current jiffies; setting hw_ptr_jiffies far into the past
	 * gives a large enough delta to effectively skip the check once.
	 */
	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
	return substream->ops->trigger(substream,
				       push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
					      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}

static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream,
					push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
					       SNDRV_PCM_TRIGGER_PAUSE_PUSH);
}

static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	if (push) {
		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
		wake_up(&runtime->sleep);
		wake_up(&runtime->tsleep);
	} else {
		runtime->status->state = SNDRV_PCM_STATE_RUNNING;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
	}
}

static const struct action_ops snd_pcm_action_pause = {
	.pre_action = snd_pcm_pre_pause,
	.do_action = snd_pcm_do_pause,
	.undo_action = snd_pcm_undo_pause,
	.post_action = snd_pcm_post_pause
};

/*
 * Push/release the pause for all linked streams.
 */
static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
{
	return snd_pcm_action(&snd_pcm_action_pause, substream, push);
}

#ifdef CONFIG_PM
/* suspend */

static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
		return -EBUSY;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master != substream)
		return 0;
	if (! snd_pcm_running(substream))
		return 0;
	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
	return 0; /* suspend unconditionally */
}

static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->status->suspended_state = runtime->status->state;
	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static const struct action_ops snd_pcm_action_suspend = {
	.pre_action = snd_pcm_pre_suspend,
	.do_action = snd_pcm_do_suspend,
	.post_action = snd_pcm_post_suspend
};

/**
 * snd_pcm_suspend - trigger SUSPEND to all linked streams
 * @substream: the PCM substream
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful (or @substream is %NULL), or a negative error
 * code.
 */
int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
	int err;
	unsigned long flags;

	if (! substream)
		return 0;

	snd_pcm_stream_lock_irqsave(substream, flags);
	err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return err;
}
EXPORT_SYMBOL(snd_pcm_suspend);
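
/*
 * Illustrative sketch (not part of the original file): a driver-side PM
 * suspend callback typically suspends every substream of its PCM via
 * snd_pcm_suspend_all() (defined just below) before powering down the
 * hardware. The chip structure and device-data layout are hypothetical.
 */
#if 0
static int example_driver_suspend(struct device *dev)
{
	struct example_chip *chip = dev_get_drvdata(dev);
	int err;

	err = snd_pcm_suspend_all(chip->pcm);
	if (err < 0)
		return err;
	/* ... save hardware registers and power down ... */
	return 0;
}
#endif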

/**
 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
 * @pcm: the PCM instance
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
 */
int snd_pcm_suspend_all(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	int stream, err = 0;

	if (! pcm)
		return 0;

	for (stream = 0; stream < 2; stream++) {
		for (substream = pcm->streams[stream].substream;
		     substream; substream = substream->next) {
			/* FIXME: the open/close code should lock this as well */
			if (substream->runtime == NULL)
				continue;
			err = snd_pcm_suspend(substream);
			if (err < 0 && err != -EBUSY)
				return err;
		}
	}
	return 0;
}
EXPORT_SYMBOL(snd_pcm_suspend_all);

/* resume */

static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
		return -ENOSYS;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master != substream)
		return 0;
	/* DMA not running previously? */
	if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
	    (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
}

static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
}

static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->status->state = runtime->status->suspended_state;
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
}

static const struct action_ops snd_pcm_action_resume = {
	.pre_action = snd_pcm_pre_resume,
	.do_action = snd_pcm_do_resume,
	.undo_action = snd_pcm_undo_resume,
	.post_action = snd_pcm_post_resume
};

static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
}

#else

static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	return -ENOSYS;
}

#endif /* CONFIG_PM */

/*
 * xrun ioctl
 *
 * Change the RUNNING stream(s) to XRUN state.
 */
static int snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int result;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_XRUN:
		result = 0;	/* already there */
		break;
	case SNDRV_PCM_STATE_RUNNING:
		result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		break;
	default:
		result = -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	return result;
}

/*
 * reset ioctl
 */
static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return 0;
	default:
		return -EBADFD;
	}
}

static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
	if (err < 0)
		return err;
	runtime->hw_ptr_base = 0;
	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
		runtime->status->hw_ptr % runtime->period_size;
	runtime->silence_start = runtime->status->hw_ptr;
	runtime->silence_filled = 0;
	return 0;
}

static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->control->appl_ptr = runtime->status->hw_ptr;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
}

static const struct action_ops snd_pcm_action_reset = {
	.pre_action = snd_pcm_pre_reset,
	.do_action = snd_pcm_do_reset,
	.post_action = snd_pcm_post_reset
};

static int snd_pcm_reset(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
}

/*
 * prepare ioctl
 */
/* we use the second argument for updating f_flags */
static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
			       int f_flags)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
		return -EBADFD;
	if (snd_pcm_running(substream))
		return -EBUSY;
	substream->f_flags = f_flags;
	return 0;
}

static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
{
	int err;
	err = substream->ops->prepare(substream);
	if (err < 0)
		return err;
	return snd_pcm_do_reset(substream, 0);
}

static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->control->appl_ptr = runtime->status->hw_ptr;
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
}

static const struct action_ops snd_pcm_action_prepare = {
	.pre_action = snd_pcm_pre_prepare,
	.do_action = snd_pcm_do_prepare,
	.post_action = snd_pcm_post_prepare
};

/**
 * snd_pcm_prepare - prepare the PCM substream to be triggerable
 * @substream: the PCM substream instance
 * @file: file to refer f_flags
 *
 * Return: Zero if successful, or a negative error code.
 */
static int snd_pcm_prepare(struct snd_pcm_substream *substream,
			   struct file *file)
{
	int f_flags;

	if (file)
		f_flags = file->f_flags;
	else
		f_flags = substream->f_flags;

	snd_pcm_stream_lock_irq(substream);
	switch (substream->runtime->status->state) {
	case SNDRV_PCM_STATE_PAUSED:
		snd_pcm_pause(substream, 0);
		/* fallthru */
	case SNDRV_PCM_STATE_SUSPENDED:
		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
		break;
	}
	snd_pcm_stream_unlock_irq(substream);

	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
					substream, f_flags);
}

/*
 * drain ioctl
 */

static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_DISCONNECTED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return -EBADFD;
	}
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_PREPARED:
			/* start playback stream if possible */
			if (! snd_pcm_playback_empty(substream)) {
				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
			} else {
				runtime->status->state = SNDRV_PCM_STATE_SETUP;
			}
			break;
		case SNDRV_PCM_STATE_RUNNING:
			runtime->status->state = SNDRV_PCM_STATE_DRAINING;
			break;
		case SNDRV_PCM_STATE_XRUN:
			runtime->status->state = SNDRV_PCM_STATE_SETUP;
			break;
		default:
			break;
		}
	} else {
		/* stop running stream */
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
			int new_state = snd_pcm_capture_avail(runtime) > 0 ?
				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
			snd_pcm_do_stop(substream, new_state);
			snd_pcm_post_stop(substream, new_state);
		}
	}

	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
	    runtime->trigger_master == substream &&
	    (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
		return substream->ops->trigger(substream,
					       SNDRV_PCM_TRIGGER_DRAIN);

	return 0;
}

static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
{
}

static const struct action_ops snd_pcm_action_drain_init = {
	.pre_action = snd_pcm_pre_drain_init,
	.do_action = snd_pcm_do_drain_init,
	.post_action = snd_pcm_post_drain_init
};

static int snd_pcm_drop(struct snd_pcm_substream *substream);

/*
 * Drain the stream(s).
 * When the substream is linked, sync until the draining of all playback
 * streams is finished.
 * After this call, all streams are supposed to be in either SETUP or
 * DRAINING (capture only) state.
 */
static int snd_pcm_drain(struct snd_pcm_substream *substream,
			 struct file *file)
{
	struct snd_card *card;
	struct snd_pcm_runtime *runtime;
	struct snd_pcm_substream *s;
	wait_queue_entry_t wait;
	int result = 0;
	int nonblock = 0;

	card = substream->pcm->card;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	if (file) {
		if (file->f_flags & O_NONBLOCK)
			nonblock = 1;
	} else if (substream->f_flags & O_NONBLOCK)
		nonblock = 1;

	down_read(&snd_pcm_link_rwsem);
	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	/* pre-start/stop - all running streams are changed to DRAINING state */
	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
	if (result < 0)
		goto unlock;
	/* in non-blocking, we don't wait in ioctl but let caller poll */
	if (nonblock) {
		result = -EAGAIN;
		goto unlock;
	}

	for (;;) {
		long tout;
		struct snd_pcm_runtime *to_check;
		if (signal_pending(current)) {
			result = -ERESTARTSYS;
			break;
		}
		/* find a substream to drain */
		to_check = NULL;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
				continue;
			runtime = s->runtime;
			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				to_check = runtime;
				break;
			}
		}
		if (!to_check)
			break; /* all drained */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&to_check->sleep, &wait);
		snd_pcm_stream_unlock_irq(substream);
		up_read(&snd_pcm_link_rwsem);
		if (runtime->no_period_wakeup)
			tout = MAX_SCHEDULE_TIMEOUT;
		else {
			tout = 10;
			if (runtime->rate) {
				long t = runtime->period_size * 2 / runtime->rate;
				tout = max(t, tout);
			}
			tout = msecs_to_jiffies(tout * 1000);
		}
		tout = schedule_timeout_interruptible(tout);
		down_read(&snd_pcm_link_rwsem);
		snd_pcm_stream_lock_irq(substream);
		remove_wait_queue(&to_check->sleep, &wait);
		if (card->shutdown) {
			result = -ENODEV;
			break;
		}
		if (tout == 0) {
			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
				result = -ESTRPIPE;
			else {
				dev_dbg(substream->pcm->card->dev,
					"playback drain error (DMA or IRQ trouble?)\n");
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				result = -EIO;
			}
			break;
		}
	}

 unlock:
	snd_pcm_stream_unlock_irq(substream);
	up_read(&snd_pcm_link_rwsem);

	return result;
}

/*
 * drop ioctl
 *
 * Immediately put all linked substreams into SETUP state.
1907 */ 1908 static int snd_pcm_drop(struct snd_pcm_substream *substream) 1909 { 1910 struct snd_pcm_runtime *runtime; 1911 int result = 0; 1912 1913 if (PCM_RUNTIME_CHECK(substream)) 1914 return -ENXIO; 1915 runtime = substream->runtime; 1916 1917 if (runtime->status->state == SNDRV_PCM_STATE_OPEN || 1918 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) 1919 return -EBADFD; 1920 1921 snd_pcm_stream_lock_irq(substream); 1922 /* resume pause */ 1923 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) 1924 snd_pcm_pause(substream, 0); 1925 1926 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); 1927 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ 1928 snd_pcm_stream_unlock_irq(substream); 1929 1930 return result; 1931 } 1932 1933 1934 static bool is_pcm_file(struct file *file) 1935 { 1936 struct inode *inode = file_inode(file); 1937 unsigned int minor; 1938 1939 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major) 1940 return false; 1941 minor = iminor(inode); 1942 return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) || 1943 snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE); 1944 } 1945 1946 /* 1947 * PCM link handling 1948 */ 1949 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) 1950 { 1951 int res = 0; 1952 struct snd_pcm_file *pcm_file; 1953 struct snd_pcm_substream *substream1; 1954 struct snd_pcm_group *group; 1955 struct fd f = fdget(fd); 1956 1957 if (!f.file) 1958 return -EBADFD; 1959 if (!is_pcm_file(f.file)) { 1960 res = -EBADFD; 1961 goto _badf; 1962 } 1963 pcm_file = f.file->private_data; 1964 substream1 = pcm_file->substream; 1965 group = kmalloc(sizeof(*group), GFP_KERNEL); 1966 if (!group) { 1967 res = -ENOMEM; 1968 goto _nolock; 1969 } 1970 down_write_nonblock(&snd_pcm_link_rwsem); 1971 write_lock_irq(&snd_pcm_link_rwlock); 1972 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || 1973 substream->runtime->status->state != substream1->runtime->status->state || 1974 substream->pcm->nonatomic != substream1->pcm->nonatomic) { 1975 res = -EBADFD; 1976 goto _end; 1977 } 1978 if (snd_pcm_stream_linked(substream1)) { 1979 res = -EALREADY; 1980 goto _end; 1981 } 1982 if (!snd_pcm_stream_linked(substream)) { 1983 substream->group = group; 1984 group = NULL; 1985 spin_lock_init(&substream->group->lock); 1986 mutex_init(&substream->group->mutex); 1987 INIT_LIST_HEAD(&substream->group->substreams); 1988 list_add_tail(&substream->link_list, &substream->group->substreams); 1989 substream->group->count = 1; 1990 } 1991 list_add_tail(&substream1->link_list, &substream->group->substreams); 1992 substream->group->count++; 1993 substream1->group = substream->group; 1994 _end: 1995 write_unlock_irq(&snd_pcm_link_rwlock); 1996 up_write(&snd_pcm_link_rwsem); 1997 _nolock: 1998 snd_card_unref(substream1->pcm->card); 1999 kfree(group); 2000 _badf: 2001 fdput(f); 2002 return res; 2003 } 2004 2005 static void relink_to_local(struct snd_pcm_substream *substream) 2006 { 2007 substream->group = &substream->self_group; 2008 INIT_LIST_HEAD(&substream->self_group.substreams); 2009 list_add_tail(&substream->link_list, &substream->self_group.substreams); 2010 } 2011 2012 static int snd_pcm_unlink(struct snd_pcm_substream *substream) 2013 { 2014 struct snd_pcm_substream *s; 2015 int res = 0; 2016 2017 down_write_nonblock(&snd_pcm_link_rwsem); 2018 write_lock_irq(&snd_pcm_link_rwlock); 2019 if (!snd_pcm_stream_linked(substream)) { 2020 res = -EALREADY; 2021 goto _end; 2022 } 2023 list_del(&substream->link_list); 2024 
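	/* this substream leaves the group; if only one member remains
	 * afterwards, the group is dissolved and that member is relinked
	 * to its self_group as well */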
substream->group->count--; 2025 if (substream->group->count == 1) { /* detach the last stream, too */ 2026 snd_pcm_group_for_each_entry(s, substream) { 2027 relink_to_local(s); 2028 break; 2029 } 2030 kfree(substream->group); 2031 } 2032 relink_to_local(substream); 2033 _end: 2034 write_unlock_irq(&snd_pcm_link_rwlock); 2035 up_write(&snd_pcm_link_rwsem); 2036 return res; 2037 } 2038 2039 /* 2040 * hw configurator 2041 */ 2042 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params, 2043 struct snd_pcm_hw_rule *rule) 2044 { 2045 struct snd_interval t; 2046 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]), 2047 hw_param_interval_c(params, rule->deps[1]), &t); 2048 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2049 } 2050 2051 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params, 2052 struct snd_pcm_hw_rule *rule) 2053 { 2054 struct snd_interval t; 2055 snd_interval_div(hw_param_interval_c(params, rule->deps[0]), 2056 hw_param_interval_c(params, rule->deps[1]), &t); 2057 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2058 } 2059 2060 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params, 2061 struct snd_pcm_hw_rule *rule) 2062 { 2063 struct snd_interval t; 2064 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]), 2065 hw_param_interval_c(params, rule->deps[1]), 2066 (unsigned long) rule->private, &t); 2067 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2068 } 2069 2070 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params, 2071 struct snd_pcm_hw_rule *rule) 2072 { 2073 struct snd_interval t; 2074 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]), 2075 (unsigned long) rule->private, 2076 hw_param_interval_c(params, rule->deps[1]), &t); 2077 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2078 } 2079 2080 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params, 2081 struct snd_pcm_hw_rule *rule) 2082 { 2083 unsigned int k; 2084 const struct snd_interval *i = 2085 hw_param_interval_c(params, rule->deps[0]); 2086 struct snd_mask m; 2087 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); 2088 snd_mask_any(&m); 2089 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) { 2090 int bits; 2091 if (! snd_mask_test(mask, k)) 2092 continue; 2093 bits = snd_pcm_format_physical_width(k); 2094 if (bits <= 0) 2095 continue; /* ignore invalid formats */ 2096 if ((unsigned)bits < i->min || (unsigned)bits > i->max) 2097 snd_mask_reset(&m, k); 2098 } 2099 return snd_mask_refine(mask, &m); 2100 } 2101 2102 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params, 2103 struct snd_pcm_hw_rule *rule) 2104 { 2105 struct snd_interval t; 2106 unsigned int k; 2107 t.min = UINT_MAX; 2108 t.max = 0; 2109 t.openmin = 0; 2110 t.openmax = 0; 2111 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) { 2112 int bits; 2113 if (! 
snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k)) 2114 continue; 2115 bits = snd_pcm_format_physical_width(k); 2116 if (bits <= 0) 2117 continue; /* ignore invalid formats */ 2118 if (t.min > (unsigned)bits) 2119 t.min = bits; 2120 if (t.max < (unsigned)bits) 2121 t.max = bits; 2122 } 2123 t.integer = 1; 2124 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2125 } 2126 2127 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12 2128 #error "Change this table" 2129 #endif 2130 2131 static const unsigned int rates[] = { 2132 5512, 8000, 11025, 16000, 22050, 32000, 44100, 2133 48000, 64000, 88200, 96000, 176400, 192000 2134 }; 2135 2136 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = { 2137 .count = ARRAY_SIZE(rates), 2138 .list = rates, 2139 }; 2140 2141 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params, 2142 struct snd_pcm_hw_rule *rule) 2143 { 2144 struct snd_pcm_hardware *hw = rule->private; 2145 return snd_interval_list(hw_param_interval(params, rule->var), 2146 snd_pcm_known_rates.count, 2147 snd_pcm_known_rates.list, hw->rates); 2148 } 2149 2150 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params, 2151 struct snd_pcm_hw_rule *rule) 2152 { 2153 struct snd_interval t; 2154 struct snd_pcm_substream *substream = rule->private; 2155 t.min = 0; 2156 t.max = substream->buffer_bytes_max; 2157 t.openmin = 0; 2158 t.openmax = 0; 2159 t.integer = 1; 2160 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2161 } 2162 2163 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream) 2164 { 2165 struct snd_pcm_runtime *runtime = substream->runtime; 2166 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 2167 int k, err; 2168 2169 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) { 2170 snd_mask_any(constrs_mask(constrs, k)); 2171 } 2172 2173 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) { 2174 snd_interval_any(constrs_interval(constrs, k)); 2175 } 2176 2177 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS)); 2178 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)); 2179 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)); 2180 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)); 2181 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS)); 2182 2183 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, 2184 snd_pcm_hw_rule_format, NULL, 2185 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 2186 if (err < 0) 2187 return err; 2188 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 2189 snd_pcm_hw_rule_sample_bits, NULL, 2190 SNDRV_PCM_HW_PARAM_FORMAT, 2191 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 2192 if (err < 0) 2193 return err; 2194 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 2195 snd_pcm_hw_rule_div, NULL, 2196 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1); 2197 if (err < 0) 2198 return err; 2199 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, 2200 snd_pcm_hw_rule_mul, NULL, 2201 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1); 2202 if (err < 0) 2203 return err; 2204 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, 2205 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2206 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); 2207 if (err < 0) 2208 return err; 2209 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, 2210 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2211 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1); 2212 if (err < 0) 2213 return err; 2214 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 2215 snd_pcm_hw_rule_div, NULL, 2216 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 2217 if (err < 0) 2218 return err; 2219 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 2220 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2221 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1); 2222 if (err < 0) 2223 return err; 2224 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 2225 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2226 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1); 2227 if (err < 0) 2228 return err; 2229 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS, 2230 snd_pcm_hw_rule_div, NULL, 2231 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); 2232 if (err < 0) 2233 return err; 2234 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2235 snd_pcm_hw_rule_div, NULL, 2236 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1); 2237 if (err < 0) 2238 return err; 2239 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2240 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2241 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2242 if (err < 0) 2243 return err; 2244 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2245 snd_pcm_hw_rule_muldivk, (void*) 1000000, 2246 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1); 2247 if (err < 0) 2248 return err; 2249 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 2250 snd_pcm_hw_rule_mul, NULL, 2251 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1); 2252 if (err < 0) 2253 return err; 2254 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 2255 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2256 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2257 if (err < 0) 2258 return err; 2259 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 2260 snd_pcm_hw_rule_muldivk, (void*) 1000000, 2261 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1); 2262 if (err < 0) 2263 return err; 2264 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 2265 snd_pcm_hw_rule_muldivk, (void*) 8, 2266 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2267 if (err < 0) 2268 return err; 2269 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2270 snd_pcm_hw_rule_muldivk, (void*) 8, 2271 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2272 if (err < 0) 2273 return err; 2274 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 2275 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2276 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); 2277 if (err < 0) 2278 return err; 2279 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 2280 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2281 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); 2282 if (err < 0) 2283 return err; 2284 return 0; 2285 } 2286 2287 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) 2288 { 2289 struct snd_pcm_runtime *runtime = 
substream->runtime; 2290 struct snd_pcm_hardware *hw = &runtime->hw; 2291 int err; 2292 unsigned int mask = 0; 2293 2294 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED) 2295 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED; 2296 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED) 2297 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED; 2298 if (hw_support_mmap(substream)) { 2299 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED) 2300 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED; 2301 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED) 2302 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED; 2303 if (hw->info & SNDRV_PCM_INFO_COMPLEX) 2304 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX; 2305 } 2306 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask); 2307 if (err < 0) 2308 return err; 2309 2310 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats); 2311 if (err < 0) 2312 return err; 2313 2314 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD); 2315 if (err < 0) 2316 return err; 2317 2318 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2319 hw->channels_min, hw->channels_max); 2320 if (err < 0) 2321 return err; 2322 2323 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, 2324 hw->rate_min, hw->rate_max); 2325 if (err < 0) 2326 return err; 2327 2328 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 2329 hw->period_bytes_min, hw->period_bytes_max); 2330 if (err < 0) 2331 return err; 2332 2333 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS, 2334 hw->periods_min, hw->periods_max); 2335 if (err < 0) 2336 return err; 2337 2338 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2339 hw->period_bytes_min, hw->buffer_bytes_max); 2340 if (err < 0) 2341 return err; 2342 2343 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2344 snd_pcm_hw_rule_buffer_bytes_max, substream, 2345 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1); 2346 if (err < 0) 2347 return err; 2348 2349 /* FIXME: remove */ 2350 if (runtime->dma_bytes) { 2351 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes); 2352 if (err < 0) 2353 return err; 2354 } 2355 2356 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) { 2357 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 2358 snd_pcm_hw_rule_rate, hw, 2359 SNDRV_PCM_HW_PARAM_RATE, -1); 2360 if (err < 0) 2361 return err; 2362 } 2363 2364 /* FIXME: this belong to lowlevel */ 2365 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE); 2366 2367 return 0; 2368 } 2369 2370 static void pcm_release_private(struct snd_pcm_substream *substream) 2371 { 2372 snd_pcm_unlink(substream); 2373 } 2374 2375 void snd_pcm_release_substream(struct snd_pcm_substream *substream) 2376 { 2377 substream->ref_count--; 2378 if (substream->ref_count > 0) 2379 return; 2380 2381 snd_pcm_drop(substream); 2382 if (substream->hw_opened) { 2383 if (substream->ops->hw_free && 2384 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN) 2385 substream->ops->hw_free(substream); 2386 substream->ops->close(substream); 2387 substream->hw_opened = 0; 2388 } 2389 if (pm_qos_request_active(&substream->latency_pm_qos_req)) 2390 pm_qos_remove_request(&substream->latency_pm_qos_req); 2391 if (substream->pcm_release) { 2392 substream->pcm_release(substream); 2393 substream->pcm_release = NULL; 2394 } 2395 snd_pcm_detach_substream(substream); 
2396 } 2397 EXPORT_SYMBOL(snd_pcm_release_substream); 2398 2399 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, 2400 struct file *file, 2401 struct snd_pcm_substream **rsubstream) 2402 { 2403 struct snd_pcm_substream *substream; 2404 int err; 2405 2406 err = snd_pcm_attach_substream(pcm, stream, file, &substream); 2407 if (err < 0) 2408 return err; 2409 if (substream->ref_count > 1) { 2410 *rsubstream = substream; 2411 return 0; 2412 } 2413 2414 err = snd_pcm_hw_constraints_init(substream); 2415 if (err < 0) { 2416 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n"); 2417 goto error; 2418 } 2419 2420 if ((err = substream->ops->open(substream)) < 0) 2421 goto error; 2422 2423 substream->hw_opened = 1; 2424 2425 err = snd_pcm_hw_constraints_complete(substream); 2426 if (err < 0) { 2427 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n"); 2428 goto error; 2429 } 2430 2431 *rsubstream = substream; 2432 return 0; 2433 2434 error: 2435 snd_pcm_release_substream(substream); 2436 return err; 2437 } 2438 EXPORT_SYMBOL(snd_pcm_open_substream); 2439 2440 static int snd_pcm_open_file(struct file *file, 2441 struct snd_pcm *pcm, 2442 int stream) 2443 { 2444 struct snd_pcm_file *pcm_file; 2445 struct snd_pcm_substream *substream; 2446 int err; 2447 2448 err = snd_pcm_open_substream(pcm, stream, file, &substream); 2449 if (err < 0) 2450 return err; 2451 2452 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL); 2453 if (pcm_file == NULL) { 2454 snd_pcm_release_substream(substream); 2455 return -ENOMEM; 2456 } 2457 pcm_file->substream = substream; 2458 if (substream->ref_count == 1) { 2459 substream->file = pcm_file; 2460 substream->pcm_release = pcm_release_private; 2461 } 2462 file->private_data = pcm_file; 2463 2464 return 0; 2465 } 2466 2467 static int snd_pcm_playback_open(struct inode *inode, struct file *file) 2468 { 2469 struct snd_pcm *pcm; 2470 int err = nonseekable_open(inode, file); 2471 if (err < 0) 2472 return err; 2473 pcm = snd_lookup_minor_data(iminor(inode), 2474 SNDRV_DEVICE_TYPE_PCM_PLAYBACK); 2475 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); 2476 if (pcm) 2477 snd_card_unref(pcm->card); 2478 return err; 2479 } 2480 2481 static int snd_pcm_capture_open(struct inode *inode, struct file *file) 2482 { 2483 struct snd_pcm *pcm; 2484 int err = nonseekable_open(inode, file); 2485 if (err < 0) 2486 return err; 2487 pcm = snd_lookup_minor_data(iminor(inode), 2488 SNDRV_DEVICE_TYPE_PCM_CAPTURE); 2489 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); 2490 if (pcm) 2491 snd_card_unref(pcm->card); 2492 return err; 2493 } 2494 2495 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) 2496 { 2497 int err; 2498 wait_queue_entry_t wait; 2499 2500 if (pcm == NULL) { 2501 err = -ENODEV; 2502 goto __error1; 2503 } 2504 err = snd_card_file_add(pcm->card, file); 2505 if (err < 0) 2506 goto __error1; 2507 if (!try_module_get(pcm->card->module)) { 2508 err = -EFAULT; 2509 goto __error2; 2510 } 2511 init_waitqueue_entry(&wait, current); 2512 add_wait_queue(&pcm->open_wait, &wait); 2513 mutex_lock(&pcm->open_mutex); 2514 while (1) { 2515 err = snd_pcm_open_file(file, pcm, stream); 2516 if (err >= 0) 2517 break; 2518 if (err == -EAGAIN) { 2519 if (file->f_flags & O_NONBLOCK) { 2520 err = -EBUSY; 2521 break; 2522 } 2523 } else 2524 break; 2525 set_current_state(TASK_INTERRUPTIBLE); 2526 mutex_unlock(&pcm->open_mutex); 2527 schedule(); 2528 mutex_lock(&pcm->open_mutex); 2529 if (pcm->card->shutdown) { 2530 err = -ENODEV; 2531 break; 2532 } 2533 if 
(signal_pending(current)) { 2534 err = -ERESTARTSYS; 2535 break; 2536 } 2537 } 2538 remove_wait_queue(&pcm->open_wait, &wait); 2539 mutex_unlock(&pcm->open_mutex); 2540 if (err < 0) 2541 goto __error; 2542 return err; 2543 2544 __error: 2545 module_put(pcm->card->module); 2546 __error2: 2547 snd_card_file_remove(pcm->card, file); 2548 __error1: 2549 return err; 2550 } 2551 2552 static int snd_pcm_release(struct inode *inode, struct file *file) 2553 { 2554 struct snd_pcm *pcm; 2555 struct snd_pcm_substream *substream; 2556 struct snd_pcm_file *pcm_file; 2557 2558 pcm_file = file->private_data; 2559 substream = pcm_file->substream; 2560 if (snd_BUG_ON(!substream)) 2561 return -ENXIO; 2562 pcm = substream->pcm; 2563 mutex_lock(&pcm->open_mutex); 2564 snd_pcm_release_substream(substream); 2565 kfree(pcm_file); 2566 mutex_unlock(&pcm->open_mutex); 2567 wake_up(&pcm->open_wait); 2568 module_put(pcm->card->module); 2569 snd_card_file_remove(pcm->card, file); 2570 return 0; 2571 } 2572 2573 /* check and update PCM state; return 0 or a negative error 2574 * call this inside PCM lock 2575 */ 2576 static int do_pcm_hwsync(struct snd_pcm_substream *substream) 2577 { 2578 switch (substream->runtime->status->state) { 2579 case SNDRV_PCM_STATE_DRAINING: 2580 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) 2581 return -EBADFD; 2582 /* Fall through */ 2583 case SNDRV_PCM_STATE_RUNNING: 2584 return snd_pcm_update_hw_ptr(substream); 2585 case SNDRV_PCM_STATE_PREPARED: 2586 case SNDRV_PCM_STATE_PAUSED: 2587 return 0; 2588 case SNDRV_PCM_STATE_SUSPENDED: 2589 return -ESTRPIPE; 2590 case SNDRV_PCM_STATE_XRUN: 2591 return -EPIPE; 2592 default: 2593 return -EBADFD; 2594 } 2595 } 2596 2597 /* increase the appl_ptr; returns the processed frames or a negative error */ 2598 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream, 2599 snd_pcm_uframes_t frames, 2600 snd_pcm_sframes_t avail) 2601 { 2602 struct snd_pcm_runtime *runtime = substream->runtime; 2603 snd_pcm_sframes_t appl_ptr; 2604 int ret; 2605 2606 if (avail <= 0) 2607 return 0; 2608 if (frames > (snd_pcm_uframes_t)avail) 2609 frames = avail; 2610 appl_ptr = runtime->control->appl_ptr + frames; 2611 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary) 2612 appl_ptr -= runtime->boundary; 2613 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2614 return ret < 0 ? ret : frames; 2615 } 2616 2617 /* decrease the appl_ptr; returns the processed frames or zero for error */ 2618 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, 2619 snd_pcm_uframes_t frames, 2620 snd_pcm_sframes_t avail) 2621 { 2622 struct snd_pcm_runtime *runtime = substream->runtime; 2623 snd_pcm_sframes_t appl_ptr; 2624 int ret; 2625 2626 if (avail <= 0) 2627 return 0; 2628 if (frames > (snd_pcm_uframes_t)avail) 2629 frames = avail; 2630 appl_ptr = runtime->control->appl_ptr - frames; 2631 if (appl_ptr < 0) 2632 appl_ptr += runtime->boundary; 2633 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2634 /* NOTE: we return zero for errors because PulseAudio gets depressed 2635 * upon receiving an error from rewind ioctl and stops processing 2636 * any longer. Returning zero means that no rewind is done, so 2637 * it's not absolutely wrong to answer like that. 2638 */ 2639 return ret < 0 ? 
0 : frames; 2640 } 2641 2642 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream, 2643 snd_pcm_uframes_t frames) 2644 { 2645 snd_pcm_sframes_t ret; 2646 2647 if (frames == 0) 2648 return 0; 2649 2650 snd_pcm_stream_lock_irq(substream); 2651 ret = do_pcm_hwsync(substream); 2652 if (!ret) 2653 ret = rewind_appl_ptr(substream, frames, 2654 snd_pcm_hw_avail(substream)); 2655 snd_pcm_stream_unlock_irq(substream); 2656 return ret; 2657 } 2658 2659 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream, 2660 snd_pcm_uframes_t frames) 2661 { 2662 snd_pcm_sframes_t ret; 2663 2664 if (frames == 0) 2665 return 0; 2666 2667 snd_pcm_stream_lock_irq(substream); 2668 ret = do_pcm_hwsync(substream); 2669 if (!ret) 2670 ret = forward_appl_ptr(substream, frames, 2671 snd_pcm_avail(substream)); 2672 snd_pcm_stream_unlock_irq(substream); 2673 return ret; 2674 } 2675 2676 static int snd_pcm_hwsync(struct snd_pcm_substream *substream) 2677 { 2678 int err; 2679 2680 snd_pcm_stream_lock_irq(substream); 2681 err = do_pcm_hwsync(substream); 2682 snd_pcm_stream_unlock_irq(substream); 2683 return err; 2684 } 2685 2686 static int snd_pcm_delay(struct snd_pcm_substream *substream, 2687 snd_pcm_sframes_t *delay) 2688 { 2689 int err; 2690 snd_pcm_sframes_t n = 0; 2691 2692 snd_pcm_stream_lock_irq(substream); 2693 err = do_pcm_hwsync(substream); 2694 if (!err) 2695 n = snd_pcm_calc_delay(substream); 2696 snd_pcm_stream_unlock_irq(substream); 2697 if (!err) 2698 *delay = n; 2699 return err; 2700 } 2701 2702 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, 2703 struct snd_pcm_sync_ptr __user *_sync_ptr) 2704 { 2705 struct snd_pcm_runtime *runtime = substream->runtime; 2706 struct snd_pcm_sync_ptr sync_ptr; 2707 volatile struct snd_pcm_mmap_status *status; 2708 volatile struct snd_pcm_mmap_control *control; 2709 int err; 2710 2711 memset(&sync_ptr, 0, sizeof(sync_ptr)); 2712 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags))) 2713 return -EFAULT; 2714 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control))) 2715 return -EFAULT; 2716 status = runtime->status; 2717 control = runtime->control; 2718 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) { 2719 err = snd_pcm_hwsync(substream); 2720 if (err < 0) 2721 return err; 2722 } 2723 snd_pcm_stream_lock_irq(substream); 2724 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) { 2725 err = pcm_lib_apply_appl_ptr(substream, 2726 sync_ptr.c.control.appl_ptr); 2727 if (err < 0) { 2728 snd_pcm_stream_unlock_irq(substream); 2729 return err; 2730 } 2731 } else { 2732 sync_ptr.c.control.appl_ptr = control->appl_ptr; 2733 } 2734 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) 2735 control->avail_min = sync_ptr.c.control.avail_min; 2736 else 2737 sync_ptr.c.control.avail_min = control->avail_min; 2738 sync_ptr.s.status.state = status->state; 2739 sync_ptr.s.status.hw_ptr = status->hw_ptr; 2740 sync_ptr.s.status.tstamp = status->tstamp; 2741 sync_ptr.s.status.suspended_state = status->suspended_state; 2742 sync_ptr.s.status.audio_tstamp = status->audio_tstamp; 2743 snd_pcm_stream_unlock_irq(substream); 2744 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 2745 return -EFAULT; 2746 return 0; 2747 } 2748 2749 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg) 2750 { 2751 struct snd_pcm_runtime *runtime = substream->runtime; 2752 int arg; 2753 2754 if (get_user(arg, _arg)) 2755 return -EFAULT; 2756 if (arg < 0 || arg > 
SNDRV_PCM_TSTAMP_TYPE_LAST) 2757 return -EINVAL; 2758 runtime->tstamp_type = arg; 2759 return 0; 2760 } 2761 2762 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream, 2763 struct snd_xferi __user *_xferi) 2764 { 2765 struct snd_xferi xferi; 2766 struct snd_pcm_runtime *runtime = substream->runtime; 2767 snd_pcm_sframes_t result; 2768 2769 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2770 return -EBADFD; 2771 if (put_user(0, &_xferi->result)) 2772 return -EFAULT; 2773 if (copy_from_user(&xferi, _xferi, sizeof(xferi))) 2774 return -EFAULT; 2775 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 2776 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames); 2777 else 2778 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames); 2779 __put_user(result, &_xferi->result); 2780 return result < 0 ? result : 0; 2781 } 2782 2783 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream, 2784 struct snd_xfern __user *_xfern) 2785 { 2786 struct snd_xfern xfern; 2787 struct snd_pcm_runtime *runtime = substream->runtime; 2788 void *bufs; 2789 snd_pcm_sframes_t result; 2790 2791 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2792 return -EBADFD; 2793 if (runtime->channels > 128) 2794 return -EINVAL; 2795 if (put_user(0, &_xfern->result)) 2796 return -EFAULT; 2797 if (copy_from_user(&xfern, _xfern, sizeof(xfern))) 2798 return -EFAULT; 2799 2800 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels); 2801 if (IS_ERR(bufs)) 2802 return PTR_ERR(bufs); 2803 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 2804 result = snd_pcm_lib_writev(substream, bufs, xfern.frames); 2805 else 2806 result = snd_pcm_lib_readv(substream, bufs, xfern.frames); 2807 kfree(bufs); 2808 __put_user(result, &_xfern->result); 2809 return result < 0 ? result : 0; 2810 } 2811 2812 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream, 2813 snd_pcm_uframes_t __user *_frames) 2814 { 2815 snd_pcm_uframes_t frames; 2816 snd_pcm_sframes_t result; 2817 2818 if (get_user(frames, _frames)) 2819 return -EFAULT; 2820 if (put_user(0, _frames)) 2821 return -EFAULT; 2822 result = snd_pcm_rewind(substream, frames); 2823 __put_user(result, _frames); 2824 return result < 0 ? result : 0; 2825 } 2826 2827 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream, 2828 snd_pcm_uframes_t __user *_frames) 2829 { 2830 snd_pcm_uframes_t frames; 2831 snd_pcm_sframes_t result; 2832 2833 if (get_user(frames, _frames)) 2834 return -EFAULT; 2835 if (put_user(0, _frames)) 2836 return -EFAULT; 2837 result = snd_pcm_forward(substream, frames); 2838 __put_user(result, _frames); 2839 return result < 0 ? result : 0; 2840 } 2841 2842 static int snd_pcm_common_ioctl(struct file *file, 2843 struct snd_pcm_substream *substream, 2844 unsigned int cmd, void __user *arg) 2845 { 2846 struct snd_pcm_file *pcm_file = file->private_data; 2847 int res; 2848 2849 if (PCM_RUNTIME_CHECK(substream)) 2850 return -ENXIO; 2851 2852 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0); 2853 if (res < 0) 2854 return res; 2855 2856 switch (cmd) { 2857 case SNDRV_PCM_IOCTL_PVERSION: 2858 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? 
-EFAULT : 0; 2859 case SNDRV_PCM_IOCTL_INFO: 2860 return snd_pcm_info_user(substream, arg); 2861 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */ 2862 return 0; 2863 case SNDRV_PCM_IOCTL_TTSTAMP: 2864 return snd_pcm_tstamp(substream, arg); 2865 case SNDRV_PCM_IOCTL_USER_PVERSION: 2866 if (get_user(pcm_file->user_pversion, 2867 (unsigned int __user *)arg)) 2868 return -EFAULT; 2869 return 0; 2870 case SNDRV_PCM_IOCTL_HW_REFINE: 2871 return snd_pcm_hw_refine_user(substream, arg); 2872 case SNDRV_PCM_IOCTL_HW_PARAMS: 2873 return snd_pcm_hw_params_user(substream, arg); 2874 case SNDRV_PCM_IOCTL_HW_FREE: 2875 return snd_pcm_hw_free(substream); 2876 case SNDRV_PCM_IOCTL_SW_PARAMS: 2877 return snd_pcm_sw_params_user(substream, arg); 2878 case SNDRV_PCM_IOCTL_STATUS: 2879 return snd_pcm_status_user(substream, arg, false); 2880 case SNDRV_PCM_IOCTL_STATUS_EXT: 2881 return snd_pcm_status_user(substream, arg, true); 2882 case SNDRV_PCM_IOCTL_CHANNEL_INFO: 2883 return snd_pcm_channel_info_user(substream, arg); 2884 case SNDRV_PCM_IOCTL_PREPARE: 2885 return snd_pcm_prepare(substream, file); 2886 case SNDRV_PCM_IOCTL_RESET: 2887 return snd_pcm_reset(substream); 2888 case SNDRV_PCM_IOCTL_START: 2889 return snd_pcm_start_lock_irq(substream); 2890 case SNDRV_PCM_IOCTL_LINK: 2891 return snd_pcm_link(substream, (int)(unsigned long) arg); 2892 case SNDRV_PCM_IOCTL_UNLINK: 2893 return snd_pcm_unlink(substream); 2894 case SNDRV_PCM_IOCTL_RESUME: 2895 return snd_pcm_resume(substream); 2896 case SNDRV_PCM_IOCTL_XRUN: 2897 return snd_pcm_xrun(substream); 2898 case SNDRV_PCM_IOCTL_HWSYNC: 2899 return snd_pcm_hwsync(substream); 2900 case SNDRV_PCM_IOCTL_DELAY: 2901 { 2902 snd_pcm_sframes_t delay; 2903 snd_pcm_sframes_t __user *res = arg; 2904 int err; 2905 2906 err = snd_pcm_delay(substream, &delay); 2907 if (err) 2908 return err; 2909 if (put_user(delay, res)) 2910 return -EFAULT; 2911 return 0; 2912 } 2913 case SNDRV_PCM_IOCTL_SYNC_PTR: 2914 return snd_pcm_sync_ptr(substream, arg); 2915 #ifdef CONFIG_SND_SUPPORT_OLD_API 2916 case SNDRV_PCM_IOCTL_HW_REFINE_OLD: 2917 return snd_pcm_hw_refine_old_user(substream, arg); 2918 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD: 2919 return snd_pcm_hw_params_old_user(substream, arg); 2920 #endif 2921 case SNDRV_PCM_IOCTL_DRAIN: 2922 return snd_pcm_drain(substream, file); 2923 case SNDRV_PCM_IOCTL_DROP: 2924 return snd_pcm_drop(substream); 2925 case SNDRV_PCM_IOCTL_PAUSE: 2926 return snd_pcm_action_lock_irq(&snd_pcm_action_pause, 2927 substream, 2928 (int)(unsigned long)arg); 2929 case SNDRV_PCM_IOCTL_WRITEI_FRAMES: 2930 case SNDRV_PCM_IOCTL_READI_FRAMES: 2931 return snd_pcm_xferi_frames_ioctl(substream, arg); 2932 case SNDRV_PCM_IOCTL_WRITEN_FRAMES: 2933 case SNDRV_PCM_IOCTL_READN_FRAMES: 2934 return snd_pcm_xfern_frames_ioctl(substream, arg); 2935 case SNDRV_PCM_IOCTL_REWIND: 2936 return snd_pcm_rewind_ioctl(substream, arg); 2937 case SNDRV_PCM_IOCTL_FORWARD: 2938 return snd_pcm_forward_ioctl(substream, arg); 2939 } 2940 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd); 2941 return -ENOTTY; 2942 } 2943 2944 static long snd_pcm_ioctl(struct file *file, unsigned int cmd, 2945 unsigned long arg) 2946 { 2947 struct snd_pcm_file *pcm_file; 2948 2949 pcm_file = file->private_data; 2950 2951 if (((cmd >> 8) & 0xff) != 'A') 2952 return -ENOTTY; 2953 2954 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd, 2955 (void __user *)arg); 2956 } 2957 2958 /** 2959 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space 2960 * @substream: PCM substream 2961 * @cmd: IOCTL 
cmd 2962 * @arg: IOCTL argument 2963 * 2964 * The function is provided primarily for OSS layer and USB gadget drivers, 2965 * and it allows only the limited set of ioctls (hw_params, sw_params, 2966 * prepare, start, drain, drop, forward). 2967 */ 2968 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, 2969 unsigned int cmd, void *arg) 2970 { 2971 snd_pcm_uframes_t *frames = arg; 2972 snd_pcm_sframes_t result; 2973 2974 switch (cmd) { 2975 case SNDRV_PCM_IOCTL_FORWARD: 2976 { 2977 /* provided only for OSS; capture-only and no value returned */ 2978 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) 2979 return -EINVAL; 2980 result = snd_pcm_forward(substream, *frames); 2981 return result < 0 ? result : 0; 2982 } 2983 case SNDRV_PCM_IOCTL_HW_PARAMS: 2984 return snd_pcm_hw_params(substream, arg); 2985 case SNDRV_PCM_IOCTL_SW_PARAMS: 2986 return snd_pcm_sw_params(substream, arg); 2987 case SNDRV_PCM_IOCTL_PREPARE: 2988 return snd_pcm_prepare(substream, NULL); 2989 case SNDRV_PCM_IOCTL_START: 2990 return snd_pcm_start_lock_irq(substream); 2991 case SNDRV_PCM_IOCTL_DRAIN: 2992 return snd_pcm_drain(substream, NULL); 2993 case SNDRV_PCM_IOCTL_DROP: 2994 return snd_pcm_drop(substream); 2995 case SNDRV_PCM_IOCTL_DELAY: 2996 return snd_pcm_delay(substream, frames); 2997 default: 2998 return -EINVAL; 2999 } 3000 } 3001 EXPORT_SYMBOL(snd_pcm_kernel_ioctl); 3002 3003 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count, 3004 loff_t * offset) 3005 { 3006 struct snd_pcm_file *pcm_file; 3007 struct snd_pcm_substream *substream; 3008 struct snd_pcm_runtime *runtime; 3009 snd_pcm_sframes_t result; 3010 3011 pcm_file = file->private_data; 3012 substream = pcm_file->substream; 3013 if (PCM_RUNTIME_CHECK(substream)) 3014 return -ENXIO; 3015 runtime = substream->runtime; 3016 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3017 return -EBADFD; 3018 if (!frame_aligned(runtime, count)) 3019 return -EINVAL; 3020 count = bytes_to_frames(runtime, count); 3021 result = snd_pcm_lib_read(substream, buf, count); 3022 if (result > 0) 3023 result = frames_to_bytes(runtime, result); 3024 return result; 3025 } 3026 3027 static ssize_t snd_pcm_write(struct file *file, const char __user *buf, 3028 size_t count, loff_t * offset) 3029 { 3030 struct snd_pcm_file *pcm_file; 3031 struct snd_pcm_substream *substream; 3032 struct snd_pcm_runtime *runtime; 3033 snd_pcm_sframes_t result; 3034 3035 pcm_file = file->private_data; 3036 substream = pcm_file->substream; 3037 if (PCM_RUNTIME_CHECK(substream)) 3038 return -ENXIO; 3039 runtime = substream->runtime; 3040 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3041 return -EBADFD; 3042 if (!frame_aligned(runtime, count)) 3043 return -EINVAL; 3044 count = bytes_to_frames(runtime, count); 3045 result = snd_pcm_lib_write(substream, buf, count); 3046 if (result > 0) 3047 result = frames_to_bytes(runtime, result); 3048 return result; 3049 } 3050 3051 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to) 3052 { 3053 struct snd_pcm_file *pcm_file; 3054 struct snd_pcm_substream *substream; 3055 struct snd_pcm_runtime *runtime; 3056 snd_pcm_sframes_t result; 3057 unsigned long i; 3058 void __user **bufs; 3059 snd_pcm_uframes_t frames; 3060 3061 pcm_file = iocb->ki_filp->private_data; 3062 substream = pcm_file->substream; 3063 if (PCM_RUNTIME_CHECK(substream)) 3064 return -ENXIO; 3065 runtime = substream->runtime; 3066 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3067 return -EBADFD; 3068 if (!iter_is_iovec(to)) 3069 return 
-EINVAL; 3070 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels) 3071 return -EINVAL; 3072 if (!frame_aligned(runtime, to->iov->iov_len)) 3073 return -EINVAL; 3074 frames = bytes_to_samples(runtime, to->iov->iov_len); 3075 bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL); 3076 if (bufs == NULL) 3077 return -ENOMEM; 3078 for (i = 0; i < to->nr_segs; ++i) 3079 bufs[i] = to->iov[i].iov_base; 3080 result = snd_pcm_lib_readv(substream, bufs, frames); 3081 if (result > 0) 3082 result = frames_to_bytes(runtime, result); 3083 kfree(bufs); 3084 return result; 3085 } 3086 3087 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from) 3088 { 3089 struct snd_pcm_file *pcm_file; 3090 struct snd_pcm_substream *substream; 3091 struct snd_pcm_runtime *runtime; 3092 snd_pcm_sframes_t result; 3093 unsigned long i; 3094 void __user **bufs; 3095 snd_pcm_uframes_t frames; 3096 3097 pcm_file = iocb->ki_filp->private_data; 3098 substream = pcm_file->substream; 3099 if (PCM_RUNTIME_CHECK(substream)) 3100 return -ENXIO; 3101 runtime = substream->runtime; 3102 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3103 return -EBADFD; 3104 if (!iter_is_iovec(from)) 3105 return -EINVAL; 3106 if (from->nr_segs > 128 || from->nr_segs != runtime->channels || 3107 !frame_aligned(runtime, from->iov->iov_len)) 3108 return -EINVAL; 3109 frames = bytes_to_samples(runtime, from->iov->iov_len); 3110 bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL); 3111 if (bufs == NULL) 3112 return -ENOMEM; 3113 for (i = 0; i < from->nr_segs; ++i) 3114 bufs[i] = from->iov[i].iov_base; 3115 result = snd_pcm_lib_writev(substream, bufs, frames); 3116 if (result > 0) 3117 result = frames_to_bytes(runtime, result); 3118 kfree(bufs); 3119 return result; 3120 } 3121 3122 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait) 3123 { 3124 struct snd_pcm_file *pcm_file; 3125 struct snd_pcm_substream *substream; 3126 struct snd_pcm_runtime *runtime; 3127 __poll_t mask, ok; 3128 snd_pcm_uframes_t avail; 3129 3130 pcm_file = file->private_data; 3131 3132 substream = pcm_file->substream; 3133 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 3134 ok = EPOLLOUT | EPOLLWRNORM; 3135 else 3136 ok = EPOLLIN | EPOLLRDNORM; 3137 if (PCM_RUNTIME_CHECK(substream)) 3138 return ok | EPOLLERR; 3139 3140 runtime = substream->runtime; 3141 poll_wait(file, &runtime->sleep, wait); 3142 3143 mask = 0; 3144 snd_pcm_stream_lock_irq(substream); 3145 avail = snd_pcm_avail(substream); 3146 switch (runtime->status->state) { 3147 case SNDRV_PCM_STATE_RUNNING: 3148 case SNDRV_PCM_STATE_PREPARED: 3149 case SNDRV_PCM_STATE_PAUSED: 3150 if (avail >= runtime->control->avail_min) 3151 mask = ok; 3152 break; 3153 case SNDRV_PCM_STATE_DRAINING: 3154 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { 3155 mask = ok; 3156 if (!avail) 3157 mask |= EPOLLERR; 3158 } 3159 break; 3160 default: 3161 mask = ok | EPOLLERR; 3162 break; 3163 } 3164 snd_pcm_stream_unlock_irq(substream); 3165 return mask; 3166 } 3167 3168 /* 3169 * mmap support 3170 */ 3171 3172 /* 3173 * Only on coherent architectures, we can mmap the status and the control records 3174 * for effcient data transfer. On others, we have to use HWSYNC ioctl... 
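 *
 * (The SNDRV_PCM_IOCTL_SYNC_PTR handler above is the substitute there;
 * a rough user-space sketch, with a hypothetical fd and a zero-initialized
 * struct snd_pcm_sync_ptr sp:
 *
 *	sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC |
 *		   SNDRV_PCM_SYNC_PTR_APPL | SNDRV_PCM_SYNC_PTR_AVAIL_MIN;
 *	ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);
 *
 * setting the APPL and AVAIL_MIN bits asks the kernel to report its own
 * appl_ptr/avail_min instead of applying the caller's copies.)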
3175 */ 3176 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) 3177 /* 3178 * mmap status record 3179 */ 3180 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3181 { 3182 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3183 struct snd_pcm_runtime *runtime; 3184 3185 if (substream == NULL) 3186 return VM_FAULT_SIGBUS; 3187 runtime = substream->runtime; 3188 vmf->page = virt_to_page(runtime->status); 3189 get_page(vmf->page); 3190 return 0; 3191 } 3192 3193 static const struct vm_operations_struct snd_pcm_vm_ops_status = 3194 { 3195 .fault = snd_pcm_mmap_status_fault, 3196 }; 3197 3198 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, 3199 struct vm_area_struct *area) 3200 { 3201 long size; 3202 if (!(area->vm_flags & VM_READ)) 3203 return -EINVAL; 3204 size = area->vm_end - area->vm_start; 3205 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) 3206 return -EINVAL; 3207 area->vm_ops = &snd_pcm_vm_ops_status; 3208 area->vm_private_data = substream; 3209 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3210 return 0; 3211 } 3212 3213 /* 3214 * mmap control record 3215 */ 3216 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3217 { 3218 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3219 struct snd_pcm_runtime *runtime; 3220 3221 if (substream == NULL) 3222 return VM_FAULT_SIGBUS; 3223 runtime = substream->runtime; 3224 vmf->page = virt_to_page(runtime->control); 3225 get_page(vmf->page); 3226 return 0; 3227 } 3228 3229 static const struct vm_operations_struct snd_pcm_vm_ops_control = 3230 { 3231 .fault = snd_pcm_mmap_control_fault, 3232 }; 3233 3234 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, 3235 struct vm_area_struct *area) 3236 { 3237 long size; 3238 if (!(area->vm_flags & VM_READ)) 3239 return -EINVAL; 3240 size = area->vm_end - area->vm_start; 3241 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) 3242 return -EINVAL; 3243 area->vm_ops = &snd_pcm_vm_ops_control; 3244 area->vm_private_data = substream; 3245 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3246 return 0; 3247 } 3248 3249 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file) 3250 { 3251 if (pcm_file->no_compat_mmap) 3252 return false; 3253 /* See pcm_control_mmap_allowed() below. 3254 * Since older alsa-lib requires both status and control mmaps to be 3255 * coupled, we have to disable the status mmap for old alsa-lib, too. 3256 */ 3257 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) && 3258 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)) 3259 return false; 3260 return true; 3261 } 3262 3263 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file) 3264 { 3265 if (pcm_file->no_compat_mmap) 3266 return false; 3267 /* Disallow the control mmap when SYNC_APPLPTR flag is set; 3268 * it enforces the user-space to fall back to snd_pcm_sync_ptr(), 3269 * thus it effectively assures the manual update of appl_ptr. 3270 */ 3271 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR) 3272 return false; 3273 return true; 3274 } 3275 3276 #else /* ! coherent mmap */ 3277 /* 3278 * don't support mmap for status and control records. 
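 * The stubs below return -ENXIO and the *_mmap_allowed() helpers are
 * defined to false, so snd_pcm_mmap() rejects the status/control offsets
 * and user space is expected to fall back to SNDRV_PCM_IOCTL_SYNC_PTR.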
3279 */ 3280 #define pcm_status_mmap_allowed(pcm_file) false 3281 #define pcm_control_mmap_allowed(pcm_file) false 3282 3283 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, 3284 struct vm_area_struct *area) 3285 { 3286 return -ENXIO; 3287 } 3288 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, 3289 struct vm_area_struct *area) 3290 { 3291 return -ENXIO; 3292 } 3293 #endif /* coherent mmap */ 3294 3295 static inline struct page * 3296 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) 3297 { 3298 void *vaddr = substream->runtime->dma_area + ofs; 3299 return virt_to_page(vaddr); 3300 } 3301 3302 /* 3303 * fault callback for mmapping a RAM page 3304 */ 3305 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3306 { 3307 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3308 struct snd_pcm_runtime *runtime; 3309 unsigned long offset; 3310 struct page * page; 3311 size_t dma_bytes; 3312 3313 if (substream == NULL) 3314 return VM_FAULT_SIGBUS; 3315 runtime = substream->runtime; 3316 offset = vmf->pgoff << PAGE_SHIFT; 3317 dma_bytes = PAGE_ALIGN(runtime->dma_bytes); 3318 if (offset > dma_bytes - PAGE_SIZE) 3319 return VM_FAULT_SIGBUS; 3320 if (substream->ops->page) 3321 page = substream->ops->page(substream, offset); 3322 else 3323 page = snd_pcm_default_page_ops(substream, offset); 3324 if (!page) 3325 return VM_FAULT_SIGBUS; 3326 get_page(page); 3327 vmf->page = page; 3328 return 0; 3329 } 3330 3331 static const struct vm_operations_struct snd_pcm_vm_ops_data = { 3332 .open = snd_pcm_mmap_data_open, 3333 .close = snd_pcm_mmap_data_close, 3334 }; 3335 3336 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { 3337 .open = snd_pcm_mmap_data_open, 3338 .close = snd_pcm_mmap_data_close, 3339 .fault = snd_pcm_mmap_data_fault, 3340 }; 3341 3342 /* 3343 * mmap the DMA buffer on RAM 3344 */ 3345 3346 /** 3347 * snd_pcm_lib_default_mmap - Default PCM data mmap function 3348 * @substream: PCM substream 3349 * @area: VMA 3350 * 3351 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL, 3352 * this function is invoked implicitly. 
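 *
 * A driver that needs extra setup may also call it explicitly from its own
 * mmap callback; a minimal sketch (my_pcm_mmap is a hypothetical driver
 * function, not part of this file):
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *ss,
 *			       struct vm_area_struct *vma)
 *	{
 *		// driver-specific tweaks to vma would go here
 *		return snd_pcm_lib_default_mmap(ss, vma);
 *	}
 *
 * Return: 0 on success, or a negative error code.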
3353 */ 3354 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, 3355 struct vm_area_struct *area) 3356 { 3357 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3358 #ifdef CONFIG_GENERIC_ALLOCATOR 3359 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) { 3360 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); 3361 return remap_pfn_range(area, area->vm_start, 3362 substream->dma_buffer.addr >> PAGE_SHIFT, 3363 area->vm_end - area->vm_start, area->vm_page_prot); 3364 } 3365 #endif /* CONFIG_GENERIC_ALLOCATOR */ 3366 #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */ 3367 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page && 3368 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) 3369 return dma_mmap_coherent(substream->dma_buffer.dev.dev, 3370 area, 3371 substream->runtime->dma_area, 3372 substream->runtime->dma_addr, 3373 substream->runtime->dma_bytes); 3374 #endif /* CONFIG_X86 */ 3375 /* mmap with fault handler */ 3376 area->vm_ops = &snd_pcm_vm_ops_data_fault; 3377 return 0; 3378 } 3379 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); 3380 3381 /* 3382 * mmap the DMA buffer on I/O memory area 3383 */ 3384 #if SNDRV_PCM_INFO_MMAP_IOMEM 3385 /** 3386 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem 3387 * @substream: PCM substream 3388 * @area: VMA 3389 * 3390 * When your hardware uses the iomapped pages as the hardware buffer and 3391 * wants to mmap it, pass this function as mmap pcm_ops. Note that this 3392 * is supposed to work only on limited architectures. 3393 */ 3394 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, 3395 struct vm_area_struct *area) 3396 { 3397 struct snd_pcm_runtime *runtime = substream->runtime; 3398 3399 area->vm_page_prot = pgprot_noncached(area->vm_page_prot); 3400 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes); 3401 } 3402 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); 3403 #endif /* SNDRV_PCM_INFO_MMAP */ 3404 3405 /* 3406 * mmap DMA buffer 3407 */ 3408 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, 3409 struct vm_area_struct *area) 3410 { 3411 struct snd_pcm_runtime *runtime; 3412 long size; 3413 unsigned long offset; 3414 size_t dma_bytes; 3415 int err; 3416 3417 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 3418 if (!(area->vm_flags & (VM_WRITE|VM_READ))) 3419 return -EINVAL; 3420 } else { 3421 if (!(area->vm_flags & VM_READ)) 3422 return -EINVAL; 3423 } 3424 runtime = substream->runtime; 3425 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3426 return -EBADFD; 3427 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) 3428 return -ENXIO; 3429 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || 3430 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) 3431 return -EINVAL; 3432 size = area->vm_end - area->vm_start; 3433 offset = area->vm_pgoff << PAGE_SHIFT; 3434 dma_bytes = PAGE_ALIGN(runtime->dma_bytes); 3435 if ((size_t)size > dma_bytes) 3436 return -EINVAL; 3437 if (offset > dma_bytes - size) 3438 return -EINVAL; 3439 3440 area->vm_ops = &snd_pcm_vm_ops_data; 3441 area->vm_private_data = substream; 3442 if (substream->ops->mmap) 3443 err = substream->ops->mmap(substream, area); 3444 else 3445 err = snd_pcm_lib_default_mmap(substream, area); 3446 if (!err) 3447 atomic_inc(&substream->mmap_count); 3448 return err; 3449 } 3450 EXPORT_SYMBOL(snd_pcm_mmap_data); 3451 3452 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area) 3453 { 3454 struct snd_pcm_file * pcm_file; 3455 struct snd_pcm_substream 
*substream; 3456 unsigned long offset; 3457 3458 pcm_file = file->private_data; 3459 substream = pcm_file->substream; 3460 if (PCM_RUNTIME_CHECK(substream)) 3461 return -ENXIO; 3462 3463 offset = area->vm_pgoff << PAGE_SHIFT; 3464 switch (offset) { 3465 case SNDRV_PCM_MMAP_OFFSET_STATUS: 3466 if (!pcm_status_mmap_allowed(pcm_file)) 3467 return -ENXIO; 3468 return snd_pcm_mmap_status(substream, file, area); 3469 case SNDRV_PCM_MMAP_OFFSET_CONTROL: 3470 if (!pcm_control_mmap_allowed(pcm_file)) 3471 return -ENXIO; 3472 return snd_pcm_mmap_control(substream, file, area); 3473 default: 3474 return snd_pcm_mmap_data(substream, file, area); 3475 } 3476 return 0; 3477 } 3478 3479 static int snd_pcm_fasync(int fd, struct file * file, int on) 3480 { 3481 struct snd_pcm_file * pcm_file; 3482 struct snd_pcm_substream *substream; 3483 struct snd_pcm_runtime *runtime; 3484 3485 pcm_file = file->private_data; 3486 substream = pcm_file->substream; 3487 if (PCM_RUNTIME_CHECK(substream)) 3488 return -ENXIO; 3489 runtime = substream->runtime; 3490 return fasync_helper(fd, file, on, &runtime->fasync); 3491 } 3492 3493 /* 3494 * ioctl32 compat 3495 */ 3496 #ifdef CONFIG_COMPAT 3497 #include "pcm_compat.c" 3498 #else 3499 #define snd_pcm_ioctl_compat NULL 3500 #endif 3501 3502 /* 3503 * To be removed helpers to keep binary compatibility 3504 */ 3505 3506 #ifdef CONFIG_SND_SUPPORT_OLD_API 3507 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5)) 3508 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5)) 3509 3510 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params, 3511 struct snd_pcm_hw_params_old *oparams) 3512 { 3513 unsigned int i; 3514 3515 memset(params, 0, sizeof(*params)); 3516 params->flags = oparams->flags; 3517 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) 3518 params->masks[i].bits[0] = oparams->masks[i]; 3519 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals)); 3520 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask); 3521 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask); 3522 params->info = oparams->info; 3523 params->msbits = oparams->msbits; 3524 params->rate_num = oparams->rate_num; 3525 params->rate_den = oparams->rate_den; 3526 params->fifo_size = oparams->fifo_size; 3527 } 3528 3529 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams, 3530 struct snd_pcm_hw_params *params) 3531 { 3532 unsigned int i; 3533 3534 memset(oparams, 0, sizeof(*oparams)); 3535 oparams->flags = params->flags; 3536 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) 3537 oparams->masks[i] = params->masks[i].bits[0]; 3538 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals)); 3539 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask); 3540 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask); 3541 oparams->info = params->info; 3542 oparams->msbits = params->msbits; 3543 oparams->rate_num = params->rate_num; 3544 oparams->rate_den = params->rate_den; 3545 oparams->fifo_size = params->fifo_size; 3546 } 3547 3548 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, 3549 struct snd_pcm_hw_params_old __user * _oparams) 3550 { 3551 struct snd_pcm_hw_params *params; 3552 struct snd_pcm_hw_params_old *oparams = NULL; 3553 int err; 3554 3555 params = kmalloc(sizeof(*params), GFP_KERNEL); 3556 if (!params) 3557 return -ENOMEM; 3558 3559 oparams = memdup_user(_oparams, sizeof(*oparams)); 3560 if (IS_ERR(oparams)) { 3561 err = PTR_ERR(oparams); 3562 goto out; 3563 } 3564 
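	/* translate the old-ABI layout into the current one, refine it,
	 * then convert the result back for the caller */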
snd_pcm_hw_convert_from_old_params(params, oparams); 3565 err = snd_pcm_hw_refine(substream, params); 3566 if (err < 0) 3567 goto out_old; 3568 3569 err = fixup_unreferenced_params(substream, params); 3570 if (err < 0) 3571 goto out_old; 3572 3573 snd_pcm_hw_convert_to_old_params(oparams, params); 3574 if (copy_to_user(_oparams, oparams, sizeof(*oparams))) 3575 err = -EFAULT; 3576 out_old: 3577 kfree(oparams); 3578 out: 3579 kfree(params); 3580 return err; 3581 } 3582 3583 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, 3584 struct snd_pcm_hw_params_old __user * _oparams) 3585 { 3586 struct snd_pcm_hw_params *params; 3587 struct snd_pcm_hw_params_old *oparams = NULL; 3588 int err; 3589 3590 params = kmalloc(sizeof(*params), GFP_KERNEL); 3591 if (!params) 3592 return -ENOMEM; 3593 3594 oparams = memdup_user(_oparams, sizeof(*oparams)); 3595 if (IS_ERR(oparams)) { 3596 err = PTR_ERR(oparams); 3597 goto out; 3598 } 3599 3600 snd_pcm_hw_convert_from_old_params(params, oparams); 3601 err = snd_pcm_hw_params(substream, params); 3602 if (err < 0) 3603 goto out_old; 3604 3605 snd_pcm_hw_convert_to_old_params(oparams, params); 3606 if (copy_to_user(_oparams, oparams, sizeof(*oparams))) 3607 err = -EFAULT; 3608 out_old: 3609 kfree(oparams); 3610 out: 3611 kfree(params); 3612 return err; 3613 } 3614 #endif /* CONFIG_SND_SUPPORT_OLD_API */ 3615 3616 #ifndef CONFIG_MMU 3617 static unsigned long snd_pcm_get_unmapped_area(struct file *file, 3618 unsigned long addr, 3619 unsigned long len, 3620 unsigned long pgoff, 3621 unsigned long flags) 3622 { 3623 struct snd_pcm_file *pcm_file = file->private_data; 3624 struct snd_pcm_substream *substream = pcm_file->substream; 3625 struct snd_pcm_runtime *runtime = substream->runtime; 3626 unsigned long offset = pgoff << PAGE_SHIFT; 3627 3628 switch (offset) { 3629 case SNDRV_PCM_MMAP_OFFSET_STATUS: 3630 return (unsigned long)runtime->status; 3631 case SNDRV_PCM_MMAP_OFFSET_CONTROL: 3632 return (unsigned long)runtime->control; 3633 default: 3634 return (unsigned long)runtime->dma_area + offset; 3635 } 3636 } 3637 #else 3638 # define snd_pcm_get_unmapped_area NULL 3639 #endif 3640 3641 /* 3642 * Register section 3643 */ 3644 3645 const struct file_operations snd_pcm_f_ops[2] = { 3646 { 3647 .owner = THIS_MODULE, 3648 .write = snd_pcm_write, 3649 .write_iter = snd_pcm_writev, 3650 .open = snd_pcm_playback_open, 3651 .release = snd_pcm_release, 3652 .llseek = no_llseek, 3653 .poll = snd_pcm_poll, 3654 .unlocked_ioctl = snd_pcm_ioctl, 3655 .compat_ioctl = snd_pcm_ioctl_compat, 3656 .mmap = snd_pcm_mmap, 3657 .fasync = snd_pcm_fasync, 3658 .get_unmapped_area = snd_pcm_get_unmapped_area, 3659 }, 3660 { 3661 .owner = THIS_MODULE, 3662 .read = snd_pcm_read, 3663 .read_iter = snd_pcm_readv, 3664 .open = snd_pcm_capture_open, 3665 .release = snd_pcm_release, 3666 .llseek = no_llseek, 3667 .poll = snd_pcm_poll, 3668 .unlocked_ioctl = snd_pcm_ioctl, 3669 .compat_ioctl = snd_pcm_ioctl_compat, 3670 .mmap = snd_pcm_mmap, 3671 .fasync = snd_pcm_fasync, 3672 .get_unmapped_area = snd_pcm_get_unmapped_area, 3673 } 3674 }; 3675