// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Digital Audio (PCM) abstract layer
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Abramo Bagnara <abramo@alsa-project.org>
 */

#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#define trace_applptr(substream, prev, curr)
#endif

static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;
	int err;

	if (runtime->silence_size < runtime->boundary) {
		snd_pcm_sframes_t noise_dist, n;
		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		if (runtime->silence_start != appl_ptr) {
			n = appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ? avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	ofs = runtime->silence_start % runtime->buffer_size;
	while (frames > 0) {
		transfer = ofs + frames > runtime->buffer_size ?
			runtime->buffer_size - ofs : frames;
		err = fill_silence_frames(substream, ofs, transfer);
		snd_BUG_ON(err < 0);
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;
	}
}

#ifdef CONFIG_SND_DEBUG
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
			char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ? 'c' : 'p',
		 substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif

#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG

#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

/* call with stream lock held */
void __snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	trace_xrun(substream);
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		snd_pcm_debug_name(substream, name, sizeof(name));
		pcm_warn(substream->pcm, "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
	do {								\
		trace_hw_ptr_error(substream, reason);			\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
					   (in_interrupt) ? 'Q' : 'P', ##args); \
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)

#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */

#define hw_ptr_error(substream, fmt, args...) do { } while (0)

#endif

int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	avail = snd_pcm_avail(substream);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			__snd_pcm_xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}

static void update_audio_tstamp(struct snd_pcm_substream *substream,
				struct timespec *curr_tstamp,
				struct timespec *audio_tstamp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	u64 audio_frames, audio_nsecs;
	struct timespec driver_tstamp;

	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
		return;

	if (!(substream->ops->get_time_info) ||
	    (runtime->audio_tstamp_report.actual_type ==
	     SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {

		/*
		 * provide audio timestamp derived from pointer position
		 * add delay only if requested
		 */

		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;

		if (runtime->audio_tstamp_config.report_delay) {
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
				audio_frames -= runtime->delay;
			else
				audio_frames += runtime->delay;
		}
		audio_nsecs = div_u64(audio_frames * 1000000000LL,
				      runtime->rate);
		*audio_tstamp = ns_to_timespec(audio_nsecs);
	}
	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
		runtime->status->audio_tstamp = *audio_tstamp;
		runtime->status->tstamp = *curr_tstamp;
	}

	/*
	 * re-take a driver timestamp to let apps detect if the reference tstamp
	 * read by low-level hardware was provided with a delay
	 */
	snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
	runtime->driver_tstamp = driver_tstamp;
}

static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
				  unsigned int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
	snd_pcm_sframes_t hdelta, delta;
	unsigned long jdelta;
	unsigned long curr_jiffies;
	struct timespec curr_tstamp;
	struct timespec audio_tstamp;
	int crossed_boundary = 0;

	old_hw_ptr = runtime->status->hw_ptr;

	/*
	 * group pointer, time and jiffies reads to allow for more
	 * accurate correlations/corrections.
	 * The values are stored at the end of this routine after
	 * corrections for hw_ptr position
	 */
	pos = substream->ops->pointer(substream);
	curr_jiffies = jiffies;
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		if ((substream->ops->get_time_info) &&
		    (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
			substream->ops->get_time_info(substream, &curr_tstamp,
						      &audio_tstamp,
						      &runtime->audio_tstamp_config,
						      &runtime->audio_tstamp_report);

			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
				snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
		} else
			snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
	}

	if (pos == SNDRV_PCM_POS_XRUN) {
		__snd_pcm_xrun(substream);
		return -EPIPE;
	}
	if (pos >= runtime->buffer_size) {
		if (printk_ratelimit()) {
			char name[16];
			snd_pcm_debug_name(substream, name, sizeof(name));
			pcm_err(substream->pcm,
				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
				name, pos, runtime->buffer_size,
				runtime->period_size);
		}
		pos = 0;
	}
	pos -= pos % runtime->min_align;
	trace_hwptr(substream, pos, in_interrupt);
	hw_base = runtime->hw_ptr_base;
	new_hw_ptr = hw_base + pos;
	if (in_interrupt) {
		/* we know that one period was processed */
		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
		delta = runtime->hw_ptr_interrupt + runtime->period_size;
		if (delta > new_hw_ptr) {
			/* check for double acknowledged interrupts */
			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
				hw_base += runtime->buffer_size;
				if (hw_base >= runtime->boundary) {
					hw_base = 0;
					crossed_boundary++;
				}
				new_hw_ptr = hw_base + pos;
				goto __delta;
			}
		}
	}
	/* new_hw_ptr might be lower than old_hw_ptr in case when */
	/* pointer crosses the end of the ring buffer */
	if (new_hw_ptr < old_hw_ptr) {
		hw_base += runtime->buffer_size;
		if (hw_base >= runtime->boundary) {
			hw_base = 0;
			crossed_boundary++;
		}
		new_hw_ptr = hw_base + pos;
	}
 __delta:
	delta = new_hw_ptr - old_hw_ptr;
	if (delta < 0)
		delta += runtime->boundary;

	if (runtime->no_period_wakeup) {
		snd_pcm_sframes_t xrun_threshold;
		/*
		 * Without regular period interrupts, we have to check
		 * the elapsed time to detect xruns.
		 */
		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
			goto no_delta_check;
		hdelta = jdelta - delta * HZ / runtime->rate;
		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
		while (hdelta > xrun_threshold) {
			delta += runtime->buffer_size;
			hw_base += runtime->buffer_size;
			if (hw_base >= runtime->boundary) {
				hw_base = 0;
				crossed_boundary++;
			}
			new_hw_ptr = hw_base + pos;
			hdelta -= runtime->hw_ptr_buffer_jiffies;
		}
		goto no_delta_check;
	}

	/* something must be really wrong */
	if (delta >= runtime->buffer_size + runtime->period_size) {
		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)pos,
			     (long)new_hw_ptr, (long)old_hw_ptr);
		return 0;
	}

	/* Do jiffies check only in xrun_debug mode */
	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
		goto no_jiffies_check;

	/* Skip the jiffies check for hardware with the BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
		goto no_jiffies_check;
	hdelta = delta;
	if (hdelta < runtime->delay)
		goto no_jiffies_check;
	hdelta -= runtime->delay;
	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
		delta = jdelta /
			(((runtime->period_size * HZ) / runtime->rate)
								+ HZ/100);
		/* move new_hw_ptr according to jiffies, not the pos variable */
		new_hw_ptr = old_hw_ptr;
		hw_base = delta;
		/* use loop to avoid checks for delta overflows */
		/* the delta value is small or zero in most cases */
		while (delta > 0) {
			new_hw_ptr += runtime->period_size;
			if (new_hw_ptr >= runtime->boundary) {
				new_hw_ptr -= runtime->boundary;
				crossed_boundary--;
			}
			delta--;
		}
		/* align hw_base to buffer_size */
		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
			     (long)pos, (long)hdelta,
			     (long)runtime->period_size, jdelta,
			     ((hdelta * HZ) / runtime->rate), hw_base,
			     (unsigned long)old_hw_ptr,
			     (unsigned long)new_hw_ptr);
		/* reset values to proper state */
		delta = 0;
		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
	}
 no_jiffies_check:
	if (delta > runtime->period_size + runtime->period_size / 2) {
		hw_ptr_error(substream, in_interrupt,
			     "Lost interrupts?",
			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)delta,
			     (long)new_hw_ptr,
			     (long)old_hw_ptr);
	}

 no_delta_check:
	if (runtime->status->hw_ptr == new_hw_ptr) {
		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
		return 0;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, new_hw_ptr);

	if (in_interrupt) {
		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
		if (delta < 0)
			delta += runtime->boundary;
		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
		runtime->hw_ptr_interrupt += delta;
		if (runtime->hw_ptr_interrupt >= runtime->boundary)
			runtime->hw_ptr_interrupt -= runtime->boundary;
	}
	runtime->hw_ptr_base = hw_base;
	runtime->status->hw_ptr = new_hw_ptr;
	runtime->hw_ptr_jiffies = curr_jiffies;
	if (crossed_boundary) {
		snd_BUG_ON(crossed_boundary != 1);
		runtime->hw_ptr_wrap += runtime->boundary;
	}

	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);

	return snd_pcm_update_state(substream, runtime);
}

/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	return snd_pcm_update_hw_ptr0(substream, 0);
}

/**
 * snd_pcm_set_ops - set the PCM operators
 * @pcm: the pcm instance
 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 * @ops: the operator table
 *
 * Sets the given PCM operators to the pcm instance.
 */
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
		     const struct snd_pcm_ops *ops)
{
	struct snd_pcm_str *stream = &pcm->streams[direction];
	struct snd_pcm_substream *substream;

	for (substream = stream->substream; substream != NULL; substream = substream->next)
		substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);

/**
 * snd_pcm_set_sync - set the PCM sync id
 * @substream: the pcm substream
 *
 * Sets the PCM sync identifier for the card.
 */
void snd_pcm_set_sync(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->sync.id32[0] = substream->pcm->card->number;
	runtime->sync.id32[1] = -1;
	runtime->sync.id32[2] = -1;
	runtime->sync.id32[3] = -1;
}
EXPORT_SYMBOL(snd_pcm_set_sync);
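
/*
 * Example usage (illustrative sketch only; the "mychip_*" callbacks are
 * hypothetical placeholder names, while snd_pcm_set_ops(), snd_pcm_set_sync()
 * and snd_pcm_lib_ioctl() are the real helpers provided here):
 *
 *	static const struct snd_pcm_ops mychip_pcm_ops = {
 *		.open      = mychip_pcm_open,    // typically calls snd_pcm_set_sync()
 *		.close     = mychip_pcm_close,
 *		.ioctl     = snd_pcm_lib_ioctl,  // generic ioctl handler defined below
 *		.hw_params = mychip_pcm_hw_params,
 *		.hw_free   = mychip_pcm_hw_free,
 *		.prepare   = mychip_pcm_prepare,
 *		.trigger   = mychip_pcm_trigger,
 *		.pointer   = mychip_pcm_pointer,
 *	};
 *
 *	// after snd_pcm_new() in the card probe code:
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &mychip_pcm_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &mychip_pcm_ops);
 */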

/*
 *  Standard ioctl routine
 */

static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;
	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

/**
 * snd_interval_refine - refine the interval value of configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) is evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
	int changed = 0;
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (i->min < v->min) {
		i->min = v->min;
		i->openmin = v->openmin;
		changed = 1;
	} else if (i->min == v->min && !i->openmin && v->openmin) {
		i->openmin = 1;
		changed = 1;
	}
	if (i->max > v->max) {
		i->max = v->max;
		i->openmax = v->openmax;
		changed = 1;
	} else if (i->max == v->max && !i->openmax && v->openmax) {
		i->openmax = 1;
		changed = 1;
	}
	if (!i->integer && v->integer) {
		i->integer = 1;
		changed = 1;
	}
	if (i->integer) {
		if (i->openmin) {
			i->min++;
			i->openmin = 0;
		}
		if (i->openmax) {
			i->max--;
			i->openmax = 0;
		}
	} else if (!i->openmin && !i->openmax && i->min == i->max)
		i->integer = 1;
	if (snd_interval_checkempty(i)) {
		snd_interval_none(i);
		return -EINVAL;
	}
	return changed;
}
EXPORT_SYMBOL(snd_interval_refine);
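
/*
 * Worked example: refining the interval [8000, 48000] against the reference
 * interval [44100, 192000] narrows it to [44100, 48000] and returns 1
 * (changed); refining it against [96000, 192000] leaves no overlap, so the
 * interval is marked empty and -EINVAL is returned.
 *
 *	struct snd_interval i = { .min = 8000,  .max = 48000,  .integer = 1 };
 *	struct snd_interval v = { .min = 44100, .max = 192000, .integer = 1 };
 *	err = snd_interval_refine(&i, &v);	// err == 1, i == [44100, 48000]
 */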

static int snd_interval_refine_first(struct snd_interval *i)
{
	const unsigned int last_max = i->max;

	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->max = i->min;
	if (i->openmin)
		i->max++;
	/* only exclude max value if also excluded before refine */
	i->openmax = (i->openmax && i->max >= last_max);
	return 1;
}

static int snd_interval_refine_last(struct snd_interval *i)
{
	const unsigned int last_min = i->min;

	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->min = i->max;
	if (i->openmax)
		i->min--;
	/* only exclude min value if also excluded before refine */
	i->openmin = (i->openmin && i->min <= last_min);
	return 1;
}

void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = mul(a->min, b->min);
	c->openmin = (a->openmin || b->openmin);
	c->max = mul(a->max, b->max);
	c->openmax = (a->openmax || b->openmax);
	c->integer = (a->integer && b->integer);
}

/**
 * snd_interval_div - refine the interval value with division
 * @a: dividend
 * @b: divisor
 * @c: quotient
 *
 * c = a / b
 */
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = div32(a->min, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = div32(a->max, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/**
 * snd_interval_muldivk - refine the interval value
 * @a: dividend 1
 * @b: dividend 2
 * @k: divisor (as integer)
 * @c: result
 *
 * c = a * b / k
 */
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
			  unsigned int k, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, b->min, k, &r);
	c->openmin = (r || a->openmin || b->openmin);
	c->max = muldiv32(a->max, b->max, k, &r);
	if (r) {
		c->max++;
		c->openmax = 1;
	} else
		c->openmax = (a->openmax || b->openmax);
	c->integer = 0;
}

/**
 * snd_interval_mulkdiv - refine the interval value
 * @a: dividend 1
 * @k: dividend 2 (as integer)
 * @b: divisor
 * @c: result
 *
 * c = a * k / b
 */
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
			  const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, k, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = muldiv32(a->max, k, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/* ---- */


/**
 * snd_interval_ratnum - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct snd_ratnum
 * @rats: struct snd_ratnum array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ratnum(struct snd_interval *i,
			unsigned int rats_count, const struct snd_ratnum *rats,
			unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_den;
	int best_diff;
	unsigned int k;
	struct snd_interval t;
	int err;
	unsigned int result_num, result_den;
	int result_diff;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->min;
		int diff;
		if (q == 0)
			q = 1;
		den = div_up(num, q);
		if (den < rats[k].den_min)
			continue;
		if (den > rats[k].den_max)
			den = rats[k].den_max;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den -= r;
		}
		diff = num - q * den;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	result_num = best_num;
	result_diff = best_diff;
	result_den = best_den;
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->max;
		int diff;
		if (q == 0) {
			i->empty = 1;
			return -EINVAL;
		}
		den = div_down(num, q);
		if (den > rats[k].den_max)
			continue;
		if (den < rats[k].den_min)
			den = rats[k].den_min;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den += rats[k].den_step - r;
		}
		diff = q * den - num;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (best_diff * result_den < result_diff * best_den) {
			result_num = best_num;
			result_den = best_den;
		}
		if (nump)
			*nump = result_num;
		if (denp)
			*denp = result_den;
	}
	return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);

/**
 * snd_interval_ratden - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct ratden
 * @rats: struct ratden array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
static int snd_interval_ratden(struct snd_interval *i,
			       unsigned int rats_count,
			       const struct snd_ratden *rats,
			       unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_diff, best_den;
	unsigned int k;
	struct snd_interval t;
	int err;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->min;
		int diff;
		num = mul(q, den);
		if (num > rats[k].num_max)
			continue;
		if (num < rats[k].num_min)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num += rats[k].num_step - r;
		}
		diff = num - q * den;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->max;
		int diff;
		num = mul(q, den);
		if (num < rats[k].num_min)
			continue;
		if (num > rats[k].num_max)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num -= r;
		}
		diff = q * den - num;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (nump)
			*nump = best_num;
		if (denp)
			*denp = best_den;
	}
	return err;
}

/**
 * snd_interval_list - refine the interval value from the list
 * @i: the interval value to refine
 * @count: the number of elements in the list
 * @list: the value list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_list(struct snd_interval *i, unsigned int count,
		      const unsigned int *list, unsigned int mask)
{
	unsigned int k;
	struct snd_interval list_range;

	if (!count) {
		i->empty = 1;
		return -EINVAL;
	}
	snd_interval_any(&list_range);
	list_range.min = UINT_MAX;
	list_range.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		if (!snd_interval_test(i, list[k]))
			continue;
		list_range.min = min(list_range.min, list[k]);
		list_range.max = max(list_range.max, list[k]);
	}
	return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);

/**
 * snd_interval_ranges - refine the interval value from the list of ranges
 * @i: the interval value to refine
 * @count: the number of elements in the list of ranges
 * @ranges: the ranges list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list of ranges.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
			const struct snd_interval *ranges, unsigned int mask)
{
	unsigned int k;
	struct snd_interval range_union;
	struct snd_interval range;

	if (!count) {
		snd_interval_none(i);
		return -EINVAL;
	}
	snd_interval_any(&range_union);
	range_union.min = UINT_MAX;
	range_union.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		snd_interval_copy(&range, &ranges[k]);
		if (snd_interval_refine(&range, i) < 0)
			continue;
		if (snd_interval_empty(&range))
			continue;

		if (range.min < range_union.min) {
			range_union.min = range.min;
			range_union.openmin = 1;
		}
		if (range.min == range_union.min && !range.openmin)
			range_union.openmin = 0;
		if (range.max > range_union.max) {
			range_union.max = range.max;
			range_union.openmax = 1;
		}
		if (range.max == range_union.max && !range.openmax)
			range_union.openmax = 0;
	}
	return snd_interval_refine(i, &range_union);
}
EXPORT_SYMBOL(snd_interval_ranges);

static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
	unsigned int n;
	int changed = 0;
	n = i->min % step;
	if (n != 0 || i->openmin) {
		i->min += step - n;
		i->openmin = 0;
		changed = 1;
	}
	n = i->max % step;
	if (n != 0 || i->openmax) {
		i->max -= n;
		i->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(i)) {
		i->empty = 1;
		return -EINVAL;
	}
	return changed;
}

/* Info constraints helpers */

/**
 * snd_pcm_hw_rule_add - add the hw-constraint rule
 * @runtime: the pcm runtime instance
 * @cond: condition bits
 * @var: the variable to evaluate
 * @func: the evaluation function
 * @private: the private data pointer passed to function
 * @dep: the dependent variables
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
			int var,
			snd_pcm_hw_rule_func_t func, void *private,
			int dep, ...)
1112 { 1113 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1114 struct snd_pcm_hw_rule *c; 1115 unsigned int k; 1116 va_list args; 1117 va_start(args, dep); 1118 if (constrs->rules_num >= constrs->rules_all) { 1119 struct snd_pcm_hw_rule *new; 1120 unsigned int new_rules = constrs->rules_all + 16; 1121 new = krealloc(constrs->rules, new_rules * sizeof(*c), 1122 GFP_KERNEL); 1123 if (!new) { 1124 va_end(args); 1125 return -ENOMEM; 1126 } 1127 constrs->rules = new; 1128 constrs->rules_all = new_rules; 1129 } 1130 c = &constrs->rules[constrs->rules_num]; 1131 c->cond = cond; 1132 c->func = func; 1133 c->var = var; 1134 c->private = private; 1135 k = 0; 1136 while (1) { 1137 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) { 1138 va_end(args); 1139 return -EINVAL; 1140 } 1141 c->deps[k++] = dep; 1142 if (dep < 0) 1143 break; 1144 dep = va_arg(args, int); 1145 } 1146 constrs->rules_num++; 1147 va_end(args); 1148 return 0; 1149 } 1150 EXPORT_SYMBOL(snd_pcm_hw_rule_add); 1151 1152 /** 1153 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint 1154 * @runtime: PCM runtime instance 1155 * @var: hw_params variable to apply the mask 1156 * @mask: the bitmap mask 1157 * 1158 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter. 1159 * 1160 * Return: Zero if successful, or a negative error code on failure. 1161 */ 1162 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, 1163 u_int32_t mask) 1164 { 1165 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1166 struct snd_mask *maskp = constrs_mask(constrs, var); 1167 *maskp->bits &= mask; 1168 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */ 1169 if (*maskp->bits == 0) 1170 return -EINVAL; 1171 return 0; 1172 } 1173 1174 /** 1175 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint 1176 * @runtime: PCM runtime instance 1177 * @var: hw_params variable to apply the mask 1178 * @mask: the 64bit bitmap mask 1179 * 1180 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter. 1181 * 1182 * Return: Zero if successful, or a negative error code on failure. 1183 */ 1184 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, 1185 u_int64_t mask) 1186 { 1187 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1188 struct snd_mask *maskp = constrs_mask(constrs, var); 1189 maskp->bits[0] &= (u_int32_t)mask; 1190 maskp->bits[1] &= (u_int32_t)(mask >> 32); 1191 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ 1192 if (! maskp->bits[0] && ! maskp->bits[1]) 1193 return -EINVAL; 1194 return 0; 1195 } 1196 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64); 1197 1198 /** 1199 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval 1200 * @runtime: PCM runtime instance 1201 * @var: hw_params variable to apply the integer constraint 1202 * 1203 * Apply the constraint of integer to an interval parameter. 1204 * 1205 * Return: Positive if the value is changed, zero if it's not changed, or a 1206 * negative error code. 

/**
 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
			       u_int32_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	*maskp->bits &= mask;
	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
	if (*maskp->bits == 0)
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the 64bit bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 u_int64_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	maskp->bits[0] &= (u_int32_t)mask;
	maskp->bits[1] &= (u_int32_t)(mask >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);

/**
 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the integer constraint
 *
 * Apply the constraint of integer to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);

/**
 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the range
 * @min: the minimal value
 * @max: the maximal value
 *
 * Apply the min/max range constraint to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 unsigned int min, unsigned int max)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_interval t;
	t.min = min;
	t.max = max;
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);

static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_list *list = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}


/**
 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list constraint
 * @l: list
 *
 * Apply the list of constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       const struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, (void *)l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
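
/*
 * Example (sketch): constraining the rate to a fixed set of values from the
 * driver's open callback.  The rate table is hypothetical hardware data; the
 * calls themselves follow the API above.
 *
 *	static const unsigned int mychip_rates[] = { 32000, 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list mychip_rate_constraints = {
 *		.count = ARRAY_SIZE(mychip_rates),
 *		.list  = mychip_rates,
 *		.mask  = 0,
 *	};
 *
 *	// in the open callback:
 *	err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *					 &mychip_rate_constraints);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 */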

static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ranges *r = rule->private;
	return snd_interval_ranges(hw_param_interval(params, rule->var),
				   r->count, r->ranges, r->mask);
}


/**
 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list of range constraints
 * @r: ranges
 *
 * Apply the list of range constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 snd_pcm_hw_param_t var,
				 const struct snd_pcm_hw_constraint_ranges *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ranges, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);

static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;
	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;
	unsigned int msbits = l >> 16;
	const struct snd_interval *i =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);

	if (!snd_interval_single(i))
		return 0;

	if ((snd_interval_value(i) == width) ||
	    (width == 0 && snd_interval_value(i) > msbits))
		params->msbits = min_not_zero(params->msbits, msbits);

	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set
 * to 0 the msbits will be set for any sample format with a width larger than
 * the specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				   snd_pcm_hw_rule_msbits,
				   (void*) l,
				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
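
/*
 * Example (sketch): a codec that stores samples in 32-bit slots but only
 * resolves 24 bits would declare, from its open callback:
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *
 * so that userspace sees msbits = 24 whenever a 32-bit sample format is
 * chosen.
 */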

static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
	static unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
	};
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_pow2, NULL,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);

static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
					   struct snd_pcm_hw_rule *rule)
{
	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
	struct snd_interval *rate;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	return snd_interval_list(rate, 1, &base_rate, 0);
}

/**
 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
 * @runtime: PCM runtime instance
 * @base_rate: the rate at which the hardware does not resample
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
			       unsigned int base_rate)
{
	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
				   SNDRV_PCM_HW_PARAM_RATE,
				   snd_pcm_hw_rule_noresample_func,
				   (void *)(uintptr_t)base_rate,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
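
/*
 * Example (sketch): a device whose hardware clock runs natively at 48 kHz can
 * let applications opt out of rate conversion by adding, in its open callback:
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 *
 * The rule is conditional: it only takes effect when the application sets the
 * SNDRV_PCM_HW_PARAMS_NORESAMPLE flag in its hw_params request.
 */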

static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_any(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	if (hw_is_interval(var)) {
		snd_interval_any(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	snd_BUG();
}

void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
{
	unsigned int k;
	memset(params, 0, sizeof(*params));
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
		_snd_pcm_hw_param_any(params, k);
	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		_snd_pcm_hw_param_any(params, k);
	params->info = ~0U;
}
EXPORT_SYMBOL(_snd_pcm_hw_params_any);

/**
 * snd_pcm_hw_param_value - return @params field @var value
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Return: The value for field @var if it's fixed in configuration space
 * defined by @params. -%EINVAL otherwise.
 */
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	if (hw_is_mask(var)) {
		const struct snd_mask *mask = hw_param_mask_c(params, var);
		if (!snd_mask_single(mask))
			return -EINVAL;
		if (dir)
			*dir = 0;
		return snd_mask_value(mask);
	}
	if (hw_is_interval(var)) {
		const struct snd_interval *i = hw_param_interval_c(params, var);
		if (!snd_interval_single(i))
			return -EINVAL;
		if (dir)
			*dir = i->openmin;
		return snd_interval_value(i);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(snd_pcm_hw_param_value);

void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_none(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else if (hw_is_interval(var)) {
		snd_interval_none(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else {
		snd_BUG();
	}
}
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);

static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
				   snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_first(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_first(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_first - refine config space and return minimum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values > minimum. Reduce configuration space accordingly.
 *
 * Return: The minimum, or a negative error code on failure.
 */
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
			   struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_first(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_first);

static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_last(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_last(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_last - refine config space and return maximum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values < maximum. Reduce configuration space accordingly.
 *
 * Return: The maximum, or a negative error code on failure.
 */
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
			  struct snd_pcm_hw_params *params,
			  snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_last(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_last);
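
/*
 * Example (sketch): a hw_params fixup that pins the period size to the
 * smallest remaining value and the buffer size to the largest one:
 *
 *	snd_pcm_hw_param_first(substream, params,
 *			       SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL);
 *	snd_pcm_hw_param_last(substream, params,
 *			      SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL);
 */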

static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
				   void *arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream) &&
	    snd_pcm_update_hw_ptr(substream) >= 0)
		runtime->status->hw_ptr %= runtime->buffer_size;
	else {
		runtime->status->hw_ptr = 0;
		runtime->hw_ptr_wrap = 0;
	}
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return 0;
}

static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
					  void *arg)
{
	struct snd_pcm_channel_info *info = arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int width;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
		info->offset = -1;
		return 0;
	}
	width = snd_pcm_format_physical_width(runtime->format);
	if (width < 0)
		return width;
	info->offset = 0;
	switch (runtime->access) {
	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
		info->first = info->channel * width;
		info->step = runtime->channels * width;
		break;
	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
	{
		size_t size = runtime->dma_bytes / runtime->channels;
		info->first = info->channel * size * 8;
		info->step = width;
		break;
	}
	default:
		snd_BUG();
		break;
	}
	return 0;
}

static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
				       void *arg)
{
	struct snd_pcm_hw_params *params = arg;
	snd_pcm_format_t format;
	int channels;
	ssize_t frame_size;

	params->fifo_size = substream->runtime->hw.fifo_size;
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
		format = params_format(params);
		channels = params_channels(params);
		frame_size = snd_pcm_format_size(format, channels);
		if (frame_size > 0)
			params->fifo_size /= (unsigned)frame_size;
	}
	return 0;
}

/**
 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
 * @substream: the pcm substream instance
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Processes the generic ioctl commands for PCM.
 * Can be passed as the ioctl callback for PCM ops.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
		      unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_RESET:
		return snd_pcm_lib_ioctl_reset(substream, arg);
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
		return snd_pcm_lib_ioctl_channel_info(substream, arg);
	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
	}
	return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);

/**
 * snd_pcm_period_elapsed - update the pcm status for the next period
 * @substream: the pcm substream instance
 *
 * This function is called from the interrupt handler when the
 * PCM has processed the period size. It will update the current
 * pointer, wake up sleepers, etc.
 *
 * Even if more than one period has elapsed since the last call, you
 * have to call this function only once.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	unsigned long flags;

	if (snd_BUG_ON(!substream))
		return;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (PCM_RUNTIME_CHECK(substream))
		goto _unlock;
	runtime = substream->runtime;

	if (!snd_pcm_running(substream) ||
	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
		goto _end;

#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer_running)
		snd_timer_interrupt(substream->timer, 1);
#endif
 _end:
	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
 _unlock:
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
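
/*
 * Example (sketch): the typical call site is the sound card's interrupt
 * handler, once the hardware reports that (at least) one period has been
 * transferred.  The "mychip" structure and register accessors below are
 * hypothetical; only snd_pcm_period_elapsed() is the real API.
 *
 *	static irqreturn_t mychip_interrupt(int irq, void *dev_id)
 *	{
 *		struct mychip *chip = dev_id;
 *
 *		if (!mychip_period_done(chip))	// hypothetical status check
 *			return IRQ_NONE;
 *		mychip_ack_irq(chip);		// hypothetical IRQ acknowledge
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */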
1813 */ 1814 static int wait_for_avail(struct snd_pcm_substream *substream, 1815 snd_pcm_uframes_t *availp) 1816 { 1817 struct snd_pcm_runtime *runtime = substream->runtime; 1818 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; 1819 wait_queue_entry_t wait; 1820 int err = 0; 1821 snd_pcm_uframes_t avail = 0; 1822 long wait_time, tout; 1823 1824 init_waitqueue_entry(&wait, current); 1825 set_current_state(TASK_INTERRUPTIBLE); 1826 add_wait_queue(&runtime->tsleep, &wait); 1827 1828 if (runtime->no_period_wakeup) 1829 wait_time = MAX_SCHEDULE_TIMEOUT; 1830 else { 1831 /* use wait time from substream if available */ 1832 if (substream->wait_time) { 1833 wait_time = substream->wait_time; 1834 } else { 1835 wait_time = 10; 1836 1837 if (runtime->rate) { 1838 long t = runtime->period_size * 2 / 1839 runtime->rate; 1840 wait_time = max(t, wait_time); 1841 } 1842 wait_time = msecs_to_jiffies(wait_time * 1000); 1843 } 1844 } 1845 1846 for (;;) { 1847 if (signal_pending(current)) { 1848 err = -ERESTARTSYS; 1849 break; 1850 } 1851 1852 /* 1853 * We need to check if space became available already 1854 * (and thus the wakeup happened already) first to close 1855 * the race of space already having become available. 1856 * This check must happen after been added to the waitqueue 1857 * and having current state be INTERRUPTIBLE. 1858 */ 1859 avail = snd_pcm_avail(substream); 1860 if (avail >= runtime->twake) 1861 break; 1862 snd_pcm_stream_unlock_irq(substream); 1863 1864 tout = schedule_timeout(wait_time); 1865 1866 snd_pcm_stream_lock_irq(substream); 1867 set_current_state(TASK_INTERRUPTIBLE); 1868 switch (runtime->status->state) { 1869 case SNDRV_PCM_STATE_SUSPENDED: 1870 err = -ESTRPIPE; 1871 goto _endloop; 1872 case SNDRV_PCM_STATE_XRUN: 1873 err = -EPIPE; 1874 goto _endloop; 1875 case SNDRV_PCM_STATE_DRAINING: 1876 if (is_playback) 1877 err = -EPIPE; 1878 else 1879 avail = 0; /* indicate draining */ 1880 goto _endloop; 1881 case SNDRV_PCM_STATE_OPEN: 1882 case SNDRV_PCM_STATE_SETUP: 1883 case SNDRV_PCM_STATE_DISCONNECTED: 1884 err = -EBADFD; 1885 goto _endloop; 1886 case SNDRV_PCM_STATE_PAUSED: 1887 continue; 1888 } 1889 if (!tout) { 1890 pcm_dbg(substream->pcm, 1891 "%s write error (DMA or IRQ trouble?)\n", 1892 is_playback ? 
"playback" : "capture"); 1893 err = -EIO; 1894 break; 1895 } 1896 } 1897 _endloop: 1898 set_current_state(TASK_RUNNING); 1899 remove_wait_queue(&runtime->tsleep, &wait); 1900 *availp = avail; 1901 return err; 1902 } 1903 1904 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream, 1905 int channel, unsigned long hwoff, 1906 void *buf, unsigned long bytes); 1907 1908 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, 1909 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f); 1910 1911 /* calculate the target DMA-buffer position to be written/read */ 1912 static void *get_dma_ptr(struct snd_pcm_runtime *runtime, 1913 int channel, unsigned long hwoff) 1914 { 1915 return runtime->dma_area + hwoff + 1916 channel * (runtime->dma_bytes / runtime->channels); 1917 } 1918 1919 /* default copy_user ops for write; used for both interleaved and non- modes */ 1920 static int default_write_copy(struct snd_pcm_substream *substream, 1921 int channel, unsigned long hwoff, 1922 void *buf, unsigned long bytes) 1923 { 1924 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff), 1925 (void __user *)buf, bytes)) 1926 return -EFAULT; 1927 return 0; 1928 } 1929 1930 /* default copy_kernel ops for write */ 1931 static int default_write_copy_kernel(struct snd_pcm_substream *substream, 1932 int channel, unsigned long hwoff, 1933 void *buf, unsigned long bytes) 1934 { 1935 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes); 1936 return 0; 1937 } 1938 1939 /* fill silence instead of copy data; called as a transfer helper 1940 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when 1941 * a NULL buffer is passed 1942 */ 1943 static int fill_silence(struct snd_pcm_substream *substream, int channel, 1944 unsigned long hwoff, void *buf, unsigned long bytes) 1945 { 1946 struct snd_pcm_runtime *runtime = substream->runtime; 1947 1948 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) 1949 return 0; 1950 if (substream->ops->fill_silence) 1951 return substream->ops->fill_silence(substream, channel, 1952 hwoff, bytes); 1953 1954 snd_pcm_format_set_silence(runtime->format, 1955 get_dma_ptr(runtime, channel, hwoff), 1956 bytes_to_samples(runtime, bytes)); 1957 return 0; 1958 } 1959 1960 /* default copy_user ops for read; used for both interleaved and non- modes */ 1961 static int default_read_copy(struct snd_pcm_substream *substream, 1962 int channel, unsigned long hwoff, 1963 void *buf, unsigned long bytes) 1964 { 1965 if (copy_to_user((void __user *)buf, 1966 get_dma_ptr(substream->runtime, channel, hwoff), 1967 bytes)) 1968 return -EFAULT; 1969 return 0; 1970 } 1971 1972 /* default copy_kernel ops for read */ 1973 static int default_read_copy_kernel(struct snd_pcm_substream *substream, 1974 int channel, unsigned long hwoff, 1975 void *buf, unsigned long bytes) 1976 { 1977 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes); 1978 return 0; 1979 } 1980 1981 /* call transfer function with the converted pointers and sizes; 1982 * for interleaved mode, it's one shot for all samples 1983 */ 1984 static int interleaved_copy(struct snd_pcm_substream *substream, 1985 snd_pcm_uframes_t hwoff, void *data, 1986 snd_pcm_uframes_t off, 1987 snd_pcm_uframes_t frames, 1988 pcm_transfer_f transfer) 1989 { 1990 struct snd_pcm_runtime *runtime = substream->runtime; 1991 1992 /* convert to bytes */ 1993 hwoff = frames_to_bytes(runtime, hwoff); 1994 off = frames_to_bytes(runtime, off); 1995 frames = frames_to_bytes(runtime, frames); 1996 return 
/* call transfer function with the converted pointers and sizes;
 * for interleaved mode, it's one shot for all samples
 */
static int interleaved_copy(struct snd_pcm_substream *substream,
			    snd_pcm_uframes_t hwoff, void *data,
			    snd_pcm_uframes_t off,
			    snd_pcm_uframes_t frames,
			    pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* convert to bytes */
	hwoff = frames_to_bytes(runtime, hwoff);
	off = frames_to_bytes(runtime, off);
	frames = frames_to_bytes(runtime, frames);
	return transfer(substream, 0, hwoff, data + off, frames);
}

/* call transfer function with the converted pointers and sizes for each
 * non-interleaved channel; when buffer is NULL, silencing instead of copying
 */
static int noninterleaved_copy(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t hwoff, void *data,
			       snd_pcm_uframes_t off,
			       snd_pcm_uframes_t frames,
			       pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int channels = runtime->channels;
	void **bufs = data;
	int c, err;

	/* convert to bytes; note that it's not frames_to_bytes() here.
	 * in non-interleaved mode, we copy for each channel, thus
	 * each copy is n_samples bytes x channels = whole frames.
	 */
	off = samples_to_bytes(runtime, off);
	frames = samples_to_bytes(runtime, frames);
	hwoff = samples_to_bytes(runtime, hwoff);
	for (c = 0; c < channels; ++c, ++bufs) {
		if (!data || !*bufs)
			err = fill_silence(substream, c, hwoff, NULL, frames);
		else
			err = transfer(substream, c, hwoff, *bufs + off,
				       frames);
		if (err < 0)
			return err;
	}
	return 0;
}

/* fill silence on the given buffer position;
 * called from snd_pcm_playback_silence()
 */
static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
{
	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
		return interleaved_copy(substream, off, NULL, 0, frames,
					fill_silence);
	else
		return noninterleaved_copy(substream, off, NULL, 0, frames,
					   fill_silence);
}

/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
		return -EINVAL;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	return 0;
}

static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
{
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		return 0;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	case SNDRV_PCM_STATE_SUSPENDED:
		return -ESTRPIPE;
	default:
		return -EBADFD;
	}
}

/* update to the given appl_ptr and call ack callback if needed;
 * when an error is returned, take back to the original value
 */
int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
			   snd_pcm_uframes_t appl_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
	int ret;

	if (old_appl_ptr == appl_ptr)
		return 0;

	runtime->control->appl_ptr = appl_ptr;
	if (substream->ops->ack) {
		ret = substream->ops->ack(substream);
		if (ret < 0) {
			runtime->control->appl_ptr = old_appl_ptr;
			return ret;
		}
	}

	trace_applptr(substream, old_appl_ptr, appl_ptr);

	return 0;
}
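/*
 * Sketch of how the ack callback above is used by a driver (hypothetical
 * my_* names): drivers that must tell the hardware how much data the
 * application has committed, e.g. to kick a DMA engine, implement
 * snd_pcm_ops.ack; pcm_lib_apply_appl_ptr() rolls the pointer back if the
 * callback fails.
 *
 *	static int my_pcm_ack(struct snd_pcm_substream *substream)
 *	{
 *		struct snd_pcm_runtime *runtime = substream->runtime;
 *		struct my_chip *chip = snd_pcm_substream_chip(substream);
 *
 *		return my_queue_dma(chip, runtime->control->appl_ptr);	// hypothetical helper
 *	}
 */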
/* the common loop for read/write data */
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
				     void *data, bool interleaved,
				     snd_pcm_uframes_t size, bool in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	pcm_copy_f writer;
	pcm_transfer_f transfer;
	bool nonblock;
	bool is_playback;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;

	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	if (interleaved) {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
		    runtime->channels > 1)
			return -EINVAL;
		writer = interleaved_copy;
	} else {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
			return -EINVAL;
		writer = noninterleaved_copy;
	}

	if (!data) {
		if (is_playback)
			transfer = fill_silence;
		else
			return -EINVAL;
	} else if (in_kernel) {
		if (substream->ops->copy_kernel)
			transfer = substream->ops->copy_kernel;
		else
			transfer = is_playback ?
				default_write_copy_kernel : default_read_copy_kernel;
	} else {
		if (substream->ops->copy_user)
			transfer = (pcm_transfer_f)substream->ops->copy_user;
		else
			transfer = is_playback ?
				default_write_copy : default_read_copy;
	}

	if (size == 0)
		return 0;

	nonblock = !!(substream->f_flags & O_NONBLOCK);

	snd_pcm_stream_lock_irq(substream);
	err = pcm_accessible_state(runtime);
	if (err < 0)
		goto _end_unlock;

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);

	/*
	 * If size < start_threshold, wait indefinitely. Another
	 * thread may start capture
	 */
	if (!is_playback &&
	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
	    size >= runtime->start_threshold) {
		err = snd_pcm_start(substream);
		if (err < 0)
			goto _end_unlock;
	}

	avail = snd_pcm_avail(substream);

	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;
		if (!avail) {
			if (!is_playback &&
			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				goto _end_unlock;
			}
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					       runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
			if (!avail)
				continue; /* draining */
		}
		frames = size > avail ? avail : size;
		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		appl_ofs = appl_ptr % runtime->buffer_size;
		cont = runtime->buffer_size - appl_ofs;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			err = -EINVAL;
			goto _end_unlock;
		}
		snd_pcm_stream_unlock_irq(substream);
		err = writer(substream, appl_ofs, data, offset, frames,
			     transfer);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		err = pcm_accessible_state(runtime);
		if (err < 0)
			goto _end_unlock;
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
		if (err < 0)
			goto _end_unlock;

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
		if (is_playback &&
		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
		    snd_pcm_playback_hw_avail(runtime) >=
				(snd_pcm_sframes_t)runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
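/*
 * How callers reach the loop above: the read/write entry points differ only
 * in the flag combination they pass.  Roughly, paraphrasing the inline
 * wrappers in include/sound/pcm.h:
 *
 *	interleaved user-space write:
 *		__snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
 *	non-interleaved user-space write (bufs is an array of per-channel pointers):
 *		__snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false);
 *	in-kernel write (e.g. from the OSS emulation layer):
 *		__snd_pcm_lib_xfer(substream, (void *)buf, true, frames, true);
 */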
/*
 * standard channel mapping helpers
 */

/* default channel maps for multi-channel playbacks, up to 8 channels */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);

/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
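/*
 * Sketch of a driver-private map in the same format (hypothetical table, not
 * part of this file): a device limited to stereo and quadraphonic layouts
 * would terminate its table with an empty element just like the tables above.
 *
 *	static const struct snd_pcm_chmap_elem my_quad_chmaps[] = {
 *		{ .channels = 2,
 *		  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
 *		{ .channels = 4,
 *		  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
 *			   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
 *		{ }
 *	};
 */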
static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
{
	if (ch > info->max_channels)
		return false;
	return !info->channel_mask || (info->channel_mask & (1U << ch));
}

static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = info->max_channels;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

/* get callback for channel map ctl element
 * stores the channel position firstly matching with the current channels
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	const struct snd_pcm_chmap_elem *map;

	if (!info->chmap)
		return -EINVAL;
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
	       sizeof(ucontrol->value.integer.value));
	if (!substream->runtime)
		return 0; /* no channels set */
	for (map = info->chmap; map->channels; map++) {
		int i;
		if (map->channels == substream->runtime->channels &&
		    valid_chmap_channels(info, map->channels)) {
			for (i = 0; i < map->channels; i++)
				ucontrol->value.integer.value[i] = map->map[i];
			return 0;
		}
	}
	return -EINVAL;
}

/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in a form of TLV
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	const struct snd_pcm_chmap_elem *map;
	unsigned int __user *dst;
	int c, count = 0;

	if (!info->chmap)
		return -EINVAL;
	if (size < 8)
		return -ENOMEM;
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;
	for (map = info->chmap; map->channels; map++) {
		int chs_bytes = map->channels * 4;
		if (!valid_chmap_channels(info, map->channels))
			continue;
		if (size < 8)
			return -ENOMEM;
		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
		    put_user(chs_bytes, dst + 1))
			return -EFAULT;
		dst += 2;
		size -= 8;
		count += 8;
		if (size < chs_bytes)
			return -ENOMEM;
		size -= chs_bytes;
		count += chs_bytes;
		for (c = 0; c < map->channels; c++) {
			if (put_user(map->map[c], dst))
				return -EFAULT;
			dst++;
		}
	}
	if (put_user(count, tlv + 1))
		return -EFAULT;
	return 0;
}
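/*
 * Illustration of the TLV data produced above (generic layout derived from
 * the code; the concrete numbers depend on the chmap table): for a table
 * holding only the stereo entry of snd_pcm_std_chmaps, user space reads
 *
 *	tlv[0] = SNDRV_CTL_TLVT_CONTAINER
 *	tlv[1] = 16			// total payload bytes: 8 header + 2 channels * 4
 *	tlv[2] = SNDRV_CTL_TLVT_CHMAP_FIXED
 *	tlv[3] = 8			// 2 channels * 4 bytes
 *	tlv[4] = SNDRV_CHMAP_FL
 *	tlv[5] = SNDRV_CHMAP_FR
 *
 * and each further map entry appends another CHMAP_FIXED block of the same shape.
 */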
static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	info->pcm->streams[info->stream].chmap_kctl = NULL;
	kfree(info);
}

/**
 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
 * @pcm: the assigned PCM instance
 * @stream: stream direction
 * @chmap: channel map elements (for query)
 * @max_channels: the max number of channels for the stream
 * @private_value: the value passed to each kcontrol's private_value field
 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
 *
 * Create channel-mapping control elements assigned to the given PCM stream(s).
 *
 * Return: Zero if successful, or a negative error value.
 */
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
			   const struct snd_pcm_chmap_elem *chmap,
			   int max_channels,
			   unsigned long private_value,
			   struct snd_pcm_chmap **info_ret)
{
	struct snd_pcm_chmap *info;
	struct snd_kcontrol_new knew = {
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.access = SNDRV_CTL_ELEM_ACCESS_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
		.info = pcm_chmap_ctl_info,
		.get = pcm_chmap_ctl_get,
		.tlv.c = pcm_chmap_ctl_tlv,
	};
	int err;

	if (WARN_ON(pcm->streams[stream].chmap_kctl))
		return -EBUSY;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->pcm = pcm;
	info->stream = stream;
	info->chmap = chmap;
	info->max_channels = max_channels;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		knew.name = "Playback Channel Map";
	else
		knew.name = "Capture Channel Map";
	knew.device = pcm->device;
	knew.count = pcm->streams[stream].substream_count;
	knew.private_value = private_value;
	info->kctl = snd_ctl_new1(&knew, info);
	if (!info->kctl) {
		kfree(info);
		return -ENOMEM;
	}
	info->kctl->private_free = pcm_chmap_ctl_private_free;
	err = snd_ctl_add(pcm->card, info->kctl);
	if (err < 0)
		return err;
	pcm->streams[stream].chmap_kctl = info->kctl;
	if (info_ret)
		*info_ret = info;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
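/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * with a multi-channel playback stream typically registers the standard maps
 * right after creating its PCM:
 *
 *	struct snd_pcm_chmap *chmap;
 *	int err;
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8,	// up to 8 channels
 *				     0, &chmap);
 *	if (err < 0)
 *		return err;
 *
 * The returned info pointer can be kept if the driver wants to swap
 * chmap->chmap later, e.g. when the supported layout depends on the
 * connected device.
 */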