// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Digital Audio (PCM) abstract layer
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Abramo Bagnara <abramo@alsa-project.org>
 */

#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#define trace_applptr(substream, prev, curr)
#endif

static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;
	int err;

	if (runtime->silence_size < runtime->boundary) {
		snd_pcm_sframes_t noise_dist, n;
		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		if (runtime->silence_start != appl_ptr) {
			n = appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ? avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	ofs = runtime->silence_start % runtime->buffer_size;
	while (frames > 0) {
		transfer = ofs + frames > runtime->buffer_size ?
			runtime->buffer_size - ofs : frames;
		err = fill_silence_frames(substream, ofs, transfer);
		snd_BUG_ON(err < 0);
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;
	}
}

#ifdef CONFIG_SND_DEBUG
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
			char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ? 'c' : 'p',
		 substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif

#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG

#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

/* call with stream lock held */
void __snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	trace_xrun(substream);
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		snd_pcm_debug_name(substream, name, sizeof(name));
		pcm_warn(substream->pcm, "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
	do {								\
		trace_hw_ptr_error(substream, reason);			\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
					   (in_interrupt) ? 'Q' : 'P', ##args); \
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)

#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */

#define hw_ptr_error(substream, fmt, args...) do { } while (0)

#endif

int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	avail = snd_pcm_avail(substream);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			__snd_pcm_xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}

static void update_audio_tstamp(struct snd_pcm_substream *substream,
				struct timespec *curr_tstamp,
				struct timespec *audio_tstamp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	u64 audio_frames, audio_nsecs;
	struct timespec driver_tstamp;

	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
		return;

	if (!(substream->ops->get_time_info) ||
		(runtime->audio_tstamp_report.actual_type ==
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {

		/*
		 * provide audio timestamp derived from pointer position
		 * add delay only if requested
		 */

		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;

		if (runtime->audio_tstamp_config.report_delay) {
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
				audio_frames -= runtime->delay;
			else
				audio_frames += runtime->delay;
		}
		audio_nsecs = div_u64(audio_frames * 1000000000LL,
				runtime->rate);
		*audio_tstamp = ns_to_timespec(audio_nsecs);
	}
	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
		runtime->status->audio_tstamp = *audio_tstamp;
		runtime->status->tstamp = *curr_tstamp;
	}

	/*
	 * re-take a driver timestamp to let apps detect if the reference tstamp
	 * read by low-level hardware was provided with a delay
	 */
	snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
	runtime->driver_tstamp = driver_tstamp;
}

static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
				  unsigned int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
	snd_pcm_sframes_t hdelta, delta;
	unsigned long jdelta;
	unsigned long curr_jiffies;
	struct timespec curr_tstamp;
	struct timespec audio_tstamp;
	int crossed_boundary = 0;

	old_hw_ptr = runtime->status->hw_ptr;

	/*
	 * group pointer, time and jiffies reads to allow for more
	 * accurate correlations/corrections.
	 * The values are stored at the end of this routine after
	 * corrections for hw_ptr position
	 */
	pos = substream->ops->pointer(substream);
	curr_jiffies = jiffies;
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		if ((substream->ops->get_time_info) &&
			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
			substream->ops->get_time_info(substream, &curr_tstamp,
						&audio_tstamp,
						&runtime->audio_tstamp_config,
						&runtime->audio_tstamp_report);

			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
				snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
		} else
			snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
	}

	if (pos == SNDRV_PCM_POS_XRUN) {
		__snd_pcm_xrun(substream);
		return -EPIPE;
	}
	if (pos >= runtime->buffer_size) {
		if (printk_ratelimit()) {
			char name[16];
			snd_pcm_debug_name(substream, name, sizeof(name));
			pcm_err(substream->pcm,
				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
				name, pos, runtime->buffer_size,
				runtime->period_size);
		}
		pos = 0;
	}
	pos -= pos % runtime->min_align;
	trace_hwptr(substream, pos, in_interrupt);
	hw_base = runtime->hw_ptr_base;
	new_hw_ptr = hw_base + pos;
	if (in_interrupt) {
		/* we know that one period was processed */
		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
		delta = runtime->hw_ptr_interrupt + runtime->period_size;
		if (delta > new_hw_ptr) {
			/* check for double acknowledged interrupts */
			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
				hw_base += runtime->buffer_size;
				if (hw_base >= runtime->boundary) {
					hw_base = 0;
					crossed_boundary++;
				}
				new_hw_ptr = hw_base + pos;
				goto __delta;
			}
		}
	}
	/* new_hw_ptr might be lower than old_hw_ptr in case when */
	/* pointer crosses the end of the ring buffer */
	if (new_hw_ptr < old_hw_ptr) {
		hw_base += runtime->buffer_size;
		if (hw_base >= runtime->boundary) {
			hw_base = 0;
			crossed_boundary++;
		}
		new_hw_ptr = hw_base + pos;
	}
      __delta:
	delta = new_hw_ptr - old_hw_ptr;
	if (delta < 0)
		delta += runtime->boundary;

	if (runtime->no_period_wakeup) {
		snd_pcm_sframes_t xrun_threshold;
		/*
		 * Without regular period interrupts, we have to check
		 * the elapsed time to detect xruns.
		 */
		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
			goto no_delta_check;
		hdelta = jdelta - delta * HZ / runtime->rate;
		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
		while (hdelta > xrun_threshold) {
			delta += runtime->buffer_size;
			hw_base += runtime->buffer_size;
			if (hw_base >= runtime->boundary) {
				hw_base = 0;
				crossed_boundary++;
			}
			new_hw_ptr = hw_base + pos;
			hdelta -= runtime->hw_ptr_buffer_jiffies;
		}
		goto no_delta_check;
	}

	/* something must be really wrong */
	if (delta >= runtime->buffer_size + runtime->period_size) {
		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)pos,
			     (long)new_hw_ptr, (long)old_hw_ptr);
		return 0;
	}

	/* Do jiffies check only in xrun_debug mode */
	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
		goto no_jiffies_check;

	/* Skip the jiffies check for hardwares with BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
		goto no_jiffies_check;
	hdelta = delta;
	if (hdelta < runtime->delay)
		goto no_jiffies_check;
	hdelta -= runtime->delay;
	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
		delta = jdelta /
			(((runtime->period_size * HZ) / runtime->rate)
								+ HZ/100);
		/* move new_hw_ptr according jiffies not pos variable */
		new_hw_ptr = old_hw_ptr;
		hw_base = delta;
		/* use loop to avoid checks for delta overflows */
		/* the delta value is small or zero in most cases */
		while (delta > 0) {
			new_hw_ptr += runtime->period_size;
			if (new_hw_ptr >= runtime->boundary) {
				new_hw_ptr -= runtime->boundary;
				crossed_boundary--;
			}
			delta--;
		}
		/* align hw_base to buffer_size */
		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
			     (long)pos, (long)hdelta,
			     (long)runtime->period_size, jdelta,
			     ((hdelta * HZ) / runtime->rate), hw_base,
			     (unsigned long)old_hw_ptr,
			     (unsigned long)new_hw_ptr);
		/* reset values to proper state */
		delta = 0;
		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
	}
 no_jiffies_check:
	if (delta > runtime->period_size + runtime->period_size / 2) {
		hw_ptr_error(substream, in_interrupt,
			     "Lost interrupts?",
			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)delta,
			     (long)new_hw_ptr,
			     (long)old_hw_ptr);
	}

 no_delta_check:
	if (runtime->status->hw_ptr == new_hw_ptr) {
		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
		return 0;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, new_hw_ptr);

	if (in_interrupt) {
		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
		if (delta < 0)
			delta += runtime->boundary;
		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
		runtime->hw_ptr_interrupt += delta;
		if (runtime->hw_ptr_interrupt >= runtime->boundary)
			runtime->hw_ptr_interrupt -= runtime->boundary;
	}
	runtime->hw_ptr_base = hw_base;
	runtime->status->hw_ptr = new_hw_ptr;
	runtime->hw_ptr_jiffies = curr_jiffies;
	if (crossed_boundary) {
		snd_BUG_ON(crossed_boundary != 1);
		runtime->hw_ptr_wrap += runtime->boundary;
	}

	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);

	return snd_pcm_update_state(substream, runtime);
}

/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	return snd_pcm_update_hw_ptr0(substream, 0);
}

/**
 * snd_pcm_set_ops - set the PCM operators
 * @pcm: the pcm instance
 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 * @ops: the operator table
 *
 * Sets the given PCM operators to the pcm instance.
 */
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
		     const struct snd_pcm_ops *ops)
{
	struct snd_pcm_str *stream = &pcm->streams[direction];
	struct snd_pcm_substream *substream;

	for (substream = stream->substream; substream != NULL; substream = substream->next)
		substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);

/**
 * snd_pcm_set_sync - set the PCM sync id
 * @substream: the pcm substream
 *
 * Sets the PCM sync identifier for the card.
 */
void snd_pcm_set_sync(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->sync.id32[0] = substream->pcm->card->number;
	runtime->sync.id32[1] = -1;
	runtime->sync.id32[2] = -1;
	runtime->sync.id32[3] = -1;
}
EXPORT_SYMBOL(snd_pcm_set_sync);

/*
 *  Standard ioctl routine
 */

static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;
	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

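/*
 * Worked example for the interval refinement below (illustrative only, not
 * part of the original sources): refining [8000, 96000] against
 * [44100, 48000] narrows the interval to [44100, 48000] and returns a
 * positive value; refining [44100, 48000] against itself returns 0; and
 * refining it against [192000, 192000] leaves an empty interval and
 * returns -EINVAL.  A hypothetical caller-side sketch:
 *
 *	struct snd_interval t;
 *
 *	snd_interval_any(&t);
 *	t.min = 44100;
 *	t.max = 48000;
 *	t.integer = 1;
 *	err = snd_interval_refine(hw_param_interval(params,
 *				SNDRV_PCM_HW_PARAM_RATE), &t);
 */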
/**
 * snd_interval_refine - refine the interval value of configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) are evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
	int changed = 0;
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (i->min < v->min) {
		i->min = v->min;
		i->openmin = v->openmin;
		changed = 1;
	} else if (i->min == v->min && !i->openmin && v->openmin) {
		i->openmin = 1;
		changed = 1;
	}
	if (i->max > v->max) {
		i->max = v->max;
		i->openmax = v->openmax;
		changed = 1;
	} else if (i->max == v->max && !i->openmax && v->openmax) {
		i->openmax = 1;
		changed = 1;
	}
	if (!i->integer && v->integer) {
		i->integer = 1;
		changed = 1;
	}
	if (i->integer) {
		if (i->openmin) {
			i->min++;
			i->openmin = 0;
		}
		if (i->openmax) {
			i->max--;
			i->openmax = 0;
		}
	} else if (!i->openmin && !i->openmax && i->min == i->max)
		i->integer = 1;
	if (snd_interval_checkempty(i)) {
		snd_interval_none(i);
		return -EINVAL;
	}
	return changed;
}
EXPORT_SYMBOL(snd_interval_refine);

static int snd_interval_refine_first(struct snd_interval *i)
{
	const unsigned int last_max = i->max;

	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->max = i->min;
	if (i->openmin)
		i->max++;
	/* only exclude max value if also excluded before refine */
	i->openmax = (i->openmax && i->max >= last_max);
	return 1;
}

static int snd_interval_refine_last(struct snd_interval *i)
{
	const unsigned int last_min = i->min;

	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->min = i->max;
	if (i->openmax)
		i->min--;
	/* only exclude min value if also excluded before refine */
	i->openmin = (i->openmin && i->min <= last_min);
	return 1;
}

void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = mul(a->min, b->min);
	c->openmin = (a->openmin || b->openmin);
	c->max = mul(a->max, b->max);
	c->openmax = (a->openmax || b->openmax);
	c->integer = (a->integer && b->integer);
}

/**
 * snd_interval_div - refine the interval value with division
 * @a: dividend
 * @b: divisor
 * @c: quotient
 *
 * c = a / b
 *
 * The result is stored in @c.
 */
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = div32(a->min, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = div32(a->max, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/**
 * snd_interval_muldivk - refine the interval value
 * @a: dividend 1
 * @b: dividend 2
 * @k: divisor (as integer)
 * @c: result
 *
 * c = a * b / k
 *
 * The result is stored in @c.
 */
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
			  unsigned int k, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, b->min, k, &r);
	c->openmin = (r || a->openmin || b->openmin);
	c->max = muldiv32(a->max, b->max, k, &r);
	if (r) {
		c->max++;
		c->openmax = 1;
	} else
		c->openmax = (a->openmax || b->openmax);
	c->integer = 0;
}

/**
 * snd_interval_mulkdiv - refine the interval value
 * @a: dividend 1
 * @k: dividend 2 (as integer)
 * @b: divisor
 * @c: result
 *
 * c = a * k / b
 *
 * The result is stored in @c.
 */
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
			  const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, k, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = muldiv32(a->max, k, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/* ---- */


/**
 * snd_interval_ratnum - refine the interval value
 * @i: interval to refine
 * @rats_count: number of ratnum_t
 * @rats: ratnum_t array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ratnum(struct snd_interval *i,
			unsigned int rats_count, const struct snd_ratnum *rats,
			unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_den;
	int best_diff;
	unsigned int k;
	struct snd_interval t;
	int err;
	unsigned int result_num, result_den;
	int result_diff;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->min;
		int diff;
		if (q == 0)
			q = 1;
		den = div_up(num, q);
		if (den < rats[k].den_min)
			continue;
		if (den > rats[k].den_max)
			den = rats[k].den_max;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den -= r;
		}
		diff = num - q * den;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	result_num = best_num;
	result_diff = best_diff;
	result_den = best_den;
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->max;
		int diff;
		if (q == 0) {
			i->empty = 1;
			return -EINVAL;
		}
		den = div_down(num, q);
		if (den > rats[k].den_max)
			continue;
		if (den < rats[k].den_min)
			den = rats[k].den_min;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den += rats[k].den_step - r;
		}
		diff = q * den - num;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (best_diff * result_den < result_diff * best_den) {
			result_num = best_num;
			result_den = best_den;
		}
		if (nump)
			*nump = result_num;
		if (denp)
			*denp = result_den;
	}
	return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);

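/*
 * Usage sketch (hypothetical driver code; clk_div and clk_divs are
 * illustrative names): a device that derives its sample rate by dividing a
 * fixed 24.576 MHz master clock by an integer divider between 256 and 3072
 * (giving 8 kHz to 96 kHz) can describe that with a snd_ratnum and register
 * it through snd_pcm_hw_constraint_ratnums() from its open callback; once
 * the rate collapses to a single value, *nump/*denp report the chosen
 * clock and divider:
 *
 *	static const struct snd_ratnum clk_div = {
 *		.num = 24576000,
 *		.den_min = 256,
 *		.den_max = 3072,
 *		.den_step = 1,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums clk_divs = {
 *		.nrats = 1,
 *		.rats = &clk_div,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *					    SNDRV_PCM_HW_PARAM_RATE,
 *					    &clk_divs);
 */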
/**
 * snd_interval_ratden - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct ratden
 * @rats: struct ratden array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
static int snd_interval_ratden(struct snd_interval *i,
			       unsigned int rats_count,
			       const struct snd_ratden *rats,
			       unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_diff, best_den;
	unsigned int k;
	struct snd_interval t;
	int err;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->min;
		int diff;
		num = mul(q, den);
		if (num > rats[k].num_max)
			continue;
		if (num < rats[k].num_min)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num += rats[k].num_step - r;
		}
		diff = num - q * den;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->max;
		int diff;
		num = mul(q, den);
		if (num < rats[k].num_min)
			continue;
		if (num > rats[k].num_max)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num -= r;
		}
		diff = q * den - num;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (nump)
			*nump = best_num;
		if (denp)
			*denp = best_den;
	}
	return err;
}

/**
 * snd_interval_list - refine the interval value from the list
 * @i: the interval value to refine
 * @count: the number of elements in the list
 * @list: the value list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_list(struct snd_interval *i, unsigned int count,
		      const unsigned int *list, unsigned int mask)
{
	unsigned int k;
	struct snd_interval list_range;

	if (!count) {
		i->empty = 1;
		return -EINVAL;
	}
	snd_interval_any(&list_range);
	list_range.min = UINT_MAX;
	list_range.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		if (!snd_interval_test(i, list[k]))
			continue;
		list_range.min = min(list_range.min, list[k]);
		list_range.max = max(list_range.max, list[k]);
	}
	return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);

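/*
 * Usage sketch (hypothetical driver code; my_rates and my_rate_list are
 * illustrative names): a device limited to a handful of discrete rates
 * reaches snd_interval_list() by registering the stock list constraint from
 * its .open callback:
 *
 *	static const unsigned int my_rates[] = { 8000, 16000, 32000, 48000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_list = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &my_rate_list);
 */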
/**
 * snd_interval_ranges - refine the interval value from the list of ranges
 * @i: the interval value to refine
 * @count: the number of elements in the list of ranges
 * @ranges: the ranges list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list of ranges.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
			const struct snd_interval *ranges, unsigned int mask)
{
	unsigned int k;
	struct snd_interval range_union;
	struct snd_interval range;

	if (!count) {
		snd_interval_none(i);
		return -EINVAL;
	}
	snd_interval_any(&range_union);
	range_union.min = UINT_MAX;
	range_union.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		snd_interval_copy(&range, &ranges[k]);
		if (snd_interval_refine(&range, i) < 0)
			continue;
		if (snd_interval_empty(&range))
			continue;

		if (range.min < range_union.min) {
			range_union.min = range.min;
			range_union.openmin = 1;
		}
		if (range.min == range_union.min && !range.openmin)
			range_union.openmin = 0;
		if (range.max > range_union.max) {
			range_union.max = range.max;
			range_union.openmax = 1;
		}
		if (range.max == range_union.max && !range.openmax)
			range_union.openmax = 0;
	}
	return snd_interval_refine(i, &range_union);
}
EXPORT_SYMBOL(snd_interval_ranges);

static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
	unsigned int n;
	int changed = 0;
	n = i->min % step;
	if (n != 0 || i->openmin) {
		i->min += step - n;
		i->openmin = 0;
		changed = 1;
	}
	n = i->max % step;
	if (n != 0 || i->openmax) {
		i->max -= n;
		i->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(i)) {
		i->empty = 1;
		return -EINVAL;
	}
	return changed;
}

/* Info constraints helpers */

/**
 * snd_pcm_hw_rule_add - add the hw-constraint rule
 * @runtime: the pcm runtime instance
 * @cond: condition bits
 * @var: the variable to evaluate
 * @func: the evaluation function
 * @private: the private data pointer passed to function
 * @dep: the dependent variables
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
			int var,
			snd_pcm_hw_rule_func_t func, void *private,
			int dep, ...)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_pcm_hw_rule *c;
	unsigned int k;
	va_list args;
	va_start(args, dep);
	if (constrs->rules_num >= constrs->rules_all) {
		struct snd_pcm_hw_rule *new;
		unsigned int new_rules = constrs->rules_all + 16;
		new = krealloc(constrs->rules, new_rules * sizeof(*c),
			       GFP_KERNEL);
		if (!new) {
			va_end(args);
			return -ENOMEM;
		}
		constrs->rules = new;
		constrs->rules_all = new_rules;
	}
	c = &constrs->rules[constrs->rules_num];
	c->cond = cond;
	c->func = func;
	c->var = var;
	c->private = private;
	k = 0;
	while (1) {
		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
			va_end(args);
			return -EINVAL;
		}
		c->deps[k++] = dep;
		if (dep < 0)
			break;
		dep = va_arg(args, int);
	}
	constrs->rules_num++;
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_rule_add);

/**
 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
			       u_int32_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	*maskp->bits &= mask;
	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
	if (*maskp->bits == 0)
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the 64bit bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 u_int64_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	maskp->bits[0] &= (u_int32_t)mask;
	maskp->bits[1] &= (u_int32_t)(mask >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);

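/*
 * Usage sketch for snd_pcm_hw_rule_add() (hypothetical driver code; the
 * rule function and its policy are illustrative): a device that can only do
 * stereo or mono once the rate exceeds 48 kHz can express that dependency
 * by registering a rule on CHANNELS that re-evaluates whenever RATE changes:
 *
 *	static int hw_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					    struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c = hw_param_interval(params,
 *					SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r = hw_param_interval_c(params,
 *					SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *
 *		if (r->min > 48000)
 *			return snd_interval_refine(c, &t);
 *		return 0;
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  hw_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */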
/**
 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the integer constraint
 *
 * Apply the constraint of integer to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);

/**
 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the range
 * @min: the minimal value
 * @max: the maximal value
 *
 * Apply the min/max range constraint to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 unsigned int min, unsigned int max)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_interval t;
	t.min = min;
	t.max = max;
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);

static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_list *list = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}


/**
 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list constraint
 * @l: list
 *
 * Apply the list of constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       const struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, (void *)l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);

static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ranges *r = rule->private;
	return snd_interval_ranges(hw_param_interval(params, rule->var),
				   r->count, r->ranges, r->mask);
}

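/*
 * Usage sketch (hypothetical .open callback; the numbers are illustrative):
 * snd_pcm_hw_constraint_integer() and snd_pcm_hw_constraint_minmax() above
 * are typically combined to force a whole number of periods per buffer and
 * to bound the buffer size the application may request:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime,
 *					    SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   4096, 65536);
 */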
/**
 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list of range constraints
 * @r: ranges
 *
 * Apply the list of range constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 snd_pcm_hw_param_t var,
				 const struct snd_pcm_hw_constraint_ranges *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ranges, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);

static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;
	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;
	unsigned int msbits = l >> 16;
	const struct snd_interval *i =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);

	if (!snd_interval_single(i))
		return 0;

	if ((snd_interval_value(i) == width) ||
	    (width == 0 && snd_interval_value(i) > msbits))
		params->msbits = min_not_zero(params->msbits, msbits);

	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set to 0
 * the msbits will be set for any sample format with a width larger than the
 * specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				   snd_pcm_hw_rule_msbits,
				   (void*) l,
				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);

static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
	static unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
	};
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_pow2, NULL,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);

static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
					   struct snd_pcm_hw_rule *rule)
{
	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
	struct snd_interval *rate;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	return snd_interval_list(rate, 1, &base_rate, 0);
}

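/*
 * Usage sketch (hypothetical driver code; the device properties are
 * illustrative): a codec that transfers 32-bit samples but resolves only 24
 * of them, and whose DMA engine requires the period size in bytes to be a
 * multiple of 32, could combine the msbits and step constraints above:
 *
 *	snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *	snd_pcm_hw_constraint_step(runtime, 0,
 *				   SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
 */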
/**
 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
 * @runtime: PCM runtime instance
 * @base_rate: the rate at which the hardware does not resample
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
			       unsigned int base_rate)
{
	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
				   SNDRV_PCM_HW_PARAM_RATE,
				   snd_pcm_hw_rule_noresample_func,
				   (void *)(uintptr_t)base_rate,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);

static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_any(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	if (hw_is_interval(var)) {
		snd_interval_any(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	snd_BUG();
}

void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
{
	unsigned int k;
	memset(params, 0, sizeof(*params));
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
		_snd_pcm_hw_param_any(params, k);
	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		_snd_pcm_hw_param_any(params, k);
	params->info = ~0U;
}
EXPORT_SYMBOL(_snd_pcm_hw_params_any);

/**
 * snd_pcm_hw_param_value - return @params field @var value
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Return: The value for field @var if it's fixed in configuration space
 * defined by @params. -%EINVAL otherwise.
 */
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	if (hw_is_mask(var)) {
		const struct snd_mask *mask = hw_param_mask_c(params, var);
		if (!snd_mask_single(mask))
			return -EINVAL;
		if (dir)
			*dir = 0;
		return snd_mask_value(mask);
	}
	if (hw_is_interval(var)) {
		const struct snd_interval *i = hw_param_interval_c(params, var);
		if (!snd_interval_single(i))
			return -EINVAL;
		if (dir)
			*dir = i->openmin;
		return snd_interval_value(i);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(snd_pcm_hw_param_value);

void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_none(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else if (hw_is_interval(var)) {
		snd_interval_none(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else {
		snd_BUG();
	}
}
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);

static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
				   snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_first(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_first(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_first - refine config space and return minimum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values > minimum. Reduce configuration space accordingly.
 *
 * Return: The minimum, or a negative error code on failure.
 */
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
			   struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_first(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_first);

static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_last(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_last(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_last - refine config space and return maximum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values < maximum. Reduce configuration space accordingly.
 *
 * Return: The maximum, or a negative error code on failure.
 */
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
			  struct snd_pcm_hw_params *params,
			  snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_last(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_last);

static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
				   void *arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream) &&
	    snd_pcm_update_hw_ptr(substream) >= 0)
		runtime->status->hw_ptr %= runtime->buffer_size;
	else {
		runtime->status->hw_ptr = 0;
		runtime->hw_ptr_wrap = 0;
	}
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return 0;
}

static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
					  void *arg)
{
	struct snd_pcm_channel_info *info = arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int width;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
		info->offset = -1;
		return 0;
	}
	width = snd_pcm_format_physical_width(runtime->format);
	if (width < 0)
		return width;
	info->offset = 0;
	switch (runtime->access) {
	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
		info->first = info->channel * width;
		info->step = runtime->channels * width;
		break;
	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
	{
		size_t size = runtime->dma_bytes / runtime->channels;
		info->first = info->channel * size * 8;
		info->step = width;
		break;
	}
	default:
		snd_BUG();
		break;
	}
	return 0;
}

static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
				       void *arg)
{
	struct snd_pcm_hw_params *params = arg;
	snd_pcm_format_t format;
	int channels;
	ssize_t frame_size;

	params->fifo_size = substream->runtime->hw.fifo_size;
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
		format = params_format(params);
		channels = params_channels(params);
		frame_size = snd_pcm_format_size(format, channels);
		if (frame_size > 0)
			params->fifo_size /= (unsigned)frame_size;
	}
	return 0;
}

/**
 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
 * @substream: the pcm substream instance
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Processes the generic ioctl commands for PCM.
 * Can be passed as the ioctl callback for PCM ops.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
		      unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_RESET:
		return snd_pcm_lib_ioctl_reset(substream, arg);
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
		return snd_pcm_lib_ioctl_channel_info(substream, arg);
	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
	}
	return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);

/**
 * snd_pcm_period_elapsed - update the pcm status for the next period
 * @substream: the pcm substream instance
 *
 * This function is called from the interrupt handler when the
 * PCM has processed the period size. It will update the current
 * pointer, wake up sleepers, etc.
 *
 * Even if more than one period has elapsed since the last call, you
 * have to call this only once.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	unsigned long flags;

	if (PCM_RUNTIME_CHECK(substream))
		return;
	runtime = substream->runtime;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (!snd_pcm_running(substream) ||
	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
		goto _end;

#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer_running)
		snd_timer_interrupt(substream->timer, 1);
#endif
 _end:
	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);

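/*
 * Typical call site for snd_pcm_period_elapsed() (hypothetical driver
 * interrupt handler; my_chip and my_chip_ack_irq are illustrative names):
 * once the device signals that at least one period has been transferred,
 * acknowledge the hardware and let the PCM core update its pointer and wake
 * up readers and writers:
 *
 *	static irqreturn_t my_pcm_irq(int irq, void *dev_id)
 *	{
 *		struct my_chip *chip = dev_id;
 *
 *		my_chip_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */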
/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored in availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
			  snd_pcm_uframes_t *availp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	wait_queue_entry_t wait;
	int err = 0;
	snd_pcm_uframes_t avail = 0;
	long wait_time, tout;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&runtime->tsleep, &wait);

	if (runtime->no_period_wakeup)
		wait_time = MAX_SCHEDULE_TIMEOUT;
	else {
		/* use wait time from substream if available */
		if (substream->wait_time) {
			wait_time = substream->wait_time;
		} else {
			wait_time = 10;

			if (runtime->rate) {
				long t = runtime->period_size * 2 /
					 runtime->rate;
				wait_time = max(t, wait_time);
			}
			wait_time = msecs_to_jiffies(wait_time * 1000);
		}
	}

	for (;;) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}

		/*
		 * We need to check if space became available already
		 * (and thus the wakeup happened already) first to close
		 * the race of space already having become available.
		 * This check must happen after been added to the waitqueue
		 * and having current state be INTERRUPTIBLE.
		 */
		avail = snd_pcm_avail(substream);
		if (avail >= runtime->twake)
			break;
		snd_pcm_stream_unlock_irq(substream);

		tout = schedule_timeout(wait_time);

		snd_pcm_stream_lock_irq(substream);
		set_current_state(TASK_INTERRUPTIBLE);
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_DRAINING:
			if (is_playback)
				err = -EPIPE;
			else
				avail = 0; /* indicate draining */
			goto _endloop;
		case SNDRV_PCM_STATE_OPEN:
		case SNDRV_PCM_STATE_SETUP:
		case SNDRV_PCM_STATE_DISCONNECTED:
			err = -EBADFD;
			goto _endloop;
		case SNDRV_PCM_STATE_PAUSED:
			continue;
		}
		if (!tout) {
			pcm_dbg(substream->pcm,
				"%s write error (DMA or IRQ trouble?)\n",
				is_playback ?
"playback" : "capture"); 1889 err = -EIO; 1890 break; 1891 } 1892 } 1893 _endloop: 1894 set_current_state(TASK_RUNNING); 1895 remove_wait_queue(&runtime->tsleep, &wait); 1896 *availp = avail; 1897 return err; 1898 } 1899 1900 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream, 1901 int channel, unsigned long hwoff, 1902 void *buf, unsigned long bytes); 1903 1904 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, 1905 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f); 1906 1907 /* calculate the target DMA-buffer position to be written/read */ 1908 static void *get_dma_ptr(struct snd_pcm_runtime *runtime, 1909 int channel, unsigned long hwoff) 1910 { 1911 return runtime->dma_area + hwoff + 1912 channel * (runtime->dma_bytes / runtime->channels); 1913 } 1914 1915 /* default copy_user ops for write; used for both interleaved and non- modes */ 1916 static int default_write_copy(struct snd_pcm_substream *substream, 1917 int channel, unsigned long hwoff, 1918 void *buf, unsigned long bytes) 1919 { 1920 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff), 1921 (void __user *)buf, bytes)) 1922 return -EFAULT; 1923 return 0; 1924 } 1925 1926 /* default copy_kernel ops for write */ 1927 static int default_write_copy_kernel(struct snd_pcm_substream *substream, 1928 int channel, unsigned long hwoff, 1929 void *buf, unsigned long bytes) 1930 { 1931 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes); 1932 return 0; 1933 } 1934 1935 /* fill silence instead of copy data; called as a transfer helper 1936 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when 1937 * a NULL buffer is passed 1938 */ 1939 static int fill_silence(struct snd_pcm_substream *substream, int channel, 1940 unsigned long hwoff, void *buf, unsigned long bytes) 1941 { 1942 struct snd_pcm_runtime *runtime = substream->runtime; 1943 1944 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) 1945 return 0; 1946 if (substream->ops->fill_silence) 1947 return substream->ops->fill_silence(substream, channel, 1948 hwoff, bytes); 1949 1950 snd_pcm_format_set_silence(runtime->format, 1951 get_dma_ptr(runtime, channel, hwoff), 1952 bytes_to_samples(runtime, bytes)); 1953 return 0; 1954 } 1955 1956 /* default copy_user ops for read; used for both interleaved and non- modes */ 1957 static int default_read_copy(struct snd_pcm_substream *substream, 1958 int channel, unsigned long hwoff, 1959 void *buf, unsigned long bytes) 1960 { 1961 if (copy_to_user((void __user *)buf, 1962 get_dma_ptr(substream->runtime, channel, hwoff), 1963 bytes)) 1964 return -EFAULT; 1965 return 0; 1966 } 1967 1968 /* default copy_kernel ops for read */ 1969 static int default_read_copy_kernel(struct snd_pcm_substream *substream, 1970 int channel, unsigned long hwoff, 1971 void *buf, unsigned long bytes) 1972 { 1973 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes); 1974 return 0; 1975 } 1976 1977 /* call transfer function with the converted pointers and sizes; 1978 * for interleaved mode, it's one shot for all samples 1979 */ 1980 static int interleaved_copy(struct snd_pcm_substream *substream, 1981 snd_pcm_uframes_t hwoff, void *data, 1982 snd_pcm_uframes_t off, 1983 snd_pcm_uframes_t frames, 1984 pcm_transfer_f transfer) 1985 { 1986 struct snd_pcm_runtime *runtime = substream->runtime; 1987 1988 /* convert to bytes */ 1989 hwoff = frames_to_bytes(runtime, hwoff); 1990 off = frames_to_bytes(runtime, off); 1991 frames = frames_to_bytes(runtime, frames); 1992 return 
/* call transfer function with the converted pointers and sizes for each
 * non-interleaved channel; when the buffer is NULL, silence is filled
 * instead of copying
 */
static int noninterleaved_copy(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t hwoff, void *data,
			       snd_pcm_uframes_t off,
			       snd_pcm_uframes_t frames,
			       pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int channels = runtime->channels;
	void **bufs = data;
	int c, err;

	/* convert to bytes; note that it's not frames_to_bytes() here.
	 * in non-interleaved mode, we copy for each channel, thus
	 * each copy is n_samples bytes x channels = whole frames.
	 */
	off = samples_to_bytes(runtime, off);
	frames = samples_to_bytes(runtime, frames);
	hwoff = samples_to_bytes(runtime, hwoff);
	for (c = 0; c < channels; ++c, ++bufs) {
		if (!data || !*bufs)
			err = fill_silence(substream, c, hwoff, NULL, frames);
		else
			err = transfer(substream, c, hwoff, *bufs + off,
				       frames);
		if (err < 0)
			return err;
	}
	return 0;
}

/* fill silence on the given buffer position;
 * called from snd_pcm_playback_silence()
 */
static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
{
	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
		return interleaved_copy(substream, off, NULL, 0, frames,
					fill_silence);
	else
		return noninterleaved_copy(substream, off, NULL, 0, frames,
					   fill_silence);
}

/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
		return -EINVAL;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	return 0;
}

static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
{
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		return 0;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	case SNDRV_PCM_STATE_SUSPENDED:
		return -ESTRPIPE;
	default:
		return -EBADFD;
	}
}

/* update to the given appl_ptr and call the ack callback if needed;
 * when an error is returned, roll back to the original value
 */
int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
			   snd_pcm_uframes_t appl_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
	int ret;

	if (old_appl_ptr == appl_ptr)
		return 0;

	runtime->control->appl_ptr = appl_ptr;
	if (substream->ops->ack) {
		ret = substream->ops->ack(substream);
		if (ret < 0) {
			runtime->control->appl_ptr = old_appl_ptr;
			return ret;
		}
	}

	trace_applptr(substream, old_appl_ptr, appl_ptr);

	return 0;
}
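/*
 * Illustrative sketch (not part of the PCM core): the default copy helpers
 * above are used only when a driver supplies no copy_user/copy_kernel op,
 * and pcm_lib_apply_appl_ptr() invokes the optional ack op whenever the
 * application pointer moves.  The my_chip structure and its helpers are
 * assumptions made for this example only.
 */
#if 0
static int my_pcm_copy_user(struct snd_pcm_substream *substream, int channel,
			    unsigned long pos, void __user *src,
			    unsigned long bytes)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	/* bounce through a driver buffer instead of touching dma_area */
	if (copy_from_user(chip->bounce, src, bytes))
		return -EFAULT;
	my_chip_write_buffer(chip, channel, pos, chip->bounce, bytes);
	return 0;
}

static int my_pcm_ack(struct snd_pcm_substream *substream)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* report the new appl_ptr to the hardware; a negative return
	 * value makes pcm_lib_apply_appl_ptr() restore the old pointer
	 */
	my_chip_set_appl_ptr(chip,
			     runtime->control->appl_ptr % runtime->buffer_size);
	return 0;
}

static const struct snd_pcm_ops my_pcm_ops = {
	/* open/close/hw_params/trigger/pointer ops omitted */
	.ioctl		= snd_pcm_lib_ioctl,
	.ack		= my_pcm_ack,
	.copy_user	= my_pcm_copy_user,
};
#endif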
/* the common loop for read/write data */
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
				     void *data, bool interleaved,
				     snd_pcm_uframes_t size, bool in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	pcm_copy_f writer;
	pcm_transfer_f transfer;
	bool nonblock;
	bool is_playback;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;

	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	if (interleaved) {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
		    runtime->channels > 1)
			return -EINVAL;
		writer = interleaved_copy;
	} else {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
			return -EINVAL;
		writer = noninterleaved_copy;
	}

	if (!data) {
		if (is_playback)
			transfer = fill_silence;
		else
			return -EINVAL;
	} else if (in_kernel) {
		if (substream->ops->copy_kernel)
			transfer = substream->ops->copy_kernel;
		else
			transfer = is_playback ?
				default_write_copy_kernel : default_read_copy_kernel;
	} else {
		if (substream->ops->copy_user)
			transfer = (pcm_transfer_f)substream->ops->copy_user;
		else
			transfer = is_playback ?
				default_write_copy : default_read_copy;
	}

	if (size == 0)
		return 0;

	nonblock = !!(substream->f_flags & O_NONBLOCK);

	snd_pcm_stream_lock_irq(substream);
	err = pcm_accessible_state(runtime);
	if (err < 0)
		goto _end_unlock;

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);

	/*
	 * Start the capture stream now when the requested read size already
	 * reaches start_threshold; for a smaller read keep waiting, as
	 * another thread may trigger the start.
	 */
	if (!is_playback &&
	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
	    size >= runtime->start_threshold) {
		err = snd_pcm_start(substream);
		if (err < 0)
			goto _end_unlock;
	}

	avail = snd_pcm_avail(substream);
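	/*
	 * Overview of the transfer loop below: each pass copies at most the
	 * contiguous room up to the end of the ring buffer, drops the stream
	 * lock around the (possibly faulting) copy, re-validates the stream
	 * state, then advances appl_ptr with a wrap at runtime->boundary via
	 * pcm_lib_apply_appl_ptr().  A playback stream still in PREPARED
	 * state is auto-started once enough data is queued to reach
	 * start_threshold.
	 */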
	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;
		if (!avail) {
			if (!is_playback &&
			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				goto _end_unlock;
			}
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
			if (!avail)
				continue; /* draining */
		}
		frames = size > avail ? avail : size;
		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		appl_ofs = appl_ptr % runtime->buffer_size;
		cont = runtime->buffer_size - appl_ofs;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			err = -EINVAL;
			goto _end_unlock;
		}
		snd_pcm_stream_unlock_irq(substream);
		err = writer(substream, appl_ofs, data, offset, frames,
			     transfer);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		err = pcm_accessible_state(runtime);
		if (err < 0)
			goto _end_unlock;
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
		if (err < 0)
			goto _end_unlock;

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
		if (is_playback &&
		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);

/*
 * standard channel mapping helpers
 */

/* default channel maps for multi-channel playbacks, up to 8 channels */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);

/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
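/*
 * Illustrative sketch (not part of the PCM core): a driver whose speaker
 * layout differs from the standard/alternative tables above can pass its
 * own table to snd_pcm_add_chmap_ctls().  The quad layout below is a
 * made-up example.
 */
#if 0
static const struct snd_pcm_chmap_elem my_quad_chmap[] = {
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
#endif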
static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
{
	if (ch > info->max_channels)
		return false;
	return !info->channel_mask || (info->channel_mask & (1U << ch));
}

static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = info->max_channels;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

/* get callback for channel map ctl element
 * stores the channel positions of the first map matching the current
 * channel count
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	const struct snd_pcm_chmap_elem *map;

	if (!info->chmap)
		return -EINVAL;
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
	       sizeof(ucontrol->value.integer.value));
	if (!substream->runtime)
		return 0; /* no channels set */
	for (map = info->chmap; map->channels; map++) {
		int i;
		if (map->channels == substream->runtime->channels &&
		    valid_chmap_channels(info, map->channels)) {
			for (i = 0; i < map->channels; i++)
				ucontrol->value.integer.value[i] = map->map[i];
			return 0;
		}
	}
	return -EINVAL;
}

/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in a form of TLV
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	const struct snd_pcm_chmap_elem *map;
	unsigned int __user *dst;
	int c, count = 0;

	if (!info->chmap)
		return -EINVAL;
	if (size < 8)
		return -ENOMEM;
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;
	for (map = info->chmap; map->channels; map++) {
		int chs_bytes = map->channels * 4;
		if (!valid_chmap_channels(info, map->channels))
			continue;
		if (size < 8)
			return -ENOMEM;
		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
		    put_user(chs_bytes, dst + 1))
			return -EFAULT;
		dst += 2;
		size -= 8;
		count += 8;
		if (size < chs_bytes)
			return -ENOMEM;
		size -= chs_bytes;
		count += chs_bytes;
		for (c = 0; c < map->channels; c++) {
			if (put_user(map->map[c], dst))
				return -EFAULT;
			dst++;
		}
	}
	if (put_user(count, tlv + 1))
		return -EFAULT;
	return 0;
}

static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	info->pcm->streams[info->stream].chmap_kctl = NULL;
	kfree(info);
}
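/*
 * Layout of the TLV data produced by pcm_chmap_ctl_tlv() above, spelled out
 * as 32-bit words for reference (example with two map entries):
 *
 *   [SNDRV_CTL_TLVT_CONTAINER] [total payload bytes]
 *     [SNDRV_CTL_TLVT_CHMAP_FIXED] [channels * 4] [pos 0] ... [pos N-1]
 *     [SNDRV_CTL_TLVT_CHMAP_FIXED] [channels * 4] [pos 0] ... [pos M-1]
 *
 * i.e. one fixed-chmap entry per snd_pcm_chmap_elem that passes
 * valid_chmap_channels().
 */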
/**
 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
 * @pcm: the assigned PCM instance
 * @stream: stream direction
 * @chmap: channel map elements (for query)
 * @max_channels: the max number of channels for the stream
 * @private_value: the value passed to each kcontrol's private_value field
 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
 *
 * Create channel-mapping control elements assigned to the given PCM stream(s).
 *
 * Return: Zero if successful, or a negative error value.
 */
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
			   const struct snd_pcm_chmap_elem *chmap,
			   int max_channels,
			   unsigned long private_value,
			   struct snd_pcm_chmap **info_ret)
{
	struct snd_pcm_chmap *info;
	struct snd_kcontrol_new knew = {
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.access = SNDRV_CTL_ELEM_ACCESS_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
		.info = pcm_chmap_ctl_info,
		.get = pcm_chmap_ctl_get,
		.tlv.c = pcm_chmap_ctl_tlv,
	};
	int err;

	if (WARN_ON(pcm->streams[stream].chmap_kctl))
		return -EBUSY;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->pcm = pcm;
	info->stream = stream;
	info->chmap = chmap;
	info->max_channels = max_channels;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		knew.name = "Playback Channel Map";
	else
		knew.name = "Capture Channel Map";
	knew.device = pcm->device;
	knew.count = pcm->streams[stream].substream_count;
	knew.private_value = private_value;
	info->kctl = snd_ctl_new1(&knew, info);
	if (!info->kctl) {
		kfree(info);
		return -ENOMEM;
	}
	info->kctl->private_free = pcm_chmap_ctl_private_free;
	err = snd_ctl_add(pcm->card, info->kctl);
	if (err < 0)
		return err;
	pcm->streams[stream].chmap_kctl = info->kctl;
	if (info_ret)
		*info_ret = info;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
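/*
 * Illustrative sketch (not part of the PCM core): a typical call from the
 * probe code of a hypothetical driver, attaching the standard 8-channel
 * playback maps declared above.  The my_chip pointer passed through
 * private_value is an assumption made for this example only.
 */
#if 0
static int my_driver_add_chmap(struct snd_pcm *pcm, struct my_chip *chip)
{
	struct snd_pcm_chmap *chmap_info;
	int err;

	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				     snd_pcm_std_chmaps, 8,
				     (unsigned long)chip, &chmap_info);
	if (err < 0)
		return err;
	/* chmap_info may be kept around and tweaked further if needed */
	return 0;
}
#endif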