1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Digital Audio (PCM) abstract layer 4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz> 5 * Abramo Bagnara <abramo@alsa-project.org> 6 */ 7 8 #include <linux/slab.h> 9 #include <linux/sched/signal.h> 10 #include <linux/time.h> 11 #include <linux/math64.h> 12 #include <linux/export.h> 13 #include <sound/core.h> 14 #include <sound/control.h> 15 #include <sound/tlv.h> 16 #include <sound/info.h> 17 #include <sound/pcm.h> 18 #include <sound/pcm_params.h> 19 #include <sound/timer.h> 20 21 #include "pcm_local.h" 22 23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG 24 #define CREATE_TRACE_POINTS 25 #include "pcm_trace.h" 26 #else 27 #define trace_hwptr(substream, pos, in_interrupt) 28 #define trace_xrun(substream) 29 #define trace_hw_ptr_error(substream, reason) 30 #define trace_applptr(substream, prev, curr) 31 #endif 32 33 static int fill_silence_frames(struct snd_pcm_substream *substream, 34 snd_pcm_uframes_t off, snd_pcm_uframes_t frames); 35 36 /* 37 * fill ring buffer with silence 38 * runtime->silence_start: starting pointer to silence area 39 * runtime->silence_filled: size filled with silence 40 * runtime->silence_threshold: threshold from application 41 * runtime->silence_size: maximal size from application 42 * 43 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately 44 */ 45 void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr) 46 { 47 struct snd_pcm_runtime *runtime = substream->runtime; 48 snd_pcm_uframes_t frames, ofs, transfer; 49 int err; 50 51 if (runtime->silence_size < runtime->boundary) { 52 snd_pcm_sframes_t noise_dist, n; 53 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr); 54 if (runtime->silence_start != appl_ptr) { 55 n = appl_ptr - runtime->silence_start; 56 if (n < 0) 57 n += runtime->boundary; 58 if ((snd_pcm_uframes_t)n < runtime->silence_filled) 59 runtime->silence_filled -= n; 60 else 61 runtime->silence_filled = 0; 62 runtime->silence_start = appl_ptr; 63 } 64 if (runtime->silence_filled >= runtime->buffer_size) 65 return; 66 noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled; 67 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold) 68 return; 69 frames = runtime->silence_threshold - noise_dist; 70 if (frames > runtime->silence_size) 71 frames = runtime->silence_size; 72 } else { 73 if (new_hw_ptr == ULONG_MAX) { /* initialization */ 74 snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime); 75 if (avail > runtime->buffer_size) 76 avail = runtime->buffer_size; 77 runtime->silence_filled = avail > 0 ? avail : 0; 78 runtime->silence_start = (runtime->status->hw_ptr + 79 runtime->silence_filled) % 80 runtime->boundary; 81 } else { 82 ofs = runtime->status->hw_ptr; 83 frames = new_hw_ptr - ofs; 84 if ((snd_pcm_sframes_t)frames < 0) 85 frames += runtime->boundary; 86 runtime->silence_filled -= frames; 87 if ((snd_pcm_sframes_t)runtime->silence_filled < 0) { 88 runtime->silence_filled = 0; 89 runtime->silence_start = new_hw_ptr; 90 } else { 91 runtime->silence_start = ofs; 92 } 93 } 94 frames = runtime->buffer_size - runtime->silence_filled; 95 } 96 if (snd_BUG_ON(frames > runtime->buffer_size)) 97 return; 98 if (frames == 0) 99 return; 100 ofs = runtime->silence_start % runtime->buffer_size; 101 while (frames > 0) { 102 transfer = ofs + frames > runtime->buffer_size ? 
runtime->buffer_size - ofs : frames; 103 err = fill_silence_frames(substream, ofs, transfer); 104 snd_BUG_ON(err < 0); 105 runtime->silence_filled += transfer; 106 frames -= transfer; 107 ofs = 0; 108 } 109 } 110 111 #ifdef CONFIG_SND_DEBUG 112 void snd_pcm_debug_name(struct snd_pcm_substream *substream, 113 char *name, size_t len) 114 { 115 snprintf(name, len, "pcmC%dD%d%c:%d", 116 substream->pcm->card->number, 117 substream->pcm->device, 118 substream->stream ? 'c' : 'p', 119 substream->number); 120 } 121 EXPORT_SYMBOL(snd_pcm_debug_name); 122 #endif 123 124 #define XRUN_DEBUG_BASIC (1<<0) 125 #define XRUN_DEBUG_STACK (1<<1) /* dump also stack */ 126 #define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */ 127 128 #ifdef CONFIG_SND_PCM_XRUN_DEBUG 129 130 #define xrun_debug(substream, mask) \ 131 ((substream)->pstr->xrun_debug & (mask)) 132 #else 133 #define xrun_debug(substream, mask) 0 134 #endif 135 136 #define dump_stack_on_xrun(substream) do { \ 137 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \ 138 dump_stack(); \ 139 } while (0) 140 141 /* call with stream lock held */ 142 void __snd_pcm_xrun(struct snd_pcm_substream *substream) 143 { 144 struct snd_pcm_runtime *runtime = substream->runtime; 145 146 trace_xrun(substream); 147 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { 148 struct timespec64 tstamp; 149 150 snd_pcm_gettime(runtime, &tstamp); 151 runtime->status->tstamp.tv_sec = tstamp.tv_sec; 152 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec; 153 } 154 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); 155 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { 156 char name[16]; 157 snd_pcm_debug_name(substream, name, sizeof(name)); 158 pcm_warn(substream->pcm, "XRUN: %s\n", name); 159 dump_stack_on_xrun(substream); 160 } 161 } 162 163 #ifdef CONFIG_SND_PCM_XRUN_DEBUG 164 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \ 165 do { \ 166 trace_hw_ptr_error(substream, reason); \ 167 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \ 168 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \ 169 (in_interrupt) ? 'Q' : 'P', ##args); \ 170 dump_stack_on_xrun(substream); \ 171 } \ 172 } while (0) 173 174 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */ 175 176 #define hw_ptr_error(substream, fmt, args...) 
do { } while (0) 177 178 #endif 179 180 int snd_pcm_update_state(struct snd_pcm_substream *substream, 181 struct snd_pcm_runtime *runtime) 182 { 183 snd_pcm_uframes_t avail; 184 185 avail = snd_pcm_avail(substream); 186 if (avail > runtime->avail_max) 187 runtime->avail_max = avail; 188 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { 189 if (avail >= runtime->buffer_size) { 190 snd_pcm_drain_done(substream); 191 return -EPIPE; 192 } 193 } else { 194 if (avail >= runtime->stop_threshold) { 195 __snd_pcm_xrun(substream); 196 return -EPIPE; 197 } 198 } 199 if (runtime->twake) { 200 if (avail >= runtime->twake) 201 wake_up(&runtime->tsleep); 202 } else if (avail >= runtime->control->avail_min) 203 wake_up(&runtime->sleep); 204 return 0; 205 } 206 207 static void update_audio_tstamp(struct snd_pcm_substream *substream, 208 struct timespec64 *curr_tstamp, 209 struct timespec64 *audio_tstamp) 210 { 211 struct snd_pcm_runtime *runtime = substream->runtime; 212 u64 audio_frames, audio_nsecs; 213 struct timespec64 driver_tstamp; 214 215 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE) 216 return; 217 218 if (!(substream->ops->get_time_info) || 219 (runtime->audio_tstamp_report.actual_type == 220 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) { 221 222 /* 223 * provide audio timestamp derived from pointer position 224 * add delay only if requested 225 */ 226 227 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr; 228 229 if (runtime->audio_tstamp_config.report_delay) { 230 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 231 audio_frames -= runtime->delay; 232 else 233 audio_frames += runtime->delay; 234 } 235 audio_nsecs = div_u64(audio_frames * 1000000000LL, 236 runtime->rate); 237 *audio_tstamp = ns_to_timespec64(audio_nsecs); 238 } 239 240 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec || 241 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) { 242 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec; 243 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec; 244 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec; 245 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec; 246 } 247 248 249 /* 250 * re-take a driver timestamp to let apps detect if the reference tstamp 251 * read by low-level hardware was provided with a delay 252 */ 253 snd_pcm_gettime(substream->runtime, &driver_tstamp); 254 runtime->driver_tstamp = driver_tstamp; 255 } 256 257 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, 258 unsigned int in_interrupt) 259 { 260 struct snd_pcm_runtime *runtime = substream->runtime; 261 snd_pcm_uframes_t pos; 262 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base; 263 snd_pcm_sframes_t hdelta, delta; 264 unsigned long jdelta; 265 unsigned long curr_jiffies; 266 struct timespec64 curr_tstamp; 267 struct timespec64 audio_tstamp; 268 int crossed_boundary = 0; 269 270 old_hw_ptr = runtime->status->hw_ptr; 271 272 /* 273 * group pointer, time and jiffies reads to allow for more 274 * accurate correlations/corrections. 
275 * The values are stored at the end of this routine after 276 * corrections for hw_ptr position 277 */ 278 pos = substream->ops->pointer(substream); 279 curr_jiffies = jiffies; 280 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { 281 if ((substream->ops->get_time_info) && 282 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) { 283 substream->ops->get_time_info(substream, &curr_tstamp, 284 &audio_tstamp, 285 &runtime->audio_tstamp_config, 286 &runtime->audio_tstamp_report); 287 288 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */ 289 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT) 290 snd_pcm_gettime(runtime, &curr_tstamp); 291 } else 292 snd_pcm_gettime(runtime, &curr_tstamp); 293 } 294 295 if (pos == SNDRV_PCM_POS_XRUN) { 296 __snd_pcm_xrun(substream); 297 return -EPIPE; 298 } 299 if (pos >= runtime->buffer_size) { 300 if (printk_ratelimit()) { 301 char name[16]; 302 snd_pcm_debug_name(substream, name, sizeof(name)); 303 pcm_err(substream->pcm, 304 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n", 305 name, pos, runtime->buffer_size, 306 runtime->period_size); 307 } 308 pos = 0; 309 } 310 pos -= pos % runtime->min_align; 311 trace_hwptr(substream, pos, in_interrupt); 312 hw_base = runtime->hw_ptr_base; 313 new_hw_ptr = hw_base + pos; 314 if (in_interrupt) { 315 /* we know that one period was processed */ 316 /* delta = "expected next hw_ptr" for in_interrupt != 0 */ 317 delta = runtime->hw_ptr_interrupt + runtime->period_size; 318 if (delta > new_hw_ptr) { 319 /* check for double acknowledged interrupts */ 320 hdelta = curr_jiffies - runtime->hw_ptr_jiffies; 321 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) { 322 hw_base += runtime->buffer_size; 323 if (hw_base >= runtime->boundary) { 324 hw_base = 0; 325 crossed_boundary++; 326 } 327 new_hw_ptr = hw_base + pos; 328 goto __delta; 329 } 330 } 331 } 332 /* new_hw_ptr might be lower than old_hw_ptr in case when */ 333 /* pointer crosses the end of the ring buffer */ 334 if (new_hw_ptr < old_hw_ptr) { 335 hw_base += runtime->buffer_size; 336 if (hw_base >= runtime->boundary) { 337 hw_base = 0; 338 crossed_boundary++; 339 } 340 new_hw_ptr = hw_base + pos; 341 } 342 __delta: 343 delta = new_hw_ptr - old_hw_ptr; 344 if (delta < 0) 345 delta += runtime->boundary; 346 347 if (runtime->no_period_wakeup) { 348 snd_pcm_sframes_t xrun_threshold; 349 /* 350 * Without regular period interrupts, we have to check 351 * the elapsed time to detect xruns. 
352 */ 353 jdelta = curr_jiffies - runtime->hw_ptr_jiffies; 354 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) 355 goto no_delta_check; 356 hdelta = jdelta - delta * HZ / runtime->rate; 357 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1; 358 while (hdelta > xrun_threshold) { 359 delta += runtime->buffer_size; 360 hw_base += runtime->buffer_size; 361 if (hw_base >= runtime->boundary) { 362 hw_base = 0; 363 crossed_boundary++; 364 } 365 new_hw_ptr = hw_base + pos; 366 hdelta -= runtime->hw_ptr_buffer_jiffies; 367 } 368 goto no_delta_check; 369 } 370 371 /* something must be really wrong */ 372 if (delta >= runtime->buffer_size + runtime->period_size) { 373 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr", 374 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n", 375 substream->stream, (long)pos, 376 (long)new_hw_ptr, (long)old_hw_ptr); 377 return 0; 378 } 379 380 /* Do jiffies check only in xrun_debug mode */ 381 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK)) 382 goto no_jiffies_check; 383 384 /* Skip the jiffies check for hardwares with BATCH flag. 385 * Such hardware usually just increases the position at each IRQ, 386 * thus it can't give any strange position. 387 */ 388 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH) 389 goto no_jiffies_check; 390 hdelta = delta; 391 if (hdelta < runtime->delay) 392 goto no_jiffies_check; 393 hdelta -= runtime->delay; 394 jdelta = curr_jiffies - runtime->hw_ptr_jiffies; 395 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) { 396 delta = jdelta / 397 (((runtime->period_size * HZ) / runtime->rate) 398 + HZ/100); 399 /* move new_hw_ptr according jiffies not pos variable */ 400 new_hw_ptr = old_hw_ptr; 401 hw_base = delta; 402 /* use loop to avoid checks for delta overflows */ 403 /* the delta value is small or zero in most cases */ 404 while (delta > 0) { 405 new_hw_ptr += runtime->period_size; 406 if (new_hw_ptr >= runtime->boundary) { 407 new_hw_ptr -= runtime->boundary; 408 crossed_boundary--; 409 } 410 delta--; 411 } 412 /* align hw_base to buffer_size */ 413 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping", 414 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n", 415 (long)pos, (long)hdelta, 416 (long)runtime->period_size, jdelta, 417 ((hdelta * HZ) / runtime->rate), hw_base, 418 (unsigned long)old_hw_ptr, 419 (unsigned long)new_hw_ptr); 420 /* reset values to proper state */ 421 delta = 0; 422 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size); 423 } 424 no_jiffies_check: 425 if (delta > runtime->period_size + runtime->period_size / 2) { 426 hw_ptr_error(substream, in_interrupt, 427 "Lost interrupts?", 428 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n", 429 substream->stream, (long)delta, 430 (long)new_hw_ptr, 431 (long)old_hw_ptr); 432 } 433 434 no_delta_check: 435 if (runtime->status->hw_ptr == new_hw_ptr) { 436 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); 437 return 0; 438 } 439 440 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 441 runtime->silence_size > 0) 442 snd_pcm_playback_silence(substream, new_hw_ptr); 443 444 if (in_interrupt) { 445 delta = new_hw_ptr - runtime->hw_ptr_interrupt; 446 if (delta < 0) 447 delta += runtime->boundary; 448 delta -= (snd_pcm_uframes_t)delta % runtime->period_size; 449 runtime->hw_ptr_interrupt += delta; 450 if (runtime->hw_ptr_interrupt >= runtime->boundary) 451 runtime->hw_ptr_interrupt -= runtime->boundary; 452 } 453 runtime->hw_ptr_base = hw_base; 454 runtime->status->hw_ptr = new_hw_ptr; 455 
runtime->hw_ptr_jiffies = curr_jiffies; 456 if (crossed_boundary) { 457 snd_BUG_ON(crossed_boundary != 1); 458 runtime->hw_ptr_wrap += runtime->boundary; 459 } 460 461 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); 462 463 return snd_pcm_update_state(substream, runtime); 464 } 465 466 /* CAUTION: call it with irq disabled */ 467 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream) 468 { 469 return snd_pcm_update_hw_ptr0(substream, 0); 470 } 471 472 /** 473 * snd_pcm_set_ops - set the PCM operators 474 * @pcm: the pcm instance 475 * @direction: stream direction, SNDRV_PCM_STREAM_XXX 476 * @ops: the operator table 477 * 478 * Sets the given PCM operators to the pcm instance. 479 */ 480 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, 481 const struct snd_pcm_ops *ops) 482 { 483 struct snd_pcm_str *stream = &pcm->streams[direction]; 484 struct snd_pcm_substream *substream; 485 486 for (substream = stream->substream; substream != NULL; substream = substream->next) 487 substream->ops = ops; 488 } 489 EXPORT_SYMBOL(snd_pcm_set_ops); 490 491 /** 492 * snd_pcm_sync - set the PCM sync id 493 * @substream: the pcm substream 494 * 495 * Sets the PCM sync identifier for the card. 496 */ 497 void snd_pcm_set_sync(struct snd_pcm_substream *substream) 498 { 499 struct snd_pcm_runtime *runtime = substream->runtime; 500 501 runtime->sync.id32[0] = substream->pcm->card->number; 502 runtime->sync.id32[1] = -1; 503 runtime->sync.id32[2] = -1; 504 runtime->sync.id32[3] = -1; 505 } 506 EXPORT_SYMBOL(snd_pcm_set_sync); 507 508 /* 509 * Standard ioctl routine 510 */ 511 512 static inline unsigned int div32(unsigned int a, unsigned int b, 513 unsigned int *r) 514 { 515 if (b == 0) { 516 *r = 0; 517 return UINT_MAX; 518 } 519 *r = a % b; 520 return a / b; 521 } 522 523 static inline unsigned int div_down(unsigned int a, unsigned int b) 524 { 525 if (b == 0) 526 return UINT_MAX; 527 return a / b; 528 } 529 530 static inline unsigned int div_up(unsigned int a, unsigned int b) 531 { 532 unsigned int r; 533 unsigned int q; 534 if (b == 0) 535 return UINT_MAX; 536 q = div32(a, b, &r); 537 if (r) 538 ++q; 539 return q; 540 } 541 542 static inline unsigned int mul(unsigned int a, unsigned int b) 543 { 544 if (a == 0) 545 return 0; 546 if (div_down(UINT_MAX, a) < b) 547 return UINT_MAX; 548 return a * b; 549 } 550 551 static inline unsigned int muldiv32(unsigned int a, unsigned int b, 552 unsigned int c, unsigned int *r) 553 { 554 u_int64_t n = (u_int64_t) a * b; 555 if (c == 0) { 556 *r = 0; 557 return UINT_MAX; 558 } 559 n = div_u64_rem(n, c, r); 560 if (n >= UINT_MAX) { 561 *r = 0; 562 return UINT_MAX; 563 } 564 return n; 565 } 566 567 /** 568 * snd_interval_refine - refine the interval value of configurator 569 * @i: the interval value to refine 570 * @v: the interval value to refer to 571 * 572 * Refines the interval value with the reference value. 573 * The interval is changed to the range satisfying both intervals. 574 * The interval status (min, max, integer, etc.) are evaluated. 575 * 576 * Return: Positive if the value is changed, zero if it's not changed, or a 577 * negative error code. 
578 */ 579 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v) 580 { 581 int changed = 0; 582 if (snd_BUG_ON(snd_interval_empty(i))) 583 return -EINVAL; 584 if (i->min < v->min) { 585 i->min = v->min; 586 i->openmin = v->openmin; 587 changed = 1; 588 } else if (i->min == v->min && !i->openmin && v->openmin) { 589 i->openmin = 1; 590 changed = 1; 591 } 592 if (i->max > v->max) { 593 i->max = v->max; 594 i->openmax = v->openmax; 595 changed = 1; 596 } else if (i->max == v->max && !i->openmax && v->openmax) { 597 i->openmax = 1; 598 changed = 1; 599 } 600 if (!i->integer && v->integer) { 601 i->integer = 1; 602 changed = 1; 603 } 604 if (i->integer) { 605 if (i->openmin) { 606 i->min++; 607 i->openmin = 0; 608 } 609 if (i->openmax) { 610 i->max--; 611 i->openmax = 0; 612 } 613 } else if (!i->openmin && !i->openmax && i->min == i->max) 614 i->integer = 1; 615 if (snd_interval_checkempty(i)) { 616 snd_interval_none(i); 617 return -EINVAL; 618 } 619 return changed; 620 } 621 EXPORT_SYMBOL(snd_interval_refine); 622 623 static int snd_interval_refine_first(struct snd_interval *i) 624 { 625 const unsigned int last_max = i->max; 626 627 if (snd_BUG_ON(snd_interval_empty(i))) 628 return -EINVAL; 629 if (snd_interval_single(i)) 630 return 0; 631 i->max = i->min; 632 if (i->openmin) 633 i->max++; 634 /* only exclude max value if also excluded before refine */ 635 i->openmax = (i->openmax && i->max >= last_max); 636 return 1; 637 } 638 639 static int snd_interval_refine_last(struct snd_interval *i) 640 { 641 const unsigned int last_min = i->min; 642 643 if (snd_BUG_ON(snd_interval_empty(i))) 644 return -EINVAL; 645 if (snd_interval_single(i)) 646 return 0; 647 i->min = i->max; 648 if (i->openmax) 649 i->min--; 650 /* only exclude min value if also excluded before refine */ 651 i->openmin = (i->openmin && i->min <= last_min); 652 return 1; 653 } 654 655 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) 656 { 657 if (a->empty || b->empty) { 658 snd_interval_none(c); 659 return; 660 } 661 c->empty = 0; 662 c->min = mul(a->min, b->min); 663 c->openmin = (a->openmin || b->openmin); 664 c->max = mul(a->max, b->max); 665 c->openmax = (a->openmax || b->openmax); 666 c->integer = (a->integer && b->integer); 667 } 668 669 /** 670 * snd_interval_div - refine the interval value with division 671 * @a: dividend 672 * @b: divisor 673 * @c: quotient 674 * 675 * c = a / b 676 * 677 * Returns non-zero if the value is changed, zero if not changed. 678 */ 679 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) 680 { 681 unsigned int r; 682 if (a->empty || b->empty) { 683 snd_interval_none(c); 684 return; 685 } 686 c->empty = 0; 687 c->min = div32(a->min, b->max, &r); 688 c->openmin = (r || a->openmin || b->openmax); 689 if (b->min > 0) { 690 c->max = div32(a->max, b->min, &r); 691 if (r) { 692 c->max++; 693 c->openmax = 1; 694 } else 695 c->openmax = (a->openmax || b->openmin); 696 } else { 697 c->max = UINT_MAX; 698 c->openmax = 0; 699 } 700 c->integer = 0; 701 } 702 703 /** 704 * snd_interval_muldivk - refine the interval value 705 * @a: dividend 1 706 * @b: dividend 2 707 * @k: divisor (as integer) 708 * @c: result 709 * 710 * c = a * b / k 711 * 712 * Returns non-zero if the value is changed, zero if not changed. 
713 */ 714 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b, 715 unsigned int k, struct snd_interval *c) 716 { 717 unsigned int r; 718 if (a->empty || b->empty) { 719 snd_interval_none(c); 720 return; 721 } 722 c->empty = 0; 723 c->min = muldiv32(a->min, b->min, k, &r); 724 c->openmin = (r || a->openmin || b->openmin); 725 c->max = muldiv32(a->max, b->max, k, &r); 726 if (r) { 727 c->max++; 728 c->openmax = 1; 729 } else 730 c->openmax = (a->openmax || b->openmax); 731 c->integer = 0; 732 } 733 734 /** 735 * snd_interval_mulkdiv - refine the interval value 736 * @a: dividend 1 737 * @k: dividend 2 (as integer) 738 * @b: divisor 739 * @c: result 740 * 741 * c = a * k / b 742 * 743 * Returns non-zero if the value is changed, zero if not changed. 744 */ 745 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k, 746 const struct snd_interval *b, struct snd_interval *c) 747 { 748 unsigned int r; 749 if (a->empty || b->empty) { 750 snd_interval_none(c); 751 return; 752 } 753 c->empty = 0; 754 c->min = muldiv32(a->min, k, b->max, &r); 755 c->openmin = (r || a->openmin || b->openmax); 756 if (b->min > 0) { 757 c->max = muldiv32(a->max, k, b->min, &r); 758 if (r) { 759 c->max++; 760 c->openmax = 1; 761 } else 762 c->openmax = (a->openmax || b->openmin); 763 } else { 764 c->max = UINT_MAX; 765 c->openmax = 0; 766 } 767 c->integer = 0; 768 } 769 770 /* ---- */ 771 772 773 /** 774 * snd_interval_ratnum - refine the interval value 775 * @i: interval to refine 776 * @rats_count: number of ratnum_t 777 * @rats: ratnum_t array 778 * @nump: pointer to store the resultant numerator 779 * @denp: pointer to store the resultant denominator 780 * 781 * Return: Positive if the value is changed, zero if it's not changed, or a 782 * negative error code. 
783 */ 784 int snd_interval_ratnum(struct snd_interval *i, 785 unsigned int rats_count, const struct snd_ratnum *rats, 786 unsigned int *nump, unsigned int *denp) 787 { 788 unsigned int best_num, best_den; 789 int best_diff; 790 unsigned int k; 791 struct snd_interval t; 792 int err; 793 unsigned int result_num, result_den; 794 int result_diff; 795 796 best_num = best_den = best_diff = 0; 797 for (k = 0; k < rats_count; ++k) { 798 unsigned int num = rats[k].num; 799 unsigned int den; 800 unsigned int q = i->min; 801 int diff; 802 if (q == 0) 803 q = 1; 804 den = div_up(num, q); 805 if (den < rats[k].den_min) 806 continue; 807 if (den > rats[k].den_max) 808 den = rats[k].den_max; 809 else { 810 unsigned int r; 811 r = (den - rats[k].den_min) % rats[k].den_step; 812 if (r != 0) 813 den -= r; 814 } 815 diff = num - q * den; 816 if (diff < 0) 817 diff = -diff; 818 if (best_num == 0 || 819 diff * best_den < best_diff * den) { 820 best_diff = diff; 821 best_den = den; 822 best_num = num; 823 } 824 } 825 if (best_den == 0) { 826 i->empty = 1; 827 return -EINVAL; 828 } 829 t.min = div_down(best_num, best_den); 830 t.openmin = !!(best_num % best_den); 831 832 result_num = best_num; 833 result_diff = best_diff; 834 result_den = best_den; 835 best_num = best_den = best_diff = 0; 836 for (k = 0; k < rats_count; ++k) { 837 unsigned int num = rats[k].num; 838 unsigned int den; 839 unsigned int q = i->max; 840 int diff; 841 if (q == 0) { 842 i->empty = 1; 843 return -EINVAL; 844 } 845 den = div_down(num, q); 846 if (den > rats[k].den_max) 847 continue; 848 if (den < rats[k].den_min) 849 den = rats[k].den_min; 850 else { 851 unsigned int r; 852 r = (den - rats[k].den_min) % rats[k].den_step; 853 if (r != 0) 854 den += rats[k].den_step - r; 855 } 856 diff = q * den - num; 857 if (diff < 0) 858 diff = -diff; 859 if (best_num == 0 || 860 diff * best_den < best_diff * den) { 861 best_diff = diff; 862 best_den = den; 863 best_num = num; 864 } 865 } 866 if (best_den == 0) { 867 i->empty = 1; 868 return -EINVAL; 869 } 870 t.max = div_up(best_num, best_den); 871 t.openmax = !!(best_num % best_den); 872 t.integer = 0; 873 err = snd_interval_refine(i, &t); 874 if (err < 0) 875 return err; 876 877 if (snd_interval_single(i)) { 878 if (best_diff * result_den < result_diff * best_den) { 879 result_num = best_num; 880 result_den = best_den; 881 } 882 if (nump) 883 *nump = result_num; 884 if (denp) 885 *denp = result_den; 886 } 887 return err; 888 } 889 EXPORT_SYMBOL(snd_interval_ratnum); 890 891 /** 892 * snd_interval_ratden - refine the interval value 893 * @i: interval to refine 894 * @rats_count: number of struct ratden 895 * @rats: struct ratden array 896 * @nump: pointer to store the resultant numerator 897 * @denp: pointer to store the resultant denominator 898 * 899 * Return: Positive if the value is changed, zero if it's not changed, or a 900 * negative error code. 
901 */ 902 static int snd_interval_ratden(struct snd_interval *i, 903 unsigned int rats_count, 904 const struct snd_ratden *rats, 905 unsigned int *nump, unsigned int *denp) 906 { 907 unsigned int best_num, best_diff, best_den; 908 unsigned int k; 909 struct snd_interval t; 910 int err; 911 912 best_num = best_den = best_diff = 0; 913 for (k = 0; k < rats_count; ++k) { 914 unsigned int num; 915 unsigned int den = rats[k].den; 916 unsigned int q = i->min; 917 int diff; 918 num = mul(q, den); 919 if (num > rats[k].num_max) 920 continue; 921 if (num < rats[k].num_min) 922 num = rats[k].num_max; 923 else { 924 unsigned int r; 925 r = (num - rats[k].num_min) % rats[k].num_step; 926 if (r != 0) 927 num += rats[k].num_step - r; 928 } 929 diff = num - q * den; 930 if (best_num == 0 || 931 diff * best_den < best_diff * den) { 932 best_diff = diff; 933 best_den = den; 934 best_num = num; 935 } 936 } 937 if (best_den == 0) { 938 i->empty = 1; 939 return -EINVAL; 940 } 941 t.min = div_down(best_num, best_den); 942 t.openmin = !!(best_num % best_den); 943 944 best_num = best_den = best_diff = 0; 945 for (k = 0; k < rats_count; ++k) { 946 unsigned int num; 947 unsigned int den = rats[k].den; 948 unsigned int q = i->max; 949 int diff; 950 num = mul(q, den); 951 if (num < rats[k].num_min) 952 continue; 953 if (num > rats[k].num_max) 954 num = rats[k].num_max; 955 else { 956 unsigned int r; 957 r = (num - rats[k].num_min) % rats[k].num_step; 958 if (r != 0) 959 num -= r; 960 } 961 diff = q * den - num; 962 if (best_num == 0 || 963 diff * best_den < best_diff * den) { 964 best_diff = diff; 965 best_den = den; 966 best_num = num; 967 } 968 } 969 if (best_den == 0) { 970 i->empty = 1; 971 return -EINVAL; 972 } 973 t.max = div_up(best_num, best_den); 974 t.openmax = !!(best_num % best_den); 975 t.integer = 0; 976 err = snd_interval_refine(i, &t); 977 if (err < 0) 978 return err; 979 980 if (snd_interval_single(i)) { 981 if (nump) 982 *nump = best_num; 983 if (denp) 984 *denp = best_den; 985 } 986 return err; 987 } 988 989 /** 990 * snd_interval_list - refine the interval value from the list 991 * @i: the interval value to refine 992 * @count: the number of elements in the list 993 * @list: the value list 994 * @mask: the bit-mask to evaluate 995 * 996 * Refines the interval value from the list. 997 * When mask is non-zero, only the elements corresponding to bit 1 are 998 * evaluated. 999 * 1000 * Return: Positive if the value is changed, zero if it's not changed, or a 1001 * negative error code. 
1002 */ 1003 int snd_interval_list(struct snd_interval *i, unsigned int count, 1004 const unsigned int *list, unsigned int mask) 1005 { 1006 unsigned int k; 1007 struct snd_interval list_range; 1008 1009 if (!count) { 1010 i->empty = 1; 1011 return -EINVAL; 1012 } 1013 snd_interval_any(&list_range); 1014 list_range.min = UINT_MAX; 1015 list_range.max = 0; 1016 for (k = 0; k < count; k++) { 1017 if (mask && !(mask & (1 << k))) 1018 continue; 1019 if (!snd_interval_test(i, list[k])) 1020 continue; 1021 list_range.min = min(list_range.min, list[k]); 1022 list_range.max = max(list_range.max, list[k]); 1023 } 1024 return snd_interval_refine(i, &list_range); 1025 } 1026 EXPORT_SYMBOL(snd_interval_list); 1027 1028 /** 1029 * snd_interval_ranges - refine the interval value from the list of ranges 1030 * @i: the interval value to refine 1031 * @count: the number of elements in the list of ranges 1032 * @ranges: the ranges list 1033 * @mask: the bit-mask to evaluate 1034 * 1035 * Refines the interval value from the list of ranges. 1036 * When mask is non-zero, only the elements corresponding to bit 1 are 1037 * evaluated. 1038 * 1039 * Return: Positive if the value is changed, zero if it's not changed, or a 1040 * negative error code. 1041 */ 1042 int snd_interval_ranges(struct snd_interval *i, unsigned int count, 1043 const struct snd_interval *ranges, unsigned int mask) 1044 { 1045 unsigned int k; 1046 struct snd_interval range_union; 1047 struct snd_interval range; 1048 1049 if (!count) { 1050 snd_interval_none(i); 1051 return -EINVAL; 1052 } 1053 snd_interval_any(&range_union); 1054 range_union.min = UINT_MAX; 1055 range_union.max = 0; 1056 for (k = 0; k < count; k++) { 1057 if (mask && !(mask & (1 << k))) 1058 continue; 1059 snd_interval_copy(&range, &ranges[k]); 1060 if (snd_interval_refine(&range, i) < 0) 1061 continue; 1062 if (snd_interval_empty(&range)) 1063 continue; 1064 1065 if (range.min < range_union.min) { 1066 range_union.min = range.min; 1067 range_union.openmin = 1; 1068 } 1069 if (range.min == range_union.min && !range.openmin) 1070 range_union.openmin = 0; 1071 if (range.max > range_union.max) { 1072 range_union.max = range.max; 1073 range_union.openmax = 1; 1074 } 1075 if (range.max == range_union.max && !range.openmax) 1076 range_union.openmax = 0; 1077 } 1078 return snd_interval_refine(i, &range_union); 1079 } 1080 EXPORT_SYMBOL(snd_interval_ranges); 1081 1082 static int snd_interval_step(struct snd_interval *i, unsigned int step) 1083 { 1084 unsigned int n; 1085 int changed = 0; 1086 n = i->min % step; 1087 if (n != 0 || i->openmin) { 1088 i->min += step - n; 1089 i->openmin = 0; 1090 changed = 1; 1091 } 1092 n = i->max % step; 1093 if (n != 0 || i->openmax) { 1094 i->max -= n; 1095 i->openmax = 0; 1096 changed = 1; 1097 } 1098 if (snd_interval_checkempty(i)) { 1099 i->empty = 1; 1100 return -EINVAL; 1101 } 1102 return changed; 1103 } 1104 1105 /* Info constraints helpers */ 1106 1107 /** 1108 * snd_pcm_hw_rule_add - add the hw-constraint rule 1109 * @runtime: the pcm runtime instance 1110 * @cond: condition bits 1111 * @var: the variable to evaluate 1112 * @func: the evaluation function 1113 * @private: the private data pointer passed to function 1114 * @dep: the dependent variables 1115 * 1116 * Return: Zero if successful, or a negative error code on failure. 1117 */ 1118 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond, 1119 int var, 1120 snd_pcm_hw_rule_func_t func, void *private, 1121 int dep, ...) 
1122 { 1123 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1124 struct snd_pcm_hw_rule *c; 1125 unsigned int k; 1126 va_list args; 1127 va_start(args, dep); 1128 if (constrs->rules_num >= constrs->rules_all) { 1129 struct snd_pcm_hw_rule *new; 1130 unsigned int new_rules = constrs->rules_all + 16; 1131 new = krealloc(constrs->rules, new_rules * sizeof(*c), 1132 GFP_KERNEL); 1133 if (!new) { 1134 va_end(args); 1135 return -ENOMEM; 1136 } 1137 constrs->rules = new; 1138 constrs->rules_all = new_rules; 1139 } 1140 c = &constrs->rules[constrs->rules_num]; 1141 c->cond = cond; 1142 c->func = func; 1143 c->var = var; 1144 c->private = private; 1145 k = 0; 1146 while (1) { 1147 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) { 1148 va_end(args); 1149 return -EINVAL; 1150 } 1151 c->deps[k++] = dep; 1152 if (dep < 0) 1153 break; 1154 dep = va_arg(args, int); 1155 } 1156 constrs->rules_num++; 1157 va_end(args); 1158 return 0; 1159 } 1160 EXPORT_SYMBOL(snd_pcm_hw_rule_add); 1161 1162 /** 1163 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint 1164 * @runtime: PCM runtime instance 1165 * @var: hw_params variable to apply the mask 1166 * @mask: the bitmap mask 1167 * 1168 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter. 1169 * 1170 * Return: Zero if successful, or a negative error code on failure. 1171 */ 1172 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, 1173 u_int32_t mask) 1174 { 1175 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1176 struct snd_mask *maskp = constrs_mask(constrs, var); 1177 *maskp->bits &= mask; 1178 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */ 1179 if (*maskp->bits == 0) 1180 return -EINVAL; 1181 return 0; 1182 } 1183 1184 /** 1185 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint 1186 * @runtime: PCM runtime instance 1187 * @var: hw_params variable to apply the mask 1188 * @mask: the 64bit bitmap mask 1189 * 1190 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter. 1191 * 1192 * Return: Zero if successful, or a negative error code on failure. 1193 */ 1194 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, 1195 u_int64_t mask) 1196 { 1197 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1198 struct snd_mask *maskp = constrs_mask(constrs, var); 1199 maskp->bits[0] &= (u_int32_t)mask; 1200 maskp->bits[1] &= (u_int32_t)(mask >> 32); 1201 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ 1202 if (! maskp->bits[0] && ! maskp->bits[1]) 1203 return -EINVAL; 1204 return 0; 1205 } 1206 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64); 1207 1208 /** 1209 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval 1210 * @runtime: PCM runtime instance 1211 * @var: hw_params variable to apply the integer constraint 1212 * 1213 * Apply the constraint of integer to an interval parameter. 1214 * 1215 * Return: Positive if the value is changed, zero if it's not changed, or a 1216 * negative error code. 
1217 */ 1218 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var) 1219 { 1220 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1221 return snd_interval_setinteger(constrs_interval(constrs, var)); 1222 } 1223 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer); 1224 1225 /** 1226 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval 1227 * @runtime: PCM runtime instance 1228 * @var: hw_params variable to apply the range 1229 * @min: the minimal value 1230 * @max: the maximal value 1231 * 1232 * Apply the min/max range constraint to an interval parameter. 1233 * 1234 * Return: Positive if the value is changed, zero if it's not changed, or a 1235 * negative error code. 1236 */ 1237 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, 1238 unsigned int min, unsigned int max) 1239 { 1240 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 1241 struct snd_interval t; 1242 t.min = min; 1243 t.max = max; 1244 t.openmin = t.openmax = 0; 1245 t.integer = 0; 1246 return snd_interval_refine(constrs_interval(constrs, var), &t); 1247 } 1248 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax); 1249 1250 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params, 1251 struct snd_pcm_hw_rule *rule) 1252 { 1253 struct snd_pcm_hw_constraint_list *list = rule->private; 1254 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask); 1255 } 1256 1257 1258 /** 1259 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter 1260 * @runtime: PCM runtime instance 1261 * @cond: condition bits 1262 * @var: hw_params variable to apply the list constraint 1263 * @l: list 1264 * 1265 * Apply the list of constraints to an interval parameter. 1266 * 1267 * Return: Zero if successful, or a negative error code on failure. 1268 */ 1269 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime, 1270 unsigned int cond, 1271 snd_pcm_hw_param_t var, 1272 const struct snd_pcm_hw_constraint_list *l) 1273 { 1274 return snd_pcm_hw_rule_add(runtime, cond, var, 1275 snd_pcm_hw_rule_list, (void *)l, 1276 var, -1); 1277 } 1278 EXPORT_SYMBOL(snd_pcm_hw_constraint_list); 1279 1280 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params, 1281 struct snd_pcm_hw_rule *rule) 1282 { 1283 struct snd_pcm_hw_constraint_ranges *r = rule->private; 1284 return snd_interval_ranges(hw_param_interval(params, rule->var), 1285 r->count, r->ranges, r->mask); 1286 } 1287 1288 1289 /** 1290 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter 1291 * @runtime: PCM runtime instance 1292 * @cond: condition bits 1293 * @var: hw_params variable to apply the list of range constraints 1294 * @r: ranges 1295 * 1296 * Apply the list of range constraints to an interval parameter. 1297 * 1298 * Return: Zero if successful, or a negative error code on failure. 
1299 */ 1300 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime, 1301 unsigned int cond, 1302 snd_pcm_hw_param_t var, 1303 const struct snd_pcm_hw_constraint_ranges *r) 1304 { 1305 return snd_pcm_hw_rule_add(runtime, cond, var, 1306 snd_pcm_hw_rule_ranges, (void *)r, 1307 var, -1); 1308 } 1309 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges); 1310 1311 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params, 1312 struct snd_pcm_hw_rule *rule) 1313 { 1314 const struct snd_pcm_hw_constraint_ratnums *r = rule->private; 1315 unsigned int num = 0, den = 0; 1316 int err; 1317 err = snd_interval_ratnum(hw_param_interval(params, rule->var), 1318 r->nrats, r->rats, &num, &den); 1319 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) { 1320 params->rate_num = num; 1321 params->rate_den = den; 1322 } 1323 return err; 1324 } 1325 1326 /** 1327 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter 1328 * @runtime: PCM runtime instance 1329 * @cond: condition bits 1330 * @var: hw_params variable to apply the ratnums constraint 1331 * @r: struct snd_ratnums constriants 1332 * 1333 * Return: Zero if successful, or a negative error code on failure. 1334 */ 1335 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, 1336 unsigned int cond, 1337 snd_pcm_hw_param_t var, 1338 const struct snd_pcm_hw_constraint_ratnums *r) 1339 { 1340 return snd_pcm_hw_rule_add(runtime, cond, var, 1341 snd_pcm_hw_rule_ratnums, (void *)r, 1342 var, -1); 1343 } 1344 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums); 1345 1346 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params, 1347 struct snd_pcm_hw_rule *rule) 1348 { 1349 const struct snd_pcm_hw_constraint_ratdens *r = rule->private; 1350 unsigned int num = 0, den = 0; 1351 int err = snd_interval_ratden(hw_param_interval(params, rule->var), 1352 r->nrats, r->rats, &num, &den); 1353 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) { 1354 params->rate_num = num; 1355 params->rate_den = den; 1356 } 1357 return err; 1358 } 1359 1360 /** 1361 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter 1362 * @runtime: PCM runtime instance 1363 * @cond: condition bits 1364 * @var: hw_params variable to apply the ratdens constraint 1365 * @r: struct snd_ratdens constriants 1366 * 1367 * Return: Zero if successful, or a negative error code on failure. 
1368 */ 1369 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, 1370 unsigned int cond, 1371 snd_pcm_hw_param_t var, 1372 const struct snd_pcm_hw_constraint_ratdens *r) 1373 { 1374 return snd_pcm_hw_rule_add(runtime, cond, var, 1375 snd_pcm_hw_rule_ratdens, (void *)r, 1376 var, -1); 1377 } 1378 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens); 1379 1380 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params, 1381 struct snd_pcm_hw_rule *rule) 1382 { 1383 unsigned int l = (unsigned long) rule->private; 1384 int width = l & 0xffff; 1385 unsigned int msbits = l >> 16; 1386 const struct snd_interval *i = 1387 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); 1388 1389 if (!snd_interval_single(i)) 1390 return 0; 1391 1392 if ((snd_interval_value(i) == width) || 1393 (width == 0 && snd_interval_value(i) > msbits)) 1394 params->msbits = min_not_zero(params->msbits, msbits); 1395 1396 return 0; 1397 } 1398 1399 /** 1400 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule 1401 * @runtime: PCM runtime instance 1402 * @cond: condition bits 1403 * @width: sample bits width 1404 * @msbits: msbits width 1405 * 1406 * This constraint will set the number of most significant bits (msbits) if a 1407 * sample format with the specified width has been select. If width is set to 0 1408 * the msbits will be set for any sample format with a width larger than the 1409 * specified msbits. 1410 * 1411 * Return: Zero if successful, or a negative error code on failure. 1412 */ 1413 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, 1414 unsigned int cond, 1415 unsigned int width, 1416 unsigned int msbits) 1417 { 1418 unsigned long l = (msbits << 16) | width; 1419 return snd_pcm_hw_rule_add(runtime, cond, -1, 1420 snd_pcm_hw_rule_msbits, 1421 (void*) l, 1422 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 1423 } 1424 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits); 1425 1426 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params, 1427 struct snd_pcm_hw_rule *rule) 1428 { 1429 unsigned long step = (unsigned long) rule->private; 1430 return snd_interval_step(hw_param_interval(params, rule->var), step); 1431 } 1432 1433 /** 1434 * snd_pcm_hw_constraint_step - add a hw constraint step rule 1435 * @runtime: PCM runtime instance 1436 * @cond: condition bits 1437 * @var: hw_params variable to apply the step constraint 1438 * @step: step size 1439 * 1440 * Return: Zero if successful, or a negative error code on failure. 
1441 */ 1442 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime, 1443 unsigned int cond, 1444 snd_pcm_hw_param_t var, 1445 unsigned long step) 1446 { 1447 return snd_pcm_hw_rule_add(runtime, cond, var, 1448 snd_pcm_hw_rule_step, (void *) step, 1449 var, -1); 1450 } 1451 EXPORT_SYMBOL(snd_pcm_hw_constraint_step); 1452 1453 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) 1454 { 1455 static const unsigned int pow2_sizes[] = { 1456 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7, 1457 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15, 1458 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23, 1459 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30 1460 }; 1461 return snd_interval_list(hw_param_interval(params, rule->var), 1462 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0); 1463 } 1464 1465 /** 1466 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule 1467 * @runtime: PCM runtime instance 1468 * @cond: condition bits 1469 * @var: hw_params variable to apply the power-of-2 constraint 1470 * 1471 * Return: Zero if successful, or a negative error code on failure. 1472 */ 1473 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime, 1474 unsigned int cond, 1475 snd_pcm_hw_param_t var) 1476 { 1477 return snd_pcm_hw_rule_add(runtime, cond, var, 1478 snd_pcm_hw_rule_pow2, NULL, 1479 var, -1); 1480 } 1481 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2); 1482 1483 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params, 1484 struct snd_pcm_hw_rule *rule) 1485 { 1486 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private; 1487 struct snd_interval *rate; 1488 1489 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); 1490 return snd_interval_list(rate, 1, &base_rate, 0); 1491 } 1492 1493 /** 1494 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling 1495 * @runtime: PCM runtime instance 1496 * @base_rate: the rate at which the hardware does not resample 1497 * 1498 * Return: Zero if successful, or a negative error code on failure. 
1499 */ 1500 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime, 1501 unsigned int base_rate) 1502 { 1503 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE, 1504 SNDRV_PCM_HW_PARAM_RATE, 1505 snd_pcm_hw_rule_noresample_func, 1506 (void *)(uintptr_t)base_rate, 1507 SNDRV_PCM_HW_PARAM_RATE, -1); 1508 } 1509 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample); 1510 1511 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params, 1512 snd_pcm_hw_param_t var) 1513 { 1514 if (hw_is_mask(var)) { 1515 snd_mask_any(hw_param_mask(params, var)); 1516 params->cmask |= 1 << var; 1517 params->rmask |= 1 << var; 1518 return; 1519 } 1520 if (hw_is_interval(var)) { 1521 snd_interval_any(hw_param_interval(params, var)); 1522 params->cmask |= 1 << var; 1523 params->rmask |= 1 << var; 1524 return; 1525 } 1526 snd_BUG(); 1527 } 1528 1529 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params) 1530 { 1531 unsigned int k; 1532 memset(params, 0, sizeof(*params)); 1533 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) 1534 _snd_pcm_hw_param_any(params, k); 1535 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) 1536 _snd_pcm_hw_param_any(params, k); 1537 params->info = ~0U; 1538 } 1539 EXPORT_SYMBOL(_snd_pcm_hw_params_any); 1540 1541 /** 1542 * snd_pcm_hw_param_value - return @params field @var value 1543 * @params: the hw_params instance 1544 * @var: parameter to retrieve 1545 * @dir: pointer to the direction (-1,0,1) or %NULL 1546 * 1547 * Return: The value for field @var if it's fixed in configuration space 1548 * defined by @params. -%EINVAL otherwise. 1549 */ 1550 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params, 1551 snd_pcm_hw_param_t var, int *dir) 1552 { 1553 if (hw_is_mask(var)) { 1554 const struct snd_mask *mask = hw_param_mask_c(params, var); 1555 if (!snd_mask_single(mask)) 1556 return -EINVAL; 1557 if (dir) 1558 *dir = 0; 1559 return snd_mask_value(mask); 1560 } 1561 if (hw_is_interval(var)) { 1562 const struct snd_interval *i = hw_param_interval_c(params, var); 1563 if (!snd_interval_single(i)) 1564 return -EINVAL; 1565 if (dir) 1566 *dir = i->openmin; 1567 return snd_interval_value(i); 1568 } 1569 return -EINVAL; 1570 } 1571 EXPORT_SYMBOL(snd_pcm_hw_param_value); 1572 1573 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, 1574 snd_pcm_hw_param_t var) 1575 { 1576 if (hw_is_mask(var)) { 1577 snd_mask_none(hw_param_mask(params, var)); 1578 params->cmask |= 1 << var; 1579 params->rmask |= 1 << var; 1580 } else if (hw_is_interval(var)) { 1581 snd_interval_none(hw_param_interval(params, var)); 1582 params->cmask |= 1 << var; 1583 params->rmask |= 1 << var; 1584 } else { 1585 snd_BUG(); 1586 } 1587 } 1588 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty); 1589 1590 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params, 1591 snd_pcm_hw_param_t var) 1592 { 1593 int changed; 1594 if (hw_is_mask(var)) 1595 changed = snd_mask_refine_first(hw_param_mask(params, var)); 1596 else if (hw_is_interval(var)) 1597 changed = snd_interval_refine_first(hw_param_interval(params, var)); 1598 else 1599 return -EINVAL; 1600 if (changed > 0) { 1601 params->cmask |= 1 << var; 1602 params->rmask |= 1 << var; 1603 } 1604 return changed; 1605 } 1606 1607 1608 /** 1609 * snd_pcm_hw_param_first - refine config space and return minimum value 1610 * @pcm: PCM instance 1611 * @params: the hw_params instance 1612 * @var: parameter to retrieve 1613 * @dir: pointer to the direction (-1,0,1) or %NULL 
1614 * 1615 * Inside configuration space defined by @params remove from @var all 1616 * values > minimum. Reduce configuration space accordingly. 1617 * 1618 * Return: The minimum, or a negative error code on failure. 1619 */ 1620 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, 1621 struct snd_pcm_hw_params *params, 1622 snd_pcm_hw_param_t var, int *dir) 1623 { 1624 int changed = _snd_pcm_hw_param_first(params, var); 1625 if (changed < 0) 1626 return changed; 1627 if (params->rmask) { 1628 int err = snd_pcm_hw_refine(pcm, params); 1629 if (err < 0) 1630 return err; 1631 } 1632 return snd_pcm_hw_param_value(params, var, dir); 1633 } 1634 EXPORT_SYMBOL(snd_pcm_hw_param_first); 1635 1636 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params, 1637 snd_pcm_hw_param_t var) 1638 { 1639 int changed; 1640 if (hw_is_mask(var)) 1641 changed = snd_mask_refine_last(hw_param_mask(params, var)); 1642 else if (hw_is_interval(var)) 1643 changed = snd_interval_refine_last(hw_param_interval(params, var)); 1644 else 1645 return -EINVAL; 1646 if (changed > 0) { 1647 params->cmask |= 1 << var; 1648 params->rmask |= 1 << var; 1649 } 1650 return changed; 1651 } 1652 1653 1654 /** 1655 * snd_pcm_hw_param_last - refine config space and return maximum value 1656 * @pcm: PCM instance 1657 * @params: the hw_params instance 1658 * @var: parameter to retrieve 1659 * @dir: pointer to the direction (-1,0,1) or %NULL 1660 * 1661 * Inside configuration space defined by @params remove from @var all 1662 * values < maximum. Reduce configuration space accordingly. 1663 * 1664 * Return: The maximum, or a negative error code on failure. 1665 */ 1666 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, 1667 struct snd_pcm_hw_params *params, 1668 snd_pcm_hw_param_t var, int *dir) 1669 { 1670 int changed = _snd_pcm_hw_param_last(params, var); 1671 if (changed < 0) 1672 return changed; 1673 if (params->rmask) { 1674 int err = snd_pcm_hw_refine(pcm, params); 1675 if (err < 0) 1676 return err; 1677 } 1678 return snd_pcm_hw_param_value(params, var, dir); 1679 } 1680 EXPORT_SYMBOL(snd_pcm_hw_param_last); 1681 1682 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream, 1683 void *arg) 1684 { 1685 struct snd_pcm_runtime *runtime = substream->runtime; 1686 unsigned long flags; 1687 snd_pcm_stream_lock_irqsave(substream, flags); 1688 if (snd_pcm_running(substream) && 1689 snd_pcm_update_hw_ptr(substream) >= 0) 1690 runtime->status->hw_ptr %= runtime->buffer_size; 1691 else { 1692 runtime->status->hw_ptr = 0; 1693 runtime->hw_ptr_wrap = 0; 1694 } 1695 snd_pcm_stream_unlock_irqrestore(substream, flags); 1696 return 0; 1697 } 1698 1699 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream, 1700 void *arg) 1701 { 1702 struct snd_pcm_channel_info *info = arg; 1703 struct snd_pcm_runtime *runtime = substream->runtime; 1704 int width; 1705 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) { 1706 info->offset = -1; 1707 return 0; 1708 } 1709 width = snd_pcm_format_physical_width(runtime->format); 1710 if (width < 0) 1711 return width; 1712 info->offset = 0; 1713 switch (runtime->access) { 1714 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED: 1715 case SNDRV_PCM_ACCESS_RW_INTERLEAVED: 1716 info->first = info->channel * width; 1717 info->step = runtime->channels * width; 1718 break; 1719 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED: 1720 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED: 1721 { 1722 size_t size = runtime->dma_bytes / runtime->channels; 1723 info->first = info->channel * size * 8; 1724 
info->step = width; 1725 break; 1726 } 1727 default: 1728 snd_BUG(); 1729 break; 1730 } 1731 return 0; 1732 } 1733 1734 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, 1735 void *arg) 1736 { 1737 struct snd_pcm_hw_params *params = arg; 1738 snd_pcm_format_t format; 1739 int channels; 1740 ssize_t frame_size; 1741 1742 params->fifo_size = substream->runtime->hw.fifo_size; 1743 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { 1744 format = params_format(params); 1745 channels = params_channels(params); 1746 frame_size = snd_pcm_format_size(format, channels); 1747 if (frame_size > 0) 1748 params->fifo_size /= (unsigned)frame_size; 1749 } 1750 return 0; 1751 } 1752 1753 /** 1754 * snd_pcm_lib_ioctl - a generic PCM ioctl callback 1755 * @substream: the pcm substream instance 1756 * @cmd: ioctl command 1757 * @arg: ioctl argument 1758 * 1759 * Processes the generic ioctl commands for PCM. 1760 * Can be passed as the ioctl callback for PCM ops. 1761 * 1762 * Return: Zero if successful, or a negative error code on failure. 1763 */ 1764 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, 1765 unsigned int cmd, void *arg) 1766 { 1767 switch (cmd) { 1768 case SNDRV_PCM_IOCTL1_RESET: 1769 return snd_pcm_lib_ioctl_reset(substream, arg); 1770 case SNDRV_PCM_IOCTL1_CHANNEL_INFO: 1771 return snd_pcm_lib_ioctl_channel_info(substream, arg); 1772 case SNDRV_PCM_IOCTL1_FIFO_SIZE: 1773 return snd_pcm_lib_ioctl_fifo_size(substream, arg); 1774 } 1775 return -ENXIO; 1776 } 1777 EXPORT_SYMBOL(snd_pcm_lib_ioctl); 1778 1779 /** 1780 * snd_pcm_period_elapsed - update the pcm status for the next period 1781 * @substream: the pcm substream instance 1782 * 1783 * This function is called from the interrupt handler when the 1784 * PCM has processed the period size. It will update the current 1785 * pointer, wake up sleepers, etc. 1786 * 1787 * Even if more than one periods have elapsed since the last call, you 1788 * have to call this only once. 1789 */ 1790 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) 1791 { 1792 struct snd_pcm_runtime *runtime; 1793 unsigned long flags; 1794 1795 if (snd_BUG_ON(!substream)) 1796 return; 1797 1798 snd_pcm_stream_lock_irqsave(substream, flags); 1799 if (PCM_RUNTIME_CHECK(substream)) 1800 goto _unlock; 1801 runtime = substream->runtime; 1802 1803 if (!snd_pcm_running(substream) || 1804 snd_pcm_update_hw_ptr0(substream, 1) < 0) 1805 goto _end; 1806 1807 #ifdef CONFIG_SND_PCM_TIMER 1808 if (substream->timer_running) 1809 snd_timer_interrupt(substream->timer, 1); 1810 #endif 1811 _end: 1812 kill_fasync(&runtime->fasync, SIGIO, POLL_IN); 1813 _unlock: 1814 snd_pcm_stream_unlock_irqrestore(substream, flags); 1815 } 1816 EXPORT_SYMBOL(snd_pcm_period_elapsed); 1817 1818 /* 1819 * Wait until avail_min data becomes available 1820 * Returns a negative error code if any error occurs during operation. 1821 * The available space is stored on availp. When err = 0 and avail = 0 1822 * on the capture stream, it indicates the stream is in DRAINING state. 
1823 */ 1824 static int wait_for_avail(struct snd_pcm_substream *substream, 1825 snd_pcm_uframes_t *availp) 1826 { 1827 struct snd_pcm_runtime *runtime = substream->runtime; 1828 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; 1829 wait_queue_entry_t wait; 1830 int err = 0; 1831 snd_pcm_uframes_t avail = 0; 1832 long wait_time, tout; 1833 1834 init_waitqueue_entry(&wait, current); 1835 set_current_state(TASK_INTERRUPTIBLE); 1836 add_wait_queue(&runtime->tsleep, &wait); 1837 1838 if (runtime->no_period_wakeup) 1839 wait_time = MAX_SCHEDULE_TIMEOUT; 1840 else { 1841 /* use wait time from substream if available */ 1842 if (substream->wait_time) { 1843 wait_time = substream->wait_time; 1844 } else { 1845 wait_time = 10; 1846 1847 if (runtime->rate) { 1848 long t = runtime->period_size * 2 / 1849 runtime->rate; 1850 wait_time = max(t, wait_time); 1851 } 1852 wait_time = msecs_to_jiffies(wait_time * 1000); 1853 } 1854 } 1855 1856 for (;;) { 1857 if (signal_pending(current)) { 1858 err = -ERESTARTSYS; 1859 break; 1860 } 1861 1862 /* 1863 * We need to check if space became available already 1864 * (and thus the wakeup happened already) first to close 1865 * the race of space already having become available. 1866 * This check must happen after been added to the waitqueue 1867 * and having current state be INTERRUPTIBLE. 1868 */ 1869 avail = snd_pcm_avail(substream); 1870 if (avail >= runtime->twake) 1871 break; 1872 snd_pcm_stream_unlock_irq(substream); 1873 1874 tout = schedule_timeout(wait_time); 1875 1876 snd_pcm_stream_lock_irq(substream); 1877 set_current_state(TASK_INTERRUPTIBLE); 1878 switch (runtime->status->state) { 1879 case SNDRV_PCM_STATE_SUSPENDED: 1880 err = -ESTRPIPE; 1881 goto _endloop; 1882 case SNDRV_PCM_STATE_XRUN: 1883 err = -EPIPE; 1884 goto _endloop; 1885 case SNDRV_PCM_STATE_DRAINING: 1886 if (is_playback) 1887 err = -EPIPE; 1888 else 1889 avail = 0; /* indicate draining */ 1890 goto _endloop; 1891 case SNDRV_PCM_STATE_OPEN: 1892 case SNDRV_PCM_STATE_SETUP: 1893 case SNDRV_PCM_STATE_DISCONNECTED: 1894 err = -EBADFD; 1895 goto _endloop; 1896 case SNDRV_PCM_STATE_PAUSED: 1897 continue; 1898 } 1899 if (!tout) { 1900 pcm_dbg(substream->pcm, 1901 "%s write error (DMA or IRQ trouble?)\n", 1902 is_playback ? 
"playback" : "capture"); 1903 err = -EIO; 1904 break; 1905 } 1906 } 1907 _endloop: 1908 set_current_state(TASK_RUNNING); 1909 remove_wait_queue(&runtime->tsleep, &wait); 1910 *availp = avail; 1911 return err; 1912 } 1913 1914 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream, 1915 int channel, unsigned long hwoff, 1916 void *buf, unsigned long bytes); 1917 1918 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, 1919 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f); 1920 1921 /* calculate the target DMA-buffer position to be written/read */ 1922 static void *get_dma_ptr(struct snd_pcm_runtime *runtime, 1923 int channel, unsigned long hwoff) 1924 { 1925 return runtime->dma_area + hwoff + 1926 channel * (runtime->dma_bytes / runtime->channels); 1927 } 1928 1929 /* default copy_user ops for write; used for both interleaved and non- modes */ 1930 static int default_write_copy(struct snd_pcm_substream *substream, 1931 int channel, unsigned long hwoff, 1932 void *buf, unsigned long bytes) 1933 { 1934 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff), 1935 (void __user *)buf, bytes)) 1936 return -EFAULT; 1937 return 0; 1938 } 1939 1940 /* default copy_kernel ops for write */ 1941 static int default_write_copy_kernel(struct snd_pcm_substream *substream, 1942 int channel, unsigned long hwoff, 1943 void *buf, unsigned long bytes) 1944 { 1945 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes); 1946 return 0; 1947 } 1948 1949 /* fill silence instead of copy data; called as a transfer helper 1950 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when 1951 * a NULL buffer is passed 1952 */ 1953 static int fill_silence(struct snd_pcm_substream *substream, int channel, 1954 unsigned long hwoff, void *buf, unsigned long bytes) 1955 { 1956 struct snd_pcm_runtime *runtime = substream->runtime; 1957 1958 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) 1959 return 0; 1960 if (substream->ops->fill_silence) 1961 return substream->ops->fill_silence(substream, channel, 1962 hwoff, bytes); 1963 1964 snd_pcm_format_set_silence(runtime->format, 1965 get_dma_ptr(runtime, channel, hwoff), 1966 bytes_to_samples(runtime, bytes)); 1967 return 0; 1968 } 1969 1970 /* default copy_user ops for read; used for both interleaved and non- modes */ 1971 static int default_read_copy(struct snd_pcm_substream *substream, 1972 int channel, unsigned long hwoff, 1973 void *buf, unsigned long bytes) 1974 { 1975 if (copy_to_user((void __user *)buf, 1976 get_dma_ptr(substream->runtime, channel, hwoff), 1977 bytes)) 1978 return -EFAULT; 1979 return 0; 1980 } 1981 1982 /* default copy_kernel ops for read */ 1983 static int default_read_copy_kernel(struct snd_pcm_substream *substream, 1984 int channel, unsigned long hwoff, 1985 void *buf, unsigned long bytes) 1986 { 1987 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes); 1988 return 0; 1989 } 1990 1991 /* call transfer function with the converted pointers and sizes; 1992 * for interleaved mode, it's one shot for all samples 1993 */ 1994 static int interleaved_copy(struct snd_pcm_substream *substream, 1995 snd_pcm_uframes_t hwoff, void *data, 1996 snd_pcm_uframes_t off, 1997 snd_pcm_uframes_t frames, 1998 pcm_transfer_f transfer) 1999 { 2000 struct snd_pcm_runtime *runtime = substream->runtime; 2001 2002 /* convert to bytes */ 2003 hwoff = frames_to_bytes(runtime, hwoff); 2004 off = frames_to_bytes(runtime, off); 2005 frames = frames_to_bytes(runtime, frames); 2006 return 
/* call transfer function with the converted pointers and sizes;
 * for interleaved mode, it's one shot for all samples
 */
static int interleaved_copy(struct snd_pcm_substream *substream,
			    snd_pcm_uframes_t hwoff, void *data,
			    snd_pcm_uframes_t off,
			    snd_pcm_uframes_t frames,
			    pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* convert to bytes */
	hwoff = frames_to_bytes(runtime, hwoff);
	off = frames_to_bytes(runtime, off);
	frames = frames_to_bytes(runtime, frames);
	return transfer(substream, 0, hwoff, data + off, frames);
}

/* call transfer function with the converted pointers and sizes for each
 * non-interleaved channel; when buffer is NULL, silencing instead of copying
 */
static int noninterleaved_copy(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t hwoff, void *data,
			       snd_pcm_uframes_t off,
			       snd_pcm_uframes_t frames,
			       pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int channels = runtime->channels;
	void **bufs = data;
	int c, err;

	/* convert to bytes; note that it's not frames_to_bytes() here.
	 * in non-interleaved mode, we copy for each channel, thus
	 * each copy is n_samples bytes x channels = whole frames.
	 */
	off = samples_to_bytes(runtime, off);
	frames = samples_to_bytes(runtime, frames);
	hwoff = samples_to_bytes(runtime, hwoff);
	for (c = 0; c < channels; ++c, ++bufs) {
		if (!data || !*bufs)
			err = fill_silence(substream, c, hwoff, NULL, frames);
		else
			err = transfer(substream, c, hwoff, *bufs + off,
				       frames);
		if (err < 0)
			return err;
	}
	return 0;
}

/* fill silence on the given buffer position;
 * called from snd_pcm_playback_silence()
 */
static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
{
	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
		return interleaved_copy(substream, off, NULL, 0, frames,
					fill_silence);
	else
		return noninterleaved_copy(substream, off, NULL, 0, frames,
					   fill_silence);
}

/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
		return -EINVAL;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	return 0;
}

static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
{
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		return 0;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	case SNDRV_PCM_STATE_SUSPENDED:
		return -ESTRPIPE;
	default:
		return -EBADFD;
	}
}

/* update to the given appl_ptr and call ack callback if needed;
 * when an error is returned, take back to the original value
 */
int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
			   snd_pcm_uframes_t appl_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
	int ret;

	if (old_appl_ptr == appl_ptr)
		return 0;

	runtime->control->appl_ptr = appl_ptr;
	if (substream->ops->ack) {
		ret = substream->ops->ack(substream);
		if (ret < 0) {
			runtime->control->appl_ptr = old_appl_ptr;
			return ret;
		}
	}

	trace_applptr(substream, old_appl_ptr, appl_ptr);

	return 0;
}
/* the common loop for read/write data */
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
				     void *data, bool interleaved,
				     snd_pcm_uframes_t size, bool in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	pcm_copy_f writer;
	pcm_transfer_f transfer;
	bool nonblock;
	bool is_playback;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;

	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	if (interleaved) {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
		    runtime->channels > 1)
			return -EINVAL;
		writer = interleaved_copy;
	} else {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
			return -EINVAL;
		writer = noninterleaved_copy;
	}

	if (!data) {
		if (is_playback)
			transfer = fill_silence;
		else
			return -EINVAL;
	} else if (in_kernel) {
		if (substream->ops->copy_kernel)
			transfer = substream->ops->copy_kernel;
		else
			transfer = is_playback ?
				default_write_copy_kernel : default_read_copy_kernel;
	} else {
		if (substream->ops->copy_user)
			transfer = (pcm_transfer_f)substream->ops->copy_user;
		else
			transfer = is_playback ?
				default_write_copy : default_read_copy;
	}

	if (size == 0)
		return 0;

	nonblock = !!(substream->f_flags & O_NONBLOCK);

	snd_pcm_stream_lock_irq(substream);
	err = pcm_accessible_state(runtime);
	if (err < 0)
		goto _end_unlock;

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);

	/*
	 * If size < start_threshold, wait indefinitely.  Another
	 * thread may start capture
	 */
	if (!is_playback &&
	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
	    size >= runtime->start_threshold) {
		err = snd_pcm_start(substream);
		if (err < 0)
			goto _end_unlock;
	}

	avail = snd_pcm_avail(substream);

	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;
		if (!avail) {
			if (!is_playback &&
			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				goto _end_unlock;
			}
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
			if (!avail)
				continue; /* draining */
		}
		frames = size > avail ? avail : size;
		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		appl_ofs = appl_ptr % runtime->buffer_size;
		cont = runtime->buffer_size - appl_ofs;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			err = -EINVAL;
			goto _end_unlock;
		}
		snd_pcm_stream_unlock_irq(substream);
		err = writer(substream, appl_ofs, data, offset, frames,
			     transfer);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		err = pcm_accessible_state(runtime);
		if (err < 0)
			goto _end_unlock;
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
		if (err < 0)
			goto _end_unlock;

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
		if (is_playback &&
		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
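/*
 * Usage sketch (illustrative only): the in-kernel and user-space PCM
 * read/write paths funnel into the helper above.  For example, queueing
 * `frames` frames of interleaved playback data that live in a kernel buffer
 * corresponds to:
 *
 *	snd_pcm_sframes_t done;
 *
 *	done = __snd_pcm_lib_xfer(substream, buf, true, frames, true);
 *
 * The return value is the number of frames actually transferred, or a
 * negative error code.  A NULL `data` pointer on a playback stream queues
 * silence instead, and in_kernel == false makes `data` be treated as a
 * user-space address.
 */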
/*
 * standard channel mapping helpers
 */

/* default channel maps for multi-channel playbacks, up to 8 channels */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);

/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);

static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
{
	if (ch > info->max_channels)
		return false;
	return !info->channel_mask || (info->channel_mask & (1U << ch));
}

static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = info->max_channels;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

/* get callback for channel map ctl element
 * stores the first channel map matching the current number of channels
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	const struct snd_pcm_chmap_elem *map;

	if (!info->chmap)
		return -EINVAL;
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
	       sizeof(long) * info->max_channels);
	if (!substream->runtime)
		return 0; /* no channels set */
	for (map = info->chmap; map->channels; map++) {
		int i;
		if (map->channels == substream->runtime->channels &&
		    valid_chmap_channels(info, map->channels)) {
			for (i = 0; i < map->channels; i++)
				ucontrol->value.integer.value[i] = map->map[i];
			return 0;
		}
	}
	return -EINVAL;
}

/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in TLV form
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	const struct snd_pcm_chmap_elem *map;
	unsigned int __user *dst;
	int c, count = 0;

	if (!info->chmap)
		return -EINVAL;
	if (size < 8)
		return -ENOMEM;
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;
	for (map = info->chmap; map->channels; map++) {
		int chs_bytes = map->channels * 4;
		if (!valid_chmap_channels(info, map->channels))
			continue;
		if (size < 8)
			return -ENOMEM;
		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
		    put_user(chs_bytes, dst + 1))
			return -EFAULT;
		dst += 2;
		size -= 8;
		count += 8;
		if (size < chs_bytes)
			return -ENOMEM;
		size -= chs_bytes;
		count += chs_bytes;
		for (c = 0; c < map->channels; c++) {
			if (put_user(map->map[c], dst))
				return -EFAULT;
			dst++;
		}
	}
	if (put_user(count, tlv + 1))
		return -EFAULT;
	return 0;
}
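/*
 * Resulting layout, for reference (derived from the callback above): with a
 * chmap table that contains only a stereo entry, the TLV returned to user
 * space is
 *
 *	tlv[0] = SNDRV_CTL_TLVT_CONTAINER
 *	tlv[1] = 16			// payload size in bytes
 *	tlv[2] = SNDRV_CTL_TLVT_CHMAP_FIXED
 *	tlv[3] = 8			// 2 channels * 4 bytes
 *	tlv[4] = SNDRV_CHMAP_FL
 *	tlv[5] = SNDRV_CHMAP_FR
 *
 * i.e. one CHMAP_FIXED block per valid entry, wrapped in a single container.
 */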
static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	info->pcm->streams[info->stream].chmap_kctl = NULL;
	kfree(info);
}

/**
 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
 * @pcm: the assigned PCM instance
 * @stream: stream direction
 * @chmap: channel map elements (for query)
 * @max_channels: the max number of channels for the stream
 * @private_value: the value passed to each kcontrol's private_value field
 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
 *
 * Create channel-mapping control elements assigned to the given PCM stream(s).
 *
 * Return: Zero if successful, or a negative error value.
 */
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
			   const struct snd_pcm_chmap_elem *chmap,
			   int max_channels,
			   unsigned long private_value,
			   struct snd_pcm_chmap **info_ret)
{
	struct snd_pcm_chmap *info;
	struct snd_kcontrol_new knew = {
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.access = SNDRV_CTL_ELEM_ACCESS_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
		.info = pcm_chmap_ctl_info,
		.get = pcm_chmap_ctl_get,
		.tlv.c = pcm_chmap_ctl_tlv,
	};
	int err;

	if (WARN_ON(pcm->streams[stream].chmap_kctl))
		return -EBUSY;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->pcm = pcm;
	info->stream = stream;
	info->chmap = chmap;
	info->max_channels = max_channels;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		knew.name = "Playback Channel Map";
	else
		knew.name = "Capture Channel Map";
	knew.device = pcm->device;
	knew.count = pcm->streams[stream].substream_count;
	knew.private_value = private_value;
	info->kctl = snd_ctl_new1(&knew, info);
	if (!info->kctl) {
		kfree(info);
		return -ENOMEM;
	}
	info->kctl->private_free = pcm_chmap_ctl_private_free;
	err = snd_ctl_add(pcm->card, info->kctl);
	if (err < 0)
		return err;
	pcm->streams[stream].chmap_kctl = info->kctl;
	if (info_ret)
		*info_ret = info;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
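/*
 * Usage sketch (illustrative only): a driver typically registers the control
 * right after creating its PCM, often with one of the standard maps exported
 * above, e.g. for a stereo playback stream:
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 2, 0, NULL);
 *	if (err < 0)
 *		return err;
 *
 * Passing a non-NULL last argument lets the caller keep the allocated
 * struct snd_pcm_chmap, e.g. to adjust info->channel_mask afterwards.
 */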