/*
 * Digital Audio (PCM) abstract layer
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                  Abramo Bagnara <abramo@alsa-project.org>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#define trace_applptr(substream, prev, curr)
#endif

static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;
	int err;

	if (runtime->silence_size < runtime->boundary) {
		snd_pcm_sframes_t noise_dist, n;
		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		if (runtime->silence_start != appl_ptr) {
			n = appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ?
				avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	ofs = runtime->silence_start % runtime->buffer_size;
	while (frames > 0) {
		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
		err = fill_silence_frames(substream, ofs, transfer);
		snd_BUG_ON(err < 0);
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;
	}
}

#ifdef CONFIG_SND_DEBUG
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
			char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ? 'c' : 'p',
		 substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif

#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG

#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

static void xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	trace_xrun(substream);
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		snd_pcm_debug_name(substream, name, sizeof(name));
		pcm_warn(substream->pcm, "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
	do {								\
		trace_hw_ptr_error(substream, reason);			\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
					   (in_interrupt) ? 'Q' : 'P', ##args); \
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)

#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */

#define hw_ptr_error(substream, fmt, args...) \
	do { } while (0)

#endif

int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	avail = snd_pcm_avail(substream);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}

static void update_audio_tstamp(struct snd_pcm_substream *substream,
				struct timespec *curr_tstamp,
				struct timespec *audio_tstamp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	u64 audio_frames, audio_nsecs;
	struct timespec driver_tstamp;

	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
		return;

	if (!(substream->ops->get_time_info) ||
		(runtime->audio_tstamp_report.actual_type ==
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {

		/*
		 * provide audio timestamp derived from pointer position
		 * add delay only if requested
		 */

		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;

		if (runtime->audio_tstamp_config.report_delay) {
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
				audio_frames -= runtime->delay;
			else
				audio_frames += runtime->delay;
		}
		audio_nsecs = div_u64(audio_frames * 1000000000LL,
				runtime->rate);
		*audio_tstamp = ns_to_timespec(audio_nsecs);
	}
	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
		runtime->status->audio_tstamp = *audio_tstamp;
		runtime->status->tstamp = *curr_tstamp;
	}

	/*
	 * re-take a driver timestamp to let apps detect if the reference tstamp
	 * read by low-level hardware was provided with a delay
	 */
	snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
	runtime->driver_tstamp = driver_tstamp;
}

static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
				  unsigned int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
	snd_pcm_sframes_t hdelta, delta;
	unsigned long jdelta;
	unsigned long curr_jiffies;
	struct timespec curr_tstamp;
	struct timespec audio_tstamp;
	int crossed_boundary = 0;

	old_hw_ptr = runtime->status->hw_ptr;

	/*
	 * group pointer, time and jiffies reads to allow for more
	 * accurate correlations/corrections.
	 * The values are stored at the end of this routine after
	 * corrections for hw_ptr position
	 */
	pos = substream->ops->pointer(substream);
	curr_jiffies = jiffies;
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		if ((substream->ops->get_time_info) &&
			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
			substream->ops->get_time_info(substream, &curr_tstamp,
						&audio_tstamp,
						&runtime->audio_tstamp_config,
						&runtime->audio_tstamp_report);

			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
				snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
		} else
			snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
	}

	if (pos == SNDRV_PCM_POS_XRUN) {
		xrun(substream);
		return -EPIPE;
	}
	if (pos >= runtime->buffer_size) {
		if (printk_ratelimit()) {
			char name[16];
			snd_pcm_debug_name(substream, name, sizeof(name));
			pcm_err(substream->pcm,
				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
				name, pos, runtime->buffer_size,
				runtime->period_size);
		}
		pos = 0;
	}
	pos -= pos % runtime->min_align;
	trace_hwptr(substream, pos, in_interrupt);
	hw_base = runtime->hw_ptr_base;
	new_hw_ptr = hw_base + pos;
	if (in_interrupt) {
		/* we know that one period was processed */
		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
		delta = runtime->hw_ptr_interrupt + runtime->period_size;
		if (delta > new_hw_ptr) {
			/* check for double acknowledged interrupts */
			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
				hw_base += runtime->buffer_size;
				if (hw_base >= runtime->boundary) {
					hw_base = 0;
					crossed_boundary++;
				}
				new_hw_ptr = hw_base + pos;
				goto __delta;
			}
		}
	}
	/* new_hw_ptr might be lower than old_hw_ptr in case when */
	/* pointer crosses the end of the ring buffer */
	if (new_hw_ptr < old_hw_ptr) {
		hw_base += runtime->buffer_size;
		if (hw_base >= runtime->boundary) {
			hw_base = 0;
			crossed_boundary++;
		}
		new_hw_ptr = hw_base + pos;
	}
      __delta:
	delta = new_hw_ptr - old_hw_ptr;
	if (delta < 0)
		delta += runtime->boundary;

	if (runtime->no_period_wakeup) {
		snd_pcm_sframes_t xrun_threshold;
		/*
		 * Without regular period interrupts, we have to check
		 * the elapsed time to detect xruns.
		 */
		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
			goto no_delta_check;
		hdelta = jdelta - delta * HZ / runtime->rate;
		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
		while (hdelta > xrun_threshold) {
			delta += runtime->buffer_size;
			hw_base += runtime->buffer_size;
			if (hw_base >= runtime->boundary) {
				hw_base = 0;
				crossed_boundary++;
			}
			new_hw_ptr = hw_base + pos;
			hdelta -= runtime->hw_ptr_buffer_jiffies;
		}
		goto no_delta_check;
	}

	/* something must be really wrong */
	if (delta >= runtime->buffer_size + runtime->period_size) {
		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)pos,
			     (long)new_hw_ptr, (long)old_hw_ptr);
		return 0;
	}

	/* Do jiffies check only in xrun_debug mode */
	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
		goto no_jiffies_check;

	/* Skip the jiffies check for hardware with the BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
		goto no_jiffies_check;
	hdelta = delta;
	if (hdelta < runtime->delay)
		goto no_jiffies_check;
	hdelta -= runtime->delay;
	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
		delta = jdelta /
			(((runtime->period_size * HZ) / runtime->rate)
								+ HZ/100);
		/* move new_hw_ptr according to jiffies, not the pos variable */
		new_hw_ptr = old_hw_ptr;
		hw_base = delta;
		/* use loop to avoid checks for delta overflows */
		/* the delta value is small or zero in most cases */
		while (delta > 0) {
			new_hw_ptr += runtime->period_size;
			if (new_hw_ptr >= runtime->boundary) {
				new_hw_ptr -= runtime->boundary;
				crossed_boundary--;
			}
			delta--;
		}
		/* align hw_base to buffer_size */
		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
			     (long)pos, (long)hdelta,
			     (long)runtime->period_size, jdelta,
			     ((hdelta * HZ) / runtime->rate), hw_base,
			     (unsigned long)old_hw_ptr,
			     (unsigned long)new_hw_ptr);
		/* reset values to proper state */
		delta = 0;
		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
	}
 no_jiffies_check:
	if (delta > runtime->period_size + runtime->period_size / 2) {
		hw_ptr_error(substream, in_interrupt,
			     "Lost interrupts?",
			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)delta,
			     (long)new_hw_ptr,
			     (long)old_hw_ptr);
	}

 no_delta_check:
	if (runtime->status->hw_ptr == new_hw_ptr) {
		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
		return 0;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, new_hw_ptr);

	if (in_interrupt) {
		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
		if (delta < 0)
			delta += runtime->boundary;
		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
		runtime->hw_ptr_interrupt += delta;
		if (runtime->hw_ptr_interrupt >= runtime->boundary)
			runtime->hw_ptr_interrupt -= runtime->boundary;
	}
	runtime->hw_ptr_base = hw_base;
	runtime->status->hw_ptr = new_hw_ptr;
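	/* remember when this hw_ptr value was sampled; the jiffies-based
	 * checks above use it on the next update
	 */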
	runtime->hw_ptr_jiffies = curr_jiffies;
	if (crossed_boundary) {
		snd_BUG_ON(crossed_boundary != 1);
		runtime->hw_ptr_wrap += runtime->boundary;
	}

	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);

	return snd_pcm_update_state(substream, runtime);
}

/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	return snd_pcm_update_hw_ptr0(substream, 0);
}

/**
 * snd_pcm_set_ops - set the PCM operators
 * @pcm: the pcm instance
 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 * @ops: the operator table
 *
 * Sets the given PCM operators to the pcm instance.
 */
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
		     const struct snd_pcm_ops *ops)
{
	struct snd_pcm_str *stream = &pcm->streams[direction];
	struct snd_pcm_substream *substream;

	for (substream = stream->substream; substream != NULL; substream = substream->next)
		substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);

/**
 * snd_pcm_set_sync - set the PCM sync id
 * @substream: the pcm substream
 *
 * Sets the PCM sync identifier for the card.
 */
void snd_pcm_set_sync(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->sync.id32[0] = substream->pcm->card->number;
	runtime->sync.id32[1] = -1;
	runtime->sync.id32[2] = -1;
	runtime->sync.id32[3] = -1;
}
EXPORT_SYMBOL(snd_pcm_set_sync);

/*
 *  Standard ioctl routine
 */

static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;
	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

/**
 * snd_interval_refine - refine the interval value of configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) is evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
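 *
 * As a purely illustrative example, refining an interval of [8000, 48000]
 * against a reference interval of [44100, 192000] narrows it to
 * [44100, 48000] and reports a positive (changed) result.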
 */
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
	int changed = 0;
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (i->min < v->min) {
		i->min = v->min;
		i->openmin = v->openmin;
		changed = 1;
	} else if (i->min == v->min && !i->openmin && v->openmin) {
		i->openmin = 1;
		changed = 1;
	}
	if (i->max > v->max) {
		i->max = v->max;
		i->openmax = v->openmax;
		changed = 1;
	} else if (i->max == v->max && !i->openmax && v->openmax) {
		i->openmax = 1;
		changed = 1;
	}
	if (!i->integer && v->integer) {
		i->integer = 1;
		changed = 1;
	}
	if (i->integer) {
		if (i->openmin) {
			i->min++;
			i->openmin = 0;
		}
		if (i->openmax) {
			i->max--;
			i->openmax = 0;
		}
	} else if (!i->openmin && !i->openmax && i->min == i->max)
		i->integer = 1;
	if (snd_interval_checkempty(i)) {
		snd_interval_none(i);
		return -EINVAL;
	}
	return changed;
}
EXPORT_SYMBOL(snd_interval_refine);

static int snd_interval_refine_first(struct snd_interval *i)
{
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->max = i->min;
	i->openmax = i->openmin;
	if (i->openmax)
		i->max++;
	return 1;
}

static int snd_interval_refine_last(struct snd_interval *i)
{
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->min = i->max;
	i->openmin = i->openmax;
	if (i->openmin)
		i->min--;
	return 1;
}

void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = mul(a->min, b->min);
	c->openmin = (a->openmin || b->openmin);
	c->max = mul(a->max,  b->max);
	c->openmax = (a->openmax || b->openmax);
	c->integer = (a->integer && b->integer);
}

/**
 * snd_interval_div - refine the interval value with division
 * @a: dividend
 * @b: divisor
 * @c: quotient
 *
 * c = a / b
 *
 * The result is stored in @c.
 */
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = div32(a->min, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = div32(a->max, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/**
 * snd_interval_muldivk - refine the interval value
 * @a: dividend 1
 * @b: dividend 2
 * @k: divisor (as integer)
 * @c: result
 *
 * c = a * b / k
 *
 * The result is stored in @c.
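 *
 * A typical use (values below are only illustrative) is relating a rate
 * interval to a period-size interval: with @a = rate [8000, 48000],
 * @b = period time in microseconds [1000, 1000] and @k = 1000000, the
 * resulting @c covers [8, 48] frames per period.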
 */
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
		      unsigned int k, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, b->min, k, &r);
	c->openmin = (r || a->openmin || b->openmin);
	c->max = muldiv32(a->max, b->max, k, &r);
	if (r) {
		c->max++;
		c->openmax = 1;
	} else
		c->openmax = (a->openmax || b->openmax);
	c->integer = 0;
}

/**
 * snd_interval_mulkdiv - refine the interval value
 * @a: dividend 1
 * @k: dividend 2 (as integer)
 * @b: divisor
 * @c: result
 *
 * c = a * k / b
 *
 * The result is stored in @c.
 */
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
		      const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, k, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = muldiv32(a->max, k, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/* ---- */


/**
 * snd_interval_ratnum - refine the interval value
 * @i: interval to refine
 * @rats_count: number of ratnum_t
 * @rats: ratnum_t array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
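 *
 * Each struct snd_ratnum describes rates of the form num/den, where den
 * runs from den_min to den_max in steps of den_step.  As an illustrative
 * sketch (not taken from a real driver), a fixed 24.576 MHz master clock
 * with dividers 256, 512, 768 and 1024 could be described as
 * { .num = 24576000, .den_min = 256, .den_max = 1024, .den_step = 256 },
 * which allows 96000, 48000, 32000 and 24000 Hz.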
 */
int snd_interval_ratnum(struct snd_interval *i,
			unsigned int rats_count, const struct snd_ratnum *rats,
			unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_den;
	int best_diff;
	unsigned int k;
	struct snd_interval t;
	int err;
	unsigned int result_num, result_den;
	int result_diff;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->min;
		int diff;
		if (q == 0)
			q = 1;
		den = div_up(num, q);
		if (den < rats[k].den_min)
			continue;
		if (den > rats[k].den_max)
			den = rats[k].den_max;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den -= r;
		}
		diff = num - q * den;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	result_num = best_num;
	result_diff = best_diff;
	result_den = best_den;
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->max;
		int diff;
		if (q == 0) {
			i->empty = 1;
			return -EINVAL;
		}
		den = div_down(num, q);
		if (den > rats[k].den_max)
			continue;
		if (den < rats[k].den_min)
			den = rats[k].den_min;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den += rats[k].den_step - r;
		}
		diff = q * den - num;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (best_diff * result_den < result_diff * best_den) {
			result_num = best_num;
			result_den = best_den;
		}
		if (nump)
			*nump = result_num;
		if (denp)
			*denp = result_den;
	}
	return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);

/**
 * snd_interval_ratden - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct ratden
 * @rats: struct ratden array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
static int snd_interval_ratden(struct snd_interval *i,
			       unsigned int rats_count,
			       const struct snd_ratden *rats,
			       unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_diff, best_den;
	unsigned int k;
	struct snd_interval t;
	int err;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->min;
		int diff;
		num = mul(q, den);
		if (num > rats[k].num_max)
			continue;
		if (num < rats[k].num_min)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num += rats[k].num_step - r;
		}
		diff = num - q * den;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->max;
		int diff;
		num = mul(q, den);
		if (num < rats[k].num_min)
			continue;
		if (num > rats[k].num_max)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num -= r;
		}
		diff = q * den - num;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (nump)
			*nump = best_num;
		if (denp)
			*denp = best_den;
	}
	return err;
}

/**
 * snd_interval_list - refine the interval value from the list
 * @i: the interval value to refine
 * @count: the number of elements in the list
 * @list: the value list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_list(struct snd_interval *i, unsigned int count,
		      const unsigned int *list, unsigned int mask)
{
	unsigned int k;
	struct snd_interval list_range;

	if (!count) {
		i->empty = 1;
		return -EINVAL;
	}
	snd_interval_any(&list_range);
	list_range.min = UINT_MAX;
	list_range.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		if (!snd_interval_test(i, list[k]))
			continue;
		list_range.min = min(list_range.min, list[k]);
		list_range.max = max(list_range.max, list[k]);
	}
	return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);

/**
 * snd_interval_ranges - refine the interval value from the list of ranges
 * @i: the interval value to refine
 * @count: the number of elements in the list of ranges
 * @ranges: the ranges list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list of ranges.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
			const struct snd_interval *ranges, unsigned int mask)
{
	unsigned int k;
	struct snd_interval range_union;
	struct snd_interval range;

	if (!count) {
		snd_interval_none(i);
		return -EINVAL;
	}
	snd_interval_any(&range_union);
	range_union.min = UINT_MAX;
	range_union.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		snd_interval_copy(&range, &ranges[k]);
		if (snd_interval_refine(&range, i) < 0)
			continue;
		if (snd_interval_empty(&range))
			continue;

		if (range.min < range_union.min) {
			range_union.min = range.min;
			range_union.openmin = 1;
		}
		if (range.min == range_union.min && !range.openmin)
			range_union.openmin = 0;
		if (range.max > range_union.max) {
			range_union.max = range.max;
			range_union.openmax = 1;
		}
		if (range.max == range_union.max && !range.openmax)
			range_union.openmax = 0;
	}
	return snd_interval_refine(i, &range_union);
}
EXPORT_SYMBOL(snd_interval_ranges);

static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
	unsigned int n;
	int changed = 0;
	n = i->min % step;
	if (n != 0 || i->openmin) {
		i->min += step - n;
		i->openmin = 0;
		changed = 1;
	}
	n = i->max % step;
	if (n != 0 || i->openmax) {
		i->max -= n;
		i->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(i)) {
		i->empty = 1;
		return -EINVAL;
	}
	return changed;
}

/* Info constraints helpers */

/**
 * snd_pcm_hw_rule_add - add the hw-constraint rule
 * @runtime: the pcm runtime instance
 * @cond: condition bits
 * @var: the variable to evaluate
 * @func: the evaluation function
 * @private: the private data pointer passed to function
 * @dep: the dependent variables
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
			int var,
			snd_pcm_hw_rule_func_t func, void *private,
			int dep, ...)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_pcm_hw_rule *c;
	unsigned int k;
	va_list args;
	va_start(args, dep);
	if (constrs->rules_num >= constrs->rules_all) {
		struct snd_pcm_hw_rule *new;
		unsigned int new_rules = constrs->rules_all + 16;
		new = krealloc(constrs->rules, new_rules * sizeof(*c),
			       GFP_KERNEL);
		if (!new) {
			va_end(args);
			return -ENOMEM;
		}
		constrs->rules = new;
		constrs->rules_all = new_rules;
	}
	c = &constrs->rules[constrs->rules_num];
	c->cond = cond;
	c->func = func;
	c->var = var;
	c->private = private;
	k = 0;
	while (1) {
		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
			va_end(args);
			return -EINVAL;
		}
		c->deps[k++] = dep;
		if (dep < 0)
			break;
		dep = va_arg(args, int);
	}
	constrs->rules_num++;
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_rule_add);

/**
 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
			       u_int32_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	*maskp->bits &= mask;
	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
	if (*maskp->bits == 0)
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the 64bit bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 u_int64_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	maskp->bits[0] &= (u_int32_t)mask;
	maskp->bits[1] &= (u_int32_t)(mask >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);

/**
 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the integer constraint
 *
 * Apply an integer constraint to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
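 *
 * A common use, sketched here, is forcing a whole number of periods per
 * buffer from a driver's open callback:
 *
 *	snd_pcm_hw_constraint_integer(substream->runtime,
 *				      SNDRV_PCM_HW_PARAM_PERIODS);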
 */
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);

/**
 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the range
 * @min: the minimal value
 * @max: the maximal value
 *
 * Apply the min/max range constraint to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 unsigned int min, unsigned int max)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_interval t;
	t.min = min;
	t.max = max;
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);

static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_list *list = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}


/**
 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list constraint
 * @l: list
 *
 * Apply the list of constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       const struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, (void *)l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);

static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ranges *r = rule->private;
	return snd_interval_ranges(hw_param_interval(params, rule->var),
				   r->count, r->ranges, r->mask);
}


/**
 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list of range constraints
 * @r: ranges
 *
 * Apply the list of range constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
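 *
 * A minimal sketch (identifiers below are illustrative, not from a real
 * driver) declaring two supported rate bands:
 *
 *	static const struct snd_interval rate_ranges[] = {
 *		{ .min = 8000,  .max = 48000 },
 *		{ .min = 88200, .max = 96000 },
 *	};
 *	static const struct snd_pcm_hw_constraint_ranges rate_constraint = {
 *		.count  = ARRAY_SIZE(rate_ranges),
 *		.ranges = rate_ranges,
 *		.mask   = 0,
 *	};
 *
 *	snd_pcm_hw_constraint_ranges(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *				     &rate_constraint);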
 */
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 snd_pcm_hw_param_t var,
				 const struct snd_pcm_hw_constraint_ranges *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ranges, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);

static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;
	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
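 *
 * Illustrative sketch only: a device whose rate must be an exact multiple
 * of 500 Hz between 4 kHz and 48 kHz could describe this as a single
 * struct snd_ratden with .num_min = 4000, .num_max = 48000,
 * .num_step = 500 and .den = 1.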
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;
	unsigned int msbits = l >> 16;
	const struct snd_interval *i =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);

	if (!snd_interval_single(i))
		return 0;

	if ((snd_interval_value(i) == width) ||
	    (width == 0 && snd_interval_value(i) > msbits))
		params->msbits = min_not_zero(params->msbits, msbits);

	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set to 0
 * the msbits will be set for any sample format with a width larger than the
 * specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				   snd_pcm_hw_rule_msbits,
				   (void*) l,
				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);

static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 *
 * Return: Zero if successful, or a negative error code on failure.
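 *
 * For example (a sketch of typical driver usage), hardware that requires
 * the period size to be a multiple of 32 frames could add:
 *
 *	snd_pcm_hw_constraint_step(runtime, 0,
 *				   SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);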
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
	static unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
	};
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_pow2, NULL,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);

static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
					   struct snd_pcm_hw_rule *rule)
{
	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
	struct snd_interval *rate;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	return snd_interval_list(rate, 1, &base_rate, 0);
}

/**
 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
 * @runtime: PCM runtime instance
 * @base_rate: the rate at which the hardware does not resample
 *
 * Return: Zero if successful, or a negative error code on failure.
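 *
 * The rule is evaluated only when the application sets the
 * SNDRV_PCM_HW_PARAMS_NORESAMPLE flag, in which case it restricts the rate
 * to @base_rate.  A driver whose converter natively runs at 48000 Hz would
 * call (sketch):
 *
 *	snd_pcm_hw_rule_noresample(runtime, 48000);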
 */
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
			       unsigned int base_rate)
{
	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
				   SNDRV_PCM_HW_PARAM_RATE,
				   snd_pcm_hw_rule_noresample_func,
				   (void *)(uintptr_t)base_rate,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);

static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_any(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	if (hw_is_interval(var)) {
		snd_interval_any(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	snd_BUG();
}

void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
{
	unsigned int k;
	memset(params, 0, sizeof(*params));
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
		_snd_pcm_hw_param_any(params, k);
	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		_snd_pcm_hw_param_any(params, k);
	params->info = ~0U;
}
EXPORT_SYMBOL(_snd_pcm_hw_params_any);

/**
 * snd_pcm_hw_param_value - return @params field @var value
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Return: The value for field @var if it's fixed in configuration space
 * defined by @params. -%EINVAL otherwise.
 */
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	if (hw_is_mask(var)) {
		const struct snd_mask *mask = hw_param_mask_c(params, var);
		if (!snd_mask_single(mask))
			return -EINVAL;
		if (dir)
			*dir = 0;
		return snd_mask_value(mask);
	}
	if (hw_is_interval(var)) {
		const struct snd_interval *i = hw_param_interval_c(params, var);
		if (!snd_interval_single(i))
			return -EINVAL;
		if (dir)
			*dir = i->openmin;
		return snd_interval_value(i);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(snd_pcm_hw_param_value);

void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_none(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else if (hw_is_interval(var)) {
		snd_interval_none(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else {
		snd_BUG();
	}
}
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);

static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
				   snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_first(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_first(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_first - refine config space and return minimum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values > minimum. Reduce configuration space accordingly.
 *
 * Return: The minimum, or a negative error code on failure.
 */
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
			   struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_first(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_first);

static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_last(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_last(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_last - refine config space and return maximum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values < maximum. Reduce configuration space accordingly.
 *
 * Return: The maximum, or a negative error code on failure.
 */
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
			  struct snd_pcm_hw_params *params,
			  snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_last(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_last);

static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
				   void *arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream) &&
	    snd_pcm_update_hw_ptr(substream) >= 0)
		runtime->status->hw_ptr %= runtime->buffer_size;
	else {
		runtime->status->hw_ptr = 0;
		runtime->hw_ptr_wrap = 0;
	}
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return 0;
}

static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
					  void *arg)
{
	struct snd_pcm_channel_info *info = arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int width;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
		info->offset = -1;
		return 0;
	}
	width = snd_pcm_format_physical_width(runtime->format);
	if (width < 0)
		return width;
	info->offset = 0;
	switch (runtime->access) {
	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
		info->first = info->channel * width;
		info->step = runtime->channels * width;
		break;
	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
	{
		size_t size = runtime->dma_bytes / runtime->channels;
		info->first = info->channel * size * 8;
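		/* non-interleaved: each channel occupies its own contiguous
		 * area, so consecutive samples of a channel are one physical
		 * sample width apart; first/step are expressed in bits
		 */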
		info->step = width;
		break;
	}
	default:
		snd_BUG();
		break;
	}
	return 0;
}

static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
				       void *arg)
{
	struct snd_pcm_hw_params *params = arg;
	snd_pcm_format_t format;
	int channels;
	ssize_t frame_size;

	params->fifo_size = substream->runtime->hw.fifo_size;
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
		format = params_format(params);
		channels = params_channels(params);
		frame_size = snd_pcm_format_size(format, channels);
		if (frame_size > 0)
			params->fifo_size /= (unsigned)frame_size;
	}
	return 0;
}

/**
 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
 * @substream: the pcm substream instance
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Processes the generic ioctl commands for PCM.
 * Can be passed as the ioctl callback for PCM ops.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
		      unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_RESET:
		return snd_pcm_lib_ioctl_reset(substream, arg);
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
		return snd_pcm_lib_ioctl_channel_info(substream, arg);
	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
	}
	return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);

/**
 * snd_pcm_period_elapsed - update the pcm status for the next period
 * @substream: the pcm substream instance
 *
 * This function is called from the interrupt handler when the
 * PCM has processed the period size. It will update the current
 * pointer, wake up sleepers, etc.
 *
 * Even if more than one period has elapsed since the last call, you
 * need to call this only once.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	unsigned long flags;

	if (PCM_RUNTIME_CHECK(substream))
		return;
	runtime = substream->runtime;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (!snd_pcm_running(substream) ||
	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
		goto _end;

#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer_running)
		snd_timer_interrupt(substream->timer, 1);
#endif
 _end:
	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);

/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored in availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
			  snd_pcm_uframes_t *availp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	wait_queue_entry_t wait;
	int err = 0;
	snd_pcm_uframes_t avail = 0;
	long wait_time, tout;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&runtime->tsleep, &wait);

	if (runtime->no_period_wakeup)
		wait_time = MAX_SCHEDULE_TIMEOUT;
	else {
		wait_time = 10;
		if (runtime->rate) {
			long t = runtime->period_size * 2 / runtime->rate;
			wait_time = max(t, wait_time);
		}
		wait_time = msecs_to_jiffies(wait_time * 1000);
	}

	for (;;) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}

		/*
		 * We need to check if space became available already
		 * (and thus the wakeup happened already) first to close
		 * the race of space already having become available.
		 * This check must happen after having been added to the
		 * waitqueue and having set the current state to
		 * TASK_INTERRUPTIBLE.
		 */
		avail = snd_pcm_avail(substream);
		if (avail >= runtime->twake)
			break;
		snd_pcm_stream_unlock_irq(substream);

		tout = schedule_timeout(wait_time);

		snd_pcm_stream_lock_irq(substream);
		set_current_state(TASK_INTERRUPTIBLE);
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_DRAINING:
			if (is_playback)
				err = -EPIPE;
			else
				avail = 0; /* indicate draining */
			goto _endloop;
		case SNDRV_PCM_STATE_OPEN:
		case SNDRV_PCM_STATE_SETUP:
		case SNDRV_PCM_STATE_DISCONNECTED:
			err = -EBADFD;
			goto _endloop;
		case SNDRV_PCM_STATE_PAUSED:
			continue;
		}
		if (!tout) {
			pcm_dbg(substream->pcm,
				"%s write error (DMA or IRQ trouble?)\n",
				is_playback ? "playback" : "capture");
			err = -EIO;
			break;
		}
	}
 _endloop:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&runtime->tsleep, &wait);
	*availp = avail;
	return err;
}

typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
			      int channel, unsigned long hwoff,
			      void *buf, unsigned long bytes);

typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);

/* calculate the target DMA-buffer position to be written/read */
static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
			 int channel, unsigned long hwoff)
{
	return runtime->dma_area + hwoff +
		channel * (runtime->dma_bytes / runtime->channels);
}

/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
static int default_write_copy(struct snd_pcm_substream *substream,
			      int channel, unsigned long hwoff,
			      void *buf, unsigned long bytes)
{
	if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
			   (void __user *)buf, bytes))
		return -EFAULT;
	return 0;
}

/* default copy_kernel ops for write */
static int default_write_copy_kernel(struct snd_pcm_substream *substream,
				     int channel, unsigned long hwoff,
				     void *buf, unsigned long bytes)
{
	memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
	return 0;
}

/* fill silence instead of copy data; called as a transfer helper
 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
 * a NULL buffer is passed
 */
static int fill_silence(struct snd_pcm_substream *substream, int channel,
			unsigned long hwoff, void *buf, unsigned long bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
		return 0;
	if (substream->ops->fill_silence)
		return substream->ops->fill_silence(substream, channel,
						    hwoff, bytes);

	snd_pcm_format_set_silence(runtime->format,
				   get_dma_ptr(runtime, channel, hwoff),
				   bytes_to_samples(runtime, bytes));
	return 0;
}

/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
static int default_read_copy(struct snd_pcm_substream *substream,
			     int channel, unsigned long hwoff,
			     void *buf, unsigned long bytes)
{
	if (copy_to_user((void __user *)buf,
			 get_dma_ptr(substream->runtime, channel, hwoff),
			 bytes))
		return -EFAULT;
	return 0;
}

/* default copy_kernel ops for read */
static int default_read_copy_kernel(struct snd_pcm_substream *substream,
				    int channel, unsigned long hwoff,
				    void *buf, unsigned long bytes)
{
	memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
	return 0;
}

/* call transfer function with the converted pointers and sizes;
 * for interleaved mode, it's one shot for all samples
 */
static int interleaved_copy(struct snd_pcm_substream *substream,
			    snd_pcm_uframes_t hwoff, void *data,
			    snd_pcm_uframes_t off,
			    snd_pcm_uframes_t frames,
			    pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* convert to bytes */
	hwoff = frames_to_bytes(runtime, hwoff);
	off = frames_to_bytes(runtime, off);
	frames = frames_to_bytes(runtime, frames);
	return
/* call the transfer function with the converted pointers and sizes;
 * for interleaved mode, it's one shot for all samples
 */
static int interleaved_copy(struct snd_pcm_substream *substream,
			    snd_pcm_uframes_t hwoff, void *data,
			    snd_pcm_uframes_t off,
			    snd_pcm_uframes_t frames,
			    pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* convert to bytes */
	hwoff = frames_to_bytes(runtime, hwoff);
	off = frames_to_bytes(runtime, off);
	frames = frames_to_bytes(runtime, frames);
	return transfer(substream, 0, hwoff, data + off, frames);
}

/* call the transfer function with the converted pointers and sizes for
 * each non-interleaved channel; when the buffer is NULL, silence is
 * filled instead of copying data
 */
static int noninterleaved_copy(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t hwoff, void *data,
			       snd_pcm_uframes_t off,
			       snd_pcm_uframes_t frames,
			       pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int channels = runtime->channels;
	void **bufs = data;
	int c, err;

	/* convert to bytes; note that it's not frames_to_bytes() here.
	 * in non-interleaved mode, we copy for each channel, thus
	 * each copy is n_samples bytes x channels = whole frames.
	 */
	off = samples_to_bytes(runtime, off);
	frames = samples_to_bytes(runtime, frames);
	hwoff = samples_to_bytes(runtime, hwoff);
	for (c = 0; c < channels; ++c, ++bufs) {
		if (!data || !*bufs)
			err = fill_silence(substream, c, hwoff, NULL, frames);
		else
			err = transfer(substream, c, hwoff, *bufs + off,
				       frames);
		if (err < 0)
			return err;
	}
	return 0;
}

/* fill silence on the given buffer position;
 * called from snd_pcm_playback_silence()
 */
static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
{
	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
		return interleaved_copy(substream, off, NULL, 0, frames,
					fill_silence);
	else
		return noninterleaved_copy(substream, off, NULL, 0, frames,
					   fill_silence);
}

/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
		return -EINVAL;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	return 0;
}

static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
{
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		return 0;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	case SNDRV_PCM_STATE_SUSPENDED:
		return -ESTRPIPE;
	default:
		return -EBADFD;
	}
}

/* update to the given appl_ptr and call the ack callback if needed;
 * when an error is returned, roll back to the original value
 */
int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
			   snd_pcm_uframes_t appl_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
	int ret;

	if (old_appl_ptr == appl_ptr)
		return 0;

	runtime->control->appl_ptr = appl_ptr;
	if (substream->ops->ack) {
		ret = substream->ops->ack(substream);
		if (ret < 0) {
			runtime->control->appl_ptr = old_appl_ptr;
			return ret;
		}
	}

	trace_applptr(substream, old_appl_ptr, appl_ptr);

	return 0;
}
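/*
 * Illustrative sketch (hypothetical "foo" driver): an .ack callback as
 * invoked by pcm_lib_apply_appl_ptr() above.  A driver that feeds a DMA
 * engine from appl_ptr can use this hook to queue further transfers; a
 * negative return value makes the core restore the previous appl_ptr.
 *
 *	static int foo_pcm_ack(struct snd_pcm_substream *substream)
 *	{
 *		struct foo_chip *chip = snd_pcm_substream_chip(substream);
 *		snd_pcm_uframes_t appl_ptr =
 *			READ_ONCE(substream->runtime->control->appl_ptr);
 *
 *		// tell the hardware how far the application has written;
 *		// return a negative error to trigger the rollback above
 *		return foo_queue_dma_up_to(chip, appl_ptr);
 *	}
 */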
/* the common loop for read/write data */
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
				     void *data, bool interleaved,
				     snd_pcm_uframes_t size, bool in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	pcm_copy_f writer;
	pcm_transfer_f transfer;
	bool nonblock;
	bool is_playback;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;

	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	if (interleaved) {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
		    runtime->channels > 1)
			return -EINVAL;
		writer = interleaved_copy;
	} else {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
			return -EINVAL;
		writer = noninterleaved_copy;
	}

	if (!data) {
		if (is_playback)
			transfer = fill_silence;
		else
			return -EINVAL;
	} else if (in_kernel) {
		if (substream->ops->copy_kernel)
			transfer = substream->ops->copy_kernel;
		else
			transfer = is_playback ?
				default_write_copy_kernel : default_read_copy_kernel;
	} else {
		if (substream->ops->copy_user)
			transfer = (pcm_transfer_f)substream->ops->copy_user;
		else
			transfer = is_playback ?
				default_write_copy : default_read_copy;
	}

	if (size == 0)
		return 0;

	nonblock = !!(substream->f_flags & O_NONBLOCK);

	snd_pcm_stream_lock_irq(substream);
	err = pcm_accessible_state(runtime);
	if (err < 0)
		goto _end_unlock;

	if (!is_playback &&
	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
	    size >= runtime->start_threshold) {
		err = snd_pcm_start(substream);
		if (err < 0)
			goto _end_unlock;
	}

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);
	avail = snd_pcm_avail(substream);
	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;

		if (!avail) {
			if (!is_playback &&
			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				goto _end_unlock;
			}
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
			if (!avail)
				continue; /* draining */
		}
		frames = size > avail ? avail : size;
		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		appl_ofs = appl_ptr % runtime->buffer_size;
		cont = runtime->buffer_size - appl_ofs;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			runtime->twake = 0;
			snd_pcm_stream_unlock_irq(substream);
			return -EINVAL;
		}
		snd_pcm_stream_unlock_irq(substream);
		err = writer(substream, appl_ofs, data, offset, frames,
			     transfer);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		err = pcm_accessible_state(runtime);
		if (err < 0)
			goto _end_unlock;
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
		if (err < 0)
			goto _end_unlock;

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
		if (is_playback &&
		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
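/*
 * Worked example of the chunking above (illustrative numbers): with
 * buffer_size = 16384 frames, appl_ptr = 16000 and size = avail = 1000,
 * appl_ofs = 16000 and cont = 16384 - 16000 = 384, so the first pass
 * copies only 384 frames up to the end of the ring buffer; the loop then
 * comes around with appl_ofs = 0 and copies the remaining 616 frames.
 */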
/*
 * standard channel mapping helpers
 */

/* default channel maps for multi-channel playbacks, up to 8 channels */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);

/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);

static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
{
	if (ch > info->max_channels)
		return false;
	return !info->channel_mask || (info->channel_mask & (1U << ch));
}
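/*
 * Example (illustrative): a driver that supports only 2-, 4- and
 * 8-channel layouts could set info->channel_mask =
 * (1U << 2) | (1U << 4) | (1U << 8); valid_chmap_channels() then rejects
 * the 6-channel entries of the maps above even though max_channels is 8.
 * A channel_mask of 0 means no restriction beyond max_channels.
 */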
static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = info->max_channels;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

/* get callback for channel map ctl element
 * stores the channel positions of the first map matching the current
 * channel count
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	const struct snd_pcm_chmap_elem *map;

	if (!info->chmap)
		return -EINVAL;
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
	       sizeof(ucontrol->value.integer.value));
	if (!substream->runtime)
		return 0; /* no channels set */
	for (map = info->chmap; map->channels; map++) {
		int i;

		if (map->channels == substream->runtime->channels &&
		    valid_chmap_channels(info, map->channels)) {
			for (i = 0; i < map->channels; i++)
				ucontrol->value.integer.value[i] = map->map[i];
			return 0;
		}
	}
	return -EINVAL;
}

/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in the form of TLV
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	const struct snd_pcm_chmap_elem *map;
	unsigned int __user *dst;
	int c, count = 0;

	if (!info->chmap)
		return -EINVAL;
	if (size < 8)
		return -ENOMEM;
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;
	for (map = info->chmap; map->channels; map++) {
		int chs_bytes = map->channels * 4;

		if (!valid_chmap_channels(info, map->channels))
			continue;
		if (size < 8)
			return -ENOMEM;
		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
		    put_user(chs_bytes, dst + 1))
			return -EFAULT;
		dst += 2;
		size -= 8;
		count += 8;
		if (size < chs_bytes)
			return -ENOMEM;
		size -= chs_bytes;
		count += chs_bytes;
		for (c = 0; c < map->channels; c++) {
			if (put_user(map->map[c], dst))
				return -EFAULT;
			dst++;
		}
	}
	if (put_user(count, tlv + 1))
		return -EFAULT;
	return 0;
}
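/*
 * Worked example of the TLV layout produced above (illustrative), for a
 * chmap table containing only the 2- and 4-channel entries; each slot is
 * a 32-bit word:
 *
 *	tlv[0]     = SNDRV_CTL_TLVT_CONTAINER
 *	tlv[1]     = 40			total payload bytes (count)
 *	tlv[2]     = SNDRV_CTL_TLVT_CHMAP_FIXED
 *	tlv[3]     = 8			2 channels * 4 bytes
 *	tlv[4..5]  = FL, FR
 *	tlv[6]     = SNDRV_CTL_TLVT_CHMAP_FIXED
 *	tlv[7]     = 16			4 channels * 4 bytes
 *	tlv[8..11] = FL, FR, RL, RR
 */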
static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);

	info->pcm->streams[info->stream].chmap_kctl = NULL;
	kfree(info);
}

/**
 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
 * @pcm: the assigned PCM instance
 * @stream: stream direction
 * @chmap: channel map elements (for query)
 * @max_channels: the max number of channels for the stream
 * @private_value: the value passed to each kcontrol's private_value field
 * @info_ret: if non-NULL, the allocated struct snd_pcm_chmap instance is
 *	stored here
 *
 * Create channel-mapping control elements assigned to the given PCM stream(s).
 *
 * Return: Zero if successful, or a negative error value.
 */
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
			   const struct snd_pcm_chmap_elem *chmap,
			   int max_channels,
			   unsigned long private_value,
			   struct snd_pcm_chmap **info_ret)
{
	struct snd_pcm_chmap *info;
	struct snd_kcontrol_new knew = {
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.access = SNDRV_CTL_ELEM_ACCESS_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
		.info = pcm_chmap_ctl_info,
		.get = pcm_chmap_ctl_get,
		.tlv.c = pcm_chmap_ctl_tlv,
	};
	int err;

	if (WARN_ON(pcm->streams[stream].chmap_kctl))
		return -EBUSY;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->pcm = pcm;
	info->stream = stream;
	info->chmap = chmap;
	info->max_channels = max_channels;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		knew.name = "Playback Channel Map";
	else
		knew.name = "Capture Channel Map";
	knew.device = pcm->device;
	knew.count = pcm->streams[stream].substream_count;
	knew.private_value = private_value;
	info->kctl = snd_ctl_new1(&knew, info);
	if (!info->kctl) {
		kfree(info);
		return -ENOMEM;
	}
	info->kctl->private_free = pcm_chmap_ctl_private_free;
	err = snd_ctl_add(pcm->card, info->kctl);
	if (err < 0)
		return err;
	pcm->streams[stream].chmap_kctl = info->kctl;
	if (info_ret)
		*info_ret = info;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
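/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * registering the standard channel-map controls for a playback stream
 * once the PCM has been created; "pcm" is the driver's struct snd_pcm.
 *
 *	struct snd_pcm_chmap *chmap;
 *	int err;
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8,
 *				     0,		// private_value, unused here
 *				     &chmap);
 *	if (err < 0)
 *		return err;
 *	// optionally restrict the advertised layouts to 2/6/8 channels
 *	chmap->channel_mask = (1U << 2) | (1U << 6) | (1U << 8);
 */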