// SPDX-License-Identifier: GPL-2.0-only
/*
 *  compress_core.c - compress offload core
 *
 *  Copyright (C) 2011 Intel Corporation
 *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
 *		Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/info.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>

/* struct snd_compr_codec_caps overflows the ioctl bit size for some
 * architectures, so we need to disable the relevant ioctls.
 */
#if _IOC_SIZEBITS < 14
#define COMPR_CODEC_CAPS_OVERFLOW
#endif

/* TODO:
 * - add substream support for multiple devices in case
 *	SND_DYNAMIC_MINORS is not used
 * - Multiple node representation
 *	driver should be able to register multiple nodes
 */

struct snd_compr_file {
	unsigned long caps;
	struct snd_compr_stream stream;
};

static void error_delayed_work(struct work_struct *work);

/*
 * a note on stream states used:
 * we use the following states in the compressed core
 * SNDRV_PCM_STATE_OPEN: When the stream has been opened.
 * SNDRV_PCM_STATE_SETUP: When the stream has been initialized. This is done by
 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come back to
 *	this state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
 *	playback only). After setting up the stream, the user writes data to
 *	the buffer before starting the stream.
 * SNDRV_PCM_STATE_RUNNING: When the stream has been started and is
 *	decoding/encoding and rendering/capturing data.
 * SNDRV_PCM_STATE_DRAINING: When the stream is draining current data. This is
 *	done by calling SNDRV_COMPRESS_DRAIN.
 * SNDRV_PCM_STATE_PAUSED: When the stream is paused. This is done by calling
 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
 */
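/*
 * Illustrative playback sequence from userspace (a minimal sketch, not part
 * of this file; the fd, params and buf values are made up), showing how the
 * calls map onto the states above:
 *
 *	fd = open("/dev/snd/comprCXD0", O_WRONLY);	// OPEN
 *	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	// SETUP
 *	write(fd, buf, size);				// PREPARED
 *	ioctl(fd, SNDRV_COMPRESS_START);		// RUNNING
 *	...keep write()ing as poll() reports room...
 *	ioctl(fd, SNDRV_COMPRESS_DRAIN);		// DRAINING, then SETUP
 *	close(fd);
 *
 * SNDRV_COMPRESS_STOP instead of DRAIN discards queued data and also returns
 * the stream to SETUP.
 */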
static int snd_compr_open(struct inode *inode, struct file *f)
{
	struct snd_compr *compr;
	struct snd_compr_file *data;
	struct snd_compr_runtime *runtime;
	enum snd_compr_direction dirn;
	int maj = imajor(inode);
	int ret;

	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
		dirn = SND_COMPRESS_PLAYBACK;
	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
		dirn = SND_COMPRESS_CAPTURE;
	else
		return -EINVAL;

	if (maj == snd_major)
		compr = snd_lookup_minor_data(iminor(inode),
					      SNDRV_DEVICE_TYPE_COMPRESS);
	else
		return -EBADFD;

	if (compr == NULL) {
		pr_err("no device data!!!\n");
		return -ENODEV;
	}

	if (dirn != compr->direction) {
		pr_err("this device doesn't support this direction\n");
		snd_card_unref(compr->card);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		snd_card_unref(compr->card);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);

	data->stream.ops = compr->ops;
	data->stream.direction = dirn;
	data->stream.private_data = compr->private_data;
	data->stream.device = compr;
	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (!runtime) {
		kfree(data);
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	runtime->state = SNDRV_PCM_STATE_OPEN;
	init_waitqueue_head(&runtime->sleep);
	data->stream.runtime = runtime;
	f->private_data = (void *)data;
	mutex_lock(&compr->lock);
	ret = compr->ops->open(&data->stream);
	mutex_unlock(&compr->lock);
	if (ret) {
		kfree(runtime);
		kfree(data);
	}
	snd_card_unref(compr->card);
	return ret;
}

static int snd_compr_free(struct inode *inode, struct file *f)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_runtime *runtime = data->stream.runtime;

	cancel_delayed_work_sync(&data->stream.error_work);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
		break;
	default:
		break;
	}

	data->stream.ops->free(&data->stream);
	if (!data->stream.runtime->dma_buffer_p)
		kfree(data->stream.runtime->buffer);
	kfree(data->stream.runtime);
	kfree(data);
	return 0;
}

static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
				   struct snd_compr_tstamp *tstamp)
{
	if (!stream->ops->pointer)
		return -ENOTSUPP;
	stream->ops->pointer(stream, tstamp);
	pr_debug("dsp consumed till %d total %d bytes\n",
		 tstamp->byte_offset, tstamp->copied_total);
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		stream->runtime->total_bytes_transferred = tstamp->copied_total;
	else
		stream->runtime->total_bytes_available = tstamp->copied_total;
	return 0;
}
static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
				   struct snd_compr_avail *avail)
{
	memset(avail, 0, sizeof(*avail));
	snd_compr_update_tstamp(stream, &avail->tstamp);
	/* Still need to return avail even if tstamp can't be filled in */

	if (stream->runtime->total_bytes_available == 0 &&
	    stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
	    stream->direction == SND_COMPRESS_PLAYBACK) {
		pr_debug("detected init and someone forgot to do a write\n");
		return stream->runtime->buffer_size;
	}
	pr_debug("app wrote %lld, DSP consumed %lld\n",
		 stream->runtime->total_bytes_available,
		 stream->runtime->total_bytes_transferred);
	if (stream->runtime->total_bytes_available ==
	    stream->runtime->total_bytes_transferred) {
		if (stream->direction == SND_COMPRESS_PLAYBACK) {
			pr_debug("both pointers are same, returning full avail\n");
			return stream->runtime->buffer_size;
		} else {
			pr_debug("both pointers are same, returning no avail\n");
			return 0;
		}
	}

	avail->avail = stream->runtime->total_bytes_available -
		       stream->runtime->total_bytes_transferred;
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		avail->avail = stream->runtime->buffer_size - avail->avail;

	pr_debug("ret avail as %lld\n", avail->avail);
	return avail->avail;
}

static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
{
	struct snd_compr_avail avail;

	return snd_compr_calc_avail(stream, &avail);
}

static int
snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_avail ioctl_avail;
	size_t avail;

	avail = snd_compr_calc_avail(stream, &ioctl_avail);
	ioctl_avail.avail = avail;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	if (copy_to_user((__u64 __user *)arg,
			 &ioctl_avail, sizeof(ioctl_avail)))
		return -EFAULT;
	return 0;
}

static int snd_compr_write_data(struct snd_compr_stream *stream,
				const char __user *buf, size_t count)
{
	void *dstn;
	size_t copy;
	struct snd_compr_runtime *runtime = stream->runtime;
	/* 64-bit Modulus */
	u64 app_pointer = div64_u64(runtime->total_bytes_available,
				    runtime->buffer_size);
	app_pointer = runtime->total_bytes_available -
		      (app_pointer * runtime->buffer_size);

	dstn = runtime->buffer + app_pointer;
	pr_debug("copying %ld at %lld\n",
		 (unsigned long)count, app_pointer);
	if (count < runtime->buffer_size - app_pointer) {
		if (copy_from_user(dstn, buf, count))
			return -EFAULT;
	} else {
		copy = runtime->buffer_size - app_pointer;
		if (copy_from_user(dstn, buf, copy))
			return -EFAULT;
		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
			return -EFAULT;
	}
	/* if DSP cares, let it know data has been written */
	if (stream->ops->ack)
		stream->ops->ack(stream, count);
	return count;
}
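/*
 * Worked example of the ring-buffer arithmetic above (illustrative numbers
 * only): suppose buffer_size = 4096 and the app has written
 * total_bytes_available = 10000 bytes so far.  Then
 *	app_pointer = 10000 % 4096 = 1808,
 * so the next copy lands at runtime->buffer + 1808.  A 3000 byte write is
 * split into 4096 - 1808 = 2288 bytes up to the end of the buffer and the
 * remaining 712 bytes wrapping around to the start of it.
 */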
static ssize_t snd_compr_write(struct file *f, const char __user *buf,
			       size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	mutex_lock(&stream->device->lock);
	/* write is allowed when stream is running or has been setup */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
		break;
	default:
		mutex_unlock(&stream->device->lock);
		return -EBADFD;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %ld\n", (unsigned long)avail);
	/* calculate how much we can write to buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		char __user *cbuf = (char __user *)buf;
		retval = stream->ops->copy(stream, cbuf, avail);
	} else {
		retval = snd_compr_write_data(stream, buf, avail);
	}
	if (retval > 0)
		stream->runtime->total_bytes_available += retval;

	/* while initiating the stream, write should be called before START
	 * call, so if we are in SETUP move the state to PREPARED
	 */
	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
		pr_debug("stream prepared, Houston we are good to go\n");
	}

	mutex_unlock(&stream->device->lock);
	return retval;
}

static ssize_t snd_compr_read(struct file *f, char __user *buf,
			      size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	mutex_lock(&stream->device->lock);

	/* read is allowed when stream is running, paused, draining and setup
	 * (yes, setup is the state we transition to after stop, so if the user
	 * wants to read data after stop we allow that)
	 */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_SUSPENDED:
	case SNDRV_PCM_STATE_DISCONNECTED:
		retval = -EBADFD;
		goto out;
	case SNDRV_PCM_STATE_XRUN:
		retval = -EPIPE;
		goto out;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %ld\n", (unsigned long)avail);
	/* calculate how much we can read from buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		retval = stream->ops->copy(stream, buf, avail);
	} else {
		retval = -ENXIO;
		goto out;
	}
	if (retval > 0)
		stream->runtime->total_bytes_transferred += retval;

out:
	mutex_unlock(&stream->device->lock);
	return retval;
}

static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
{
	return -ENXIO;
}

static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
{
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		return EPOLLOUT | EPOLLWRNORM;
	else
		return EPOLLIN | EPOLLRDNORM;
}

static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	__poll_t retval = 0;

	if (snd_BUG_ON(!data))
		return EPOLLERR;

	stream = &data->stream;

	mutex_lock(&stream->device->lock);

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_XRUN:
		retval = snd_compr_get_poll(stream) | EPOLLERR;
		goto out;
	default:
		break;
	}

	poll_wait(f, &stream->runtime->sleep, wait);

	avail = snd_compr_get_avail(stream);
	pr_debug("avail is %ld\n", (unsigned long)avail);
	/* check if we have at least one fragment to fill */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_DRAINING:
		/* stream has been woken up after drain is complete
		 * draining done so set stream state to stopped
		 */
		retval = snd_compr_get_poll(stream);
		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
		break;
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= stream->runtime->fragment_size)
			retval = snd_compr_get_poll(stream);
		break;
	default:
		retval = snd_compr_get_poll(stream) | EPOLLERR;
		break;
	}
out:
	mutex_unlock(&stream->device->lock);
	return retval;
}
static int
snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_caps caps;

	if (!stream->ops->get_caps)
		return -ENXIO;

	memset(&caps, 0, sizeof(caps));
	retval = stream->ops->get_caps(stream, &caps);
	if (retval)
		goto out;
	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
		retval = -EFAULT;
out:
	return retval;
}

#ifndef COMPR_CODEC_CAPS_OVERFLOW
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_codec_caps *caps;

	if (!stream->ops->get_codec_caps)
		return -ENXIO;

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	retval = stream->ops->get_codec_caps(stream, caps);
	if (retval)
		goto out;
	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
		retval = -EFAULT;

out:
	kfree(caps);
	return retval;
}
#endif /* !COMPR_CODEC_CAPS_OVERFLOW */

int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
{
	struct snd_dma_buffer *dmab;
	int ret;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return -ENOMEM;
	dmab->dev = stream->dma_buffer.dev;
	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
	if (ret < 0) {
		kfree(dmab);
		return ret;
	}

	snd_compr_set_runtime_buffer(stream, dmab);
	stream->runtime->dma_bytes = size;
	return 1;
}
EXPORT_SYMBOL(snd_compr_malloc_pages);
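/*
 * Minimal usage sketch (illustrative only; the "foo" names are made up):
 * a driver that wants the core to manage a DMA backing buffer fills in
 * stream->dma_buffer.dev before calling snd_compr_malloc_pages(), typically
 * from its set_params callback:
 *
 *	static int foo_compr_set_params(struct snd_compr_stream *stream,
 *					struct snd_compr_params *params)
 *	{
 *		size_t size = params->buffer.fragment_size *
 *			      params->buffer.fragments;
 *		int ret;
 *
 *		stream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
 *		stream->dma_buffer.dev.dev = foo_dev;
 *		ret = snd_compr_malloc_pages(stream, size);
 *		return ret < 0 ? ret : 0;
 *	}
 *
 * The matching free callback would then call snd_compr_free_pages().
 */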
int snd_compr_free_pages(struct snd_compr_stream *stream)
{
	struct snd_compr_runtime *runtime;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	runtime = stream->runtime;
	if (runtime->dma_area == NULL)
		return 0;
	if (runtime->dma_buffer_p != &stream->dma_buffer) {
		/* It's a newly allocated buffer. Release it now. */
		snd_dma_free_pages(runtime->dma_buffer_p);
		kfree(runtime->dma_buffer_p);
	}

	snd_compr_set_runtime_buffer(stream, NULL);
	return 0;
}
EXPORT_SYMBOL(snd_compr_free_pages);

/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
				     struct snd_compr_params *params)
{
	unsigned int buffer_size;
	void *buffer = NULL;

	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
	if (stream->ops->copy) {
		buffer = NULL;
		/* if copy is defined the driver will be required to copy
		 * the data from core
		 */
	} else {
		if (stream->runtime->dma_buffer_p) {

			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
				dev_err(&stream->device->dev,
					"Not enough DMA buffer");
			else
				buffer = stream->runtime->dma_buffer_p->area;

		} else {
			buffer = kmalloc(buffer_size, GFP_KERNEL);
		}

		if (!buffer)
			return -ENOMEM;
	}
	stream->runtime->fragment_size = params->buffer.fragment_size;
	stream->runtime->fragments = params->buffer.fragments;
	stream->runtime->buffer = buffer;
	stream->runtime->buffer_size = buffer_size;
	return 0;
}

static int snd_compress_check_input(struct snd_compr_params *params)
{
	/* first let's check the buffer parameters */
	if (params->buffer.fragment_size == 0 ||
	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
	    params->buffer.fragments == 0)
		return -EINVAL;

	/* now codec parameters */
	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
		return -EINVAL;

	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
		return -EINVAL;

	return 0;
}

static int
snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_params *params;
	int retval;

	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
		/*
		 * we should allow parameter change only when the stream has
		 * been opened, not in other cases
		 */
		params = memdup_user((void __user *)arg, sizeof(*params));
		if (IS_ERR(params))
			return PTR_ERR(params);

		retval = snd_compress_check_input(params);
		if (retval)
			goto out;

		retval = snd_compr_allocate_buffer(stream, params);
		if (retval) {
			retval = -ENOMEM;
			goto out;
		}

		retval = stream->ops->set_params(stream, params);
		if (retval)
			goto out;

		stream->metadata_set = false;
		stream->next_track = false;

		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
	} else {
		return -EPERM;
	}
out:
	kfree(params);
	return retval;
}

static int
snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_codec *params;
	int retval;

	if (!stream->ops->get_params)
		return -EBADFD;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;
	retval = stream->ops->get_params(stream, params);
	if (retval)
		goto out;
	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
		retval = -EFAULT;

out:
	kfree(params);
	return retval;
}
static int
snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->get_metadata)
		return -ENXIO;

	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;

	retval = stream->ops->get_metadata(stream, &metadata);
	if (retval != 0)
		return retval;

	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
		return -EFAULT;

	return 0;
}

static int
snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->set_metadata)
		return -ENXIO;
	/*
	 * we should allow parameter change only when stream has been
	 * opened not in other cases
	 */
	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;

	retval = stream->ops->set_metadata(stream, &metadata);
	stream->metadata_set = true;

	return retval;
}

static inline int
snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_tstamp tstamp = {0};
	int ret;

	ret = snd_compr_update_tstamp(stream, &tstamp);
	if (ret == 0)
		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
				   &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
	return ret;
}

static int snd_compr_pause(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->device->use_pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->pause_in_draining = true;
		break;
	default:
		return -EPERM;
	}
	return retval;
}

static int snd_compr_resume(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_PAUSED:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->pause_in_draining = false;
		break;
	default:
		return -EPERM;
	}
	return retval;
}

static int snd_compr_start(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
		if (stream->direction != SND_COMPRESS_CAPTURE)
			return -EPERM;
		break;
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		return -EPERM;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
	if (!retval)
		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
	return retval;
}

static int snd_compr_stop(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		return -EPERM;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	if (!retval) {
		/* clear flags and stop any drain wait */
		stream->partial_drain = false;
		stream->metadata_set = false;
		stream->pause_in_draining = false;
		snd_compr_drain_notify(stream);
		stream->runtime->total_bytes_available = 0;
		stream->runtime->total_bytes_transferred = 0;
	}
	return retval;
}
static void error_delayed_work(struct work_struct *work)
{
	struct snd_compr_stream *stream;

	stream = container_of(work, struct snd_compr_stream, error_work.work);

	mutex_lock(&stream->device->lock);

	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	wake_up(&stream->runtime->sleep);

	mutex_unlock(&stream->device->lock);
}

/*
 * snd_compr_stop_error: Report a fatal error on a stream
 * @stream: pointer to stream
 * @state: state to transition the stream to
 *
 * Stop the stream and set its state.
 *
 * Should be called with compressed device lock held.
 */
int snd_compr_stop_error(struct snd_compr_stream *stream,
			 snd_pcm_state_t state)
{
	if (stream->runtime->state == state)
		return 0;

	stream->runtime->state = state;

	pr_debug("Changing state to: %d\n", state);

	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_compr_stop_error);

static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
	int ret;

	/*
	 * We are called with lock held. So drop the lock while we wait for
	 * drain complete notification from the driver
	 *
	 * It is expected that driver will notify the drain completion and then
	 * stream will be moved to SETUP state, even if draining resulted in an
	 * error. We can trigger next track after this.
	 */
	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
	mutex_unlock(&stream->device->lock);

	/* we wait for drain to complete here, drain can return when
	 * interruption occurred, wait returned error or success.
	 * For the first two cases we don't do anything different here and
	 * return after waking up
	 */

	ret = wait_event_interruptible(stream->runtime->sleep,
			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
	if (ret == -ERESTARTSYS)
		pr_debug("wait aborted by a signal\n");
	else if (ret)
		pr_debug("wait for drain failed with %d\n", ret);

	wake_up(&stream->runtime->sleep);
	mutex_lock(&stream->device->lock);

	return ret;
}
static int snd_compr_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
	if (retval) {
		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	return snd_compress_wait_for_drain(stream);
}

static int snd_compr_next_track(struct snd_compr_stream *stream)
{
	int retval;

	/* only a running stream can transition to next track */
	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;

	/* next track doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* you can signal next track if this is intended to be a gapless stream
	 * and current track metadata is set
	 */
	if (stream->metadata_set == false)
		return -EPERM;

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
	if (retval != 0)
		return retval;
	stream->metadata_set = false;
	stream->next_track = true;
	return 0;
}

static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	/* partial drain doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* stream can be drained only when next track has been signalled */
	if (stream->next_track == false)
		return -EPERM;

	stream->partial_drain = true;
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
	if (retval) {
		pr_debug("Partial drain returned failure\n");
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	stream->next_track = false;
	return snd_compress_wait_for_drain(stream);
}
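/*
 * Illustrative gapless playback sequence from userspace (a sketch based on
 * the checks above; fd and the metadata value are made up):
 *
 *	struct snd_compr_metadata meta = {
 *		.key = SNDRV_COMPRESS_ENCODER_PADDING,
 *		.value = { padding_frames },
 *	};
 *
 *	ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &meta);	// metadata_set = true
 *	ioctl(fd, SNDRV_COMPRESS_NEXT_TRACK);		// needs metadata_set
 *	ioctl(fd, SNDRV_COMPRESS_PARTIAL_DRAIN);	// needs next_track
 *	// then keep write()ing the next track's data
 */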
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	int retval = -ENOTTY;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;

	mutex_lock(&stream->device->lock);
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
		retval = put_user(SNDRV_COMPRESS_VERSION,
				  (int __user *)arg) ? -EFAULT : 0;
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
		retval = snd_compr_get_caps(stream, arg);
		break;
#ifndef COMPR_CODEC_CAPS_OVERFLOW
	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
		retval = snd_compr_get_codec_caps(stream, arg);
		break;
#endif
	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
		retval = snd_compr_set_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
		retval = snd_compr_get_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
		retval = snd_compr_set_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
		retval = snd_compr_get_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
		retval = snd_compr_tstamp(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
		retval = snd_compr_ioctl_avail(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
		retval = snd_compr_pause(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_RESUME):
		retval = snd_compr_resume(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_START):
		retval = snd_compr_start(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_STOP):
		retval = snd_compr_stop(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
		retval = snd_compr_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
		retval = snd_compr_partial_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
		retval = snd_compr_next_track(stream);
		break;
	}
	mutex_unlock(&stream->device->lock);
	return retval;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations snd_compr_file_ops = {
	.owner		= THIS_MODULE,
	.open		= snd_compr_open,
	.release	= snd_compr_free,
	.write		= snd_compr_write,
	.read		= snd_compr_read,
	.unlocked_ioctl	= snd_compr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= snd_compr_ioctl_compat,
#endif
	.mmap		= snd_compr_mmap,
	.poll		= snd_compr_poll,
};

static int snd_compress_dev_register(struct snd_device *device)
{
	int ret;
	struct snd_compr *compr;

	if (snd_BUG_ON(!device || !device->device_data))
		return -EBADFD;
	compr = device->device_data;

	pr_debug("reg device %s, direction %d\n", compr->name,
		 compr->direction);
	/* register compressed device */
	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
				  compr->card, compr->device,
				  &snd_compr_file_ops, compr, &compr->dev);
	if (ret < 0) {
		pr_err("snd_register_device failed %d\n", ret);
		return ret;
	}
	return ret;
}

static int snd_compress_dev_disconnect(struct snd_device *device)
{
	struct snd_compr *compr;

	compr = device->device_data;
	snd_unregister_device(&compr->dev);
	return 0;
}
#ifdef CONFIG_SND_VERBOSE_PROCFS
static void snd_compress_proc_info_read(struct snd_info_entry *entry,
					struct snd_info_buffer *buffer)
{
	struct snd_compr *compr = (struct snd_compr *)entry->private_data;

	snd_iprintf(buffer, "card: %d\n", compr->card->number);
	snd_iprintf(buffer, "device: %d\n", compr->device);
	snd_iprintf(buffer, "stream: %s\n",
		    compr->direction == SND_COMPRESS_PLAYBACK
			? "PLAYBACK" : "CAPTURE");
	snd_iprintf(buffer, "id: %s\n", compr->id);
}

static int snd_compress_proc_init(struct snd_compr *compr)
{
	struct snd_info_entry *entry;
	char name[16];

	sprintf(name, "compr%i", compr->device);
	entry = snd_info_create_card_entry(compr->card, name,
					   compr->card->proc_root);
	if (!entry)
		return -ENOMEM;
	entry->mode = S_IFDIR | 0555;
	compr->proc_root = entry;

	entry = snd_info_create_card_entry(compr->card, "info",
					   compr->proc_root);
	if (entry)
		snd_info_set_text_ops(entry, compr,
				      snd_compress_proc_info_read);
	compr->proc_info_entry = entry;

	return 0;
}

static void snd_compress_proc_done(struct snd_compr *compr)
{
	snd_info_free_entry(compr->proc_info_entry);
	compr->proc_info_entry = NULL;
	snd_info_free_entry(compr->proc_root);
	compr->proc_root = NULL;
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
	strscpy(compr->id, id, sizeof(compr->id));
}
#else
static inline int snd_compress_proc_init(struct snd_compr *compr)
{
	return 0;
}

static inline void snd_compress_proc_done(struct snd_compr *compr)
{
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
}
#endif

static int snd_compress_dev_free(struct snd_device *device)
{
	struct snd_compr *compr;

	compr = device->device_data;
	snd_compress_proc_done(compr);
	put_device(&compr->dev);
	return 0;
}

/*
 * snd_compress_new: create new compress device
 * @card: sound card pointer
 * @device: device number
 * @dirn: device direction, should be of type enum snd_compr_direction
 * @id: ID string
 * @compr: compress device pointer
 */
int snd_compress_new(struct snd_card *card, int device,
		     int dirn, const char *id, struct snd_compr *compr)
{
	static const struct snd_device_ops ops = {
		.dev_free = snd_compress_dev_free,
		.dev_register = snd_compress_dev_register,
		.dev_disconnect = snd_compress_dev_disconnect,
	};
	int ret;

	compr->card = card;
	compr->device = device;
	compr->direction = dirn;
	mutex_init(&compr->lock);

	snd_compress_set_id(compr, id);

	snd_device_initialize(&compr->dev, card);
	dev_set_name(&compr->dev, "comprC%iD%i", card->number, device);

	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
	if (ret == 0)
		snd_compress_proc_init(compr);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_compress_new);

MODULE_DESCRIPTION("ALSA Compressed offload framework");
MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
MODULE_LICENSE("GPL v2");
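/*
 * Minimal driver-side registration sketch (illustrative only; the "foo"
 * names are made up and not part of this file).  A driver typically points
 * a struct snd_compr at its snd_compr_ops and registers it while building
 * the card:
 *
 *	static const struct snd_compr_ops foo_compr_ops = {
 *		.open		= foo_compr_open,
 *		.free		= foo_compr_free,
 *		.set_params	= foo_compr_set_params,
 *		.trigger	= foo_compr_trigger,
 *		.pointer	= foo_compr_pointer,
 *	};
 *
 *	compr->ops = &foo_compr_ops;
 *	compr->private_data = foo_chip;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK, "foo-compr",
 *			       compr);
 *
 * The device node itself is created at snd_card_register() time via
 * snd_compress_dev_register() above.
 */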