// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
			      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
__poll_t iio_buffer_poll(struct file *filp,
			 struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev:	The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	wake_up(&buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
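
/*
 * The "type" attribute produced below encodes the scan element layout as
 * [be|le]:[s|u]bits/storagebits[Xrepeat]>>shift. For example, a hypothetical
 * channel with signed 12-bit samples kept in 16-bit little-endian storage and
 * shifted right by 4 would read as "le:s12/16>>4"; with a repeat count of 8
 * the same channel would read as "le:s12/16X8>>4".
 */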

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
};

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}
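
/*
 * iio_compute_scan_bytes() below aligns each enabled scan element to its own
 * storage size within the scan, then pads the whole scan to the largest
 * element. As a hypothetical example: a 2-byte channel, a 4-byte channel and
 * an 8-byte timestamp end up at offsets 0, 4 and 8 respectively, for a total
 * scan size of 16 bytes.
 */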

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
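
/*
 * iio_buffer_add_demux() and iio_buffer_update_demux() below build a table
 * that translates from the device's active scan layout to the layout a given
 * buffer requested, coalescing adjacent copies into a single entry. As a
 * hypothetical example: if the device captures channels 0, 1 and 2 at 4 bytes
 * each but a buffer only enabled channels 0 and 2, the table ends up with two
 * entries, {from 0, to 0, length 4} and {from 8, to 4, length 4}; had
 * channels 0 and 1 been enabled instead, the two copies would merge into a
 * single {from 0, to 0, length 8} entry.
 */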

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
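
/*
 * Bringing buffers up follows a fixed order: preenable, update_scan_mode,
 * hardware FIFO watermark, per-buffer enable, trigger attach (in triggered
 * mode) and finally postenable. Teardown in iio_disable_buffers() runs the
 * corresponding steps in the opposite order (predisable, trigger detach,
 * per-buffer disable, postdisable), as do the error paths below.
 */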

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
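
/*
 * Taken together, the attributes above form the usual userspace bring-up
 * sequence: enable the desired channels through the scan_elements <chan>_en
 * attributes, size the buffer through buffer/length, optionally set
 * buffer/watermark, and finally write 1 to buffer/enable. For a hypothetical
 * device with a channel named in_voltage0, that means writing 1 to
 * scan_elements/in_voltage0_en, a length such as 128 to buffer/length and 1
 * to buffer/enable, after which samples can be read from the character
 * device. Changing any of these while the buffer is enabled returns -EBUSY.
 */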

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		   iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, attrcount;
	const struct iio_chan_spec *channels;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	attrcount = 0;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	attrn = 0;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(buffer->buffer_group.attrs);

	return ret;
}

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	const struct iio_chan_spec *channels;
	int i;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	return __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev);
}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
{
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.attrs);
	kfree(buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	__iio_buffer_free_sysfs_and_mask(buffer);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:	iio_dev structure for device.
 * @data:	Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
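
/*
 * In a triggered-buffer driver, iio_push_to_buffers() (or the
 * iio_push_to_buffers_with_timestamp() helper) is typically called from the
 * trigger's bottom-half handler. A minimal sketch, with foo_trigger_handler
 * and the scan buffer handling purely illustrative:
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		// fill a scan buffer matching indio_dev->active_scan_mask
 *		// (allocation and channel reads not shown)
 *		iio_push_to_buffers_with_timestamp(indio_dev, scan,
 *						   iio_get_time_ns(indio_dev));
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */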

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. The function should only be called
 * at most once per device.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	indio_dev->buffer = iio_buffer_get(buffer);
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);