/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
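
/*
 * Example (illustrative only, not part of this file's API): a userspace
 * reader typically blocks in poll() on the buffer chrdev and then reads
 * whole scans, e.g.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	read(fd, scan, scan_bytes);
 *
 * where fd refers to /dev/iio:deviceX and scan_bytes matches the layout
 * advertised through scan_elements. "scan" and "scan_bytes" are placeholder
 * names.
 */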

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
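
/*
 * Illustration (hypothetical channel, not taken from a specific driver):
 * for an ADC voltage channel the attributes created by the helper below
 * appear under scan_elements/ roughly as
 *
 *	in_voltage0_en     - "0"/"1", wired to iio_scan_el_show/store
 *	in_voltage0_index  - position of the channel within the scan
 *	in_voltage0_type   - e.g. "le:s12/16>>0": little endian, signed,
 *			     12 valid bits stored in 16, no right shift
 *			     (the format printed by iio_show_fixed_type)
 */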

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
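	/*
	 * Worked example (hypothetical scan mask): a 16 bit channel followed
	 * by a 32 bit channel and a 64 bit timestamp gives
	 *	bytes = ALIGN(0, 2) + 2 = 2
	 *	bytes = ALIGN(2, 4) + 4 = 8
	 *	bytes = ALIGN(8, 8) + 8 = 16
	 * i.e. every element is naturally aligned to its storage size.
	 */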
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
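	/*
	 * Example (hypothetical masks): two attached buffers with scan masks
	 * 0b0011 and 0b0100 yield the compound mask 0b0111 built below. If
	 * the driver supplies available_scan_masks, the first listed mask
	 * that is a superset of the compound mask becomes the active scan
	 * mask; otherwise the compound mask itself is used.
	 */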
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
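
/*
 * Example (hedged sketch, not from a particular driver): devices that can
 * only sample one channel at a time typically plug the helper below into
 * their buffer setup ops, e.g.
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * "foo_buffer_setup_ops" is a placeholder name.
 */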

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	/* Valid bits run from 0 to masklength - 1 */
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
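
/*
 * Example (illustrative only): drivers typically call iio_push_to_buffers()
 * from their trigger handler once a scan_bytes sized sample has been
 * assembled, roughly
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		(fill "data" with one scan worth of samples here)
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 *
 * "foo_trigger_handler" and "data" are placeholder names.
 */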

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
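
/*
 * Usage note (summary of the helpers above, nothing new): code that stores a
 * pointer to a buffer takes a reference with iio_buffer_get() and drops it
 * with iio_buffer_put(); the final put invokes iio_buffer_release(), which
 * calls the implementation's ->release() callback. iio_buffer_activate() and
 * iio_buffer_deactivate() earlier in this file pair get/put around membership
 * of indio_dev->buffer_list.
 */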