/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure pointer for the char device
 * @buf:	destination buffer for the data being read
 * @n:		number of bytes requested
 * @f_ps:	file position (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	file structure pointer for the char device
 * @wait:	poll table to which the wait queue is added
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

/**
 * iio_buffer_init() - Initialize the buffer structure
 * @buffer: buffer to be initialized
 **/
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;

	return attrcount;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
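	/*
	 * Each enabled channel is padded to its own storage size, so the
	 * offsets depend on channel order. Illustrative example (hypothetical
	 * scan): two channels stored in 16 bits each occupy bytes 0-1 and
	 * 2-3; a 64-bit timestamp is then aligned up to byte 8, giving 16
	 * bytes per scan in total.
	 */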
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
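	/*
	 * The effective mask is the union of the scan masks of every attached
	 * buffer. When the device supplies available_scan_masks, the first
	 * entry that covers that union is used instead; for instance
	 * (hypothetically), a device that can only capture channels in pairs
	 * would satisfy a request for channel 0 alone with the {0, 1} mask,
	 * and the demux would later drop channel 1 for that buffer.
	 */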
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				return -EINVAL;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
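
/*
 * Illustrative usage sketch (hypothetical driver, not part of this file): a
 * device that can only sample a single channel at a time would typically wire
 * the helper below into its buffer setup ops, e.g.
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.preenable		= foo_buffer_preenable,
 *		.validate_scan_mask	= iio_validate_scan_mask_onehot,
 *	};
 *
 * The foo_* names are placeholders.
 */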

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
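
/*
 * Demultiplexing: the device pushes scans laid out according to the compound
 * active_scan_mask, while an individual buffer may have enabled only a subset
 * of those channels. The table below records the memcpy operations needed to
 * repack a scan for that buffer. Illustrative example (hypothetical scan):
 * with three 16-bit channels active and a buffer that enabled only channels 0
 * and 2, the table holds {from = 0, to = 0, length = 2} and
 * {from = 4, to = 2, length = 2}.
 */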

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);