/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && !avail && to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or the number
 *	   of bytes read
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	size_t datum_size;
	size_t to_wait = 0;
	size_t to_read;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	to_read = min_t(size_t, n / datum_size, rb->watermark);

	if (!(filp->f_flags & O_NONBLOCK))
		to_wait = to_read;

	do {
		ret = wait_event_interruptible(rb->pollq,
			iio_buffer_ready(indio_dev, rb, to_wait, to_read));
		if (ret)
			return ret;

		if (!indio_dev->info)
			return -ENODEV;

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}
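
/*
 * Illustrative sketch of the userspace side of this interface (not part
 * of this file; the device node name and sizes are assumptions): a reader
 * typically poll()s the character device, then read()s whole samples, and
 * handles -EAGAIN when the device was opened O_NONBLOCK.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// sleeps until the watermark is met
 *	read(fd, data, n);	// n should be a multiple of the scan size
 *				// so only complete samples are returned
 */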

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
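
/*
 * Illustrative sketch of a buffer implementation (hypothetical names, not
 * part of this file): the core relies on struct iio_buffer being the first
 * member, so the implementation structure can be recovered from the
 * struct iio_buffer pointer this file hands around; iio_buffer_init() is
 * then called on allocation.
 *
 *	struct my_fifo {
 *		struct iio_buffer buffer;	// must stay first
 *		struct kfifo kf;		// implementation detail
 *	};
 *
 *	static struct iio_buffer *my_fifo_allocate(void)
 *	{
 *		struct my_fifo *fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
 *
 *		if (!fifo)
 *			return NULL;
 *		iio_buffer_init(&fifo->buffer);
 *		return &fifo->buffer;
 *	}
 */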

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL is used as an error indicator, as a NULL mask makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
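
/*
 * Illustrative sketch (hypothetical driver data, not part of this file):
 * the loop above relies on available_scan_masks being a zero-terminated
 * array of candidate masks; the first entry that covers the requested
 * channels wins, so drivers usually list the most restrictive masks
 * first. E.g. a device that can capture channel 0 alone or channels 0-2
 * together:
 *
 *	static const unsigned long my_scan_masks[] = { BIT(0), 0x7, 0 };
 *	...
 *	indio_dev->available_scan_masks = my_scan_masks;
 */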

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;

	return attrcount;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
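
/*
 * Worked example (illustrative): two enabled 16-bit channels
 * (storagebits = 16, so length = 2) followed by a 64-bit timestamp.
 * Channel 0 lands at offset 0 and channel 1 at offset 2; the timestamp is
 * then aligned to its own 8-byte size, giving offset 8 and a total scan
 * size of 16 bytes, with bytes 4-7 left as padding.
 */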

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		iio_free_scan_mask(indio_dev, old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		iio_free_scan_mask(indio_dev, old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		kfree(compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				dev_dbg(&indio_dev->dev,
					"Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
		indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
	} else { /* Should never be reached */
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	iio_free_scan_mask(indio_dev, old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = old_mask;
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
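
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * an in-kernel consumer attaches its own buffer and later detaches it,
 * leaving the core to recompute the combined scan mask and mode.
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	// attach
 *	if (ret)
 *		return ret;
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);		// detach
 */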

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
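
/*
 * Illustrative sketch of a producer (hypothetical driver code, not part
 * of this file): a triggered-buffer bottom half typically fills one scan
 * worth of data and pushes it to every attached buffer:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];	// must hold indio_dev->scan_bytes
 *
 *		my_read_scan(indio_dev, scan);	// hypothetical helper
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */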

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
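
/*
 * Worked example (illustrative): active_scan_mask covers channels
 * {0, 1, 2}, all 16-bit, while this buffer only enabled {0, 2}. The
 * update above then builds two entries: copy 2 bytes from source offset 0
 * to destination offset 0 (channel 0), and copy 2 bytes from source
 * offset 4 to destination offset 2 (channel 2, skipping channel 1).
 * Adjacent regions would have been merged into a single entry by
 * iio_buffer_add_demux().
 */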

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
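
/*
 * Illustrative reference-counting sketch (hypothetical caller, not part
 * of this file): every stored pointer to a buffer should hold its own
 * reference, dropped with iio_buffer_put() when the pointer goes away.
 *
 *	struct iio_buffer *b = my_fifo_allocate();	// initial reference
 *
 *	indio_dev->buffer = iio_buffer_get(b);		// core's reference
 *	...
 *	iio_buffer_put(b);	// drop our reference; the buffer is only
 *				// released once the core drops its own
 */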