/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
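/*
 * Illustrative sketch (not part of the core; the type name is hypothetical):
 * the "first element" rule that iio_buffer_read_first_n_outer() relies on
 * means a buffer implementation is laid out along these lines:
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;	-- must remain the first member
 *		...				-- implementation private state
 *	};
 *
 * so the struct iio_buffer pointer passed around by the core can be converted
 * back to the implementation type with a cast or container_of().
 */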
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
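/*
 * Example (sketch; device and channel names are hypothetical): the callbacks
 * above back the scan_elements/ attributes in sysfs, e.g.
 *
 *	$ cat /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_type
 *	le:s12/16>>0
 *	$ echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *
 * Following the format string in iio_show_fixed_type(), "le:s12/16>>0" reads
 * as little-endian, signed, 12 valid bits stored in 16 bits, right shifted
 * by 0 before use.
 */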
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
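/*
 * Sketch of the expected call pattern (hypothetical driver code, not taken
 * from a particular driver): iio_buffer_register() is called at probe time
 * once indio_dev->buffer has been allocated, and iio_buffer_unregister()
 * undoes it on the error path or at remove:
 *
 *	indio_dev->buffer = my_allocate_buffer();	-- hypothetical helper
 *	ret = iio_buffer_register(indio_dev,
 *				  indio_dev->channels,
 *				  indio_dev->num_channels);
 *	if (ret)
 *		goto error_free_buffer;
 *	...
 *	iio_buffer_unregister(indio_dev);
 */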
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL is used as the error indicator as it never makes sense as a match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
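/*
 * Worked example for iio_compute_scan_bytes() (assumed channel layout, for
 * illustration only): with a single enabled channel of 16 storagebits plus
 * the timestamp (64 storagebits), the loop yields bytes = 2 after the
 * channel; the timestamp is then naturally aligned, ALIGN(2, 8) = 8, and
 * 8 + 8 = 16 bytes per scan in total.
 */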
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}
	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
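/*
 * Sketch (hypothetical driver code): the setup_ops callbacks used by
 * iio_update_buffers() above are supplied by the driver before any buffer is
 * enabled, along the lines of
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable  = &iio_sw_buffer_preenable,
 *		.postenable = &iio_triggered_buffer_postenable,
 *		.predisable = &iio_triggered_buffer_predisable,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 *
 * The triggered-buffer helpers come from the trigger consumer support; each
 * hook is optional and may be left NULL, as the checks above assume.
 */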
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
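/*
 * Sketch (hypothetical driver code): a device that can only sample a single
 * channel at a time plugs the helper above into its setup ops so that
 * iio_scan_mask_set() below rejects masks with more than one bit set:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *		...
 *	};
 */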
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
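/*
 * Worked example (assumed layout, for illustration only): if the scan pushed
 * in by the driver carries three 16-bit samples at byte offsets 0, 2 and 4,
 * and this buffer only wants the first and the third, a demux_list of
 *
 *	{ .from = 0, .to = 0, .length = 2 }
 *	{ .from = 4, .to = 2, .length = 2 }
 *
 * makes iio_demux() above copy just those two samples into a packed 4-byte
 * bounce buffer, which is what ends up stored in this particular buffer.
 */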
static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
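/*
 * Sketch (hypothetical driver code, illustration only): data normally enters
 * the paths above from a trigger handler, which assembles one scan and hands
 * it to the core for per-buffer demux and storage:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 data[MY_SCAN_BYTES];		-- at least indio_dev->scan_bytes
 *
 *		my_read_enabled_channels(indio_dev, data);	-- hypothetical
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */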