/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
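
/*
 * Illustrative sketch (not part of this file): a hypothetical userspace
 * consumer of the character device serviced by iio_buffer_read_first_n_outer()
 * and iio_buffer_poll() above.  The device node name and read size are
 * assumptions for the example only.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[256];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;
 *		... process n bytes of scan data ...
 *	}
 */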

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
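
/*
 * Example of the resulting sysfs interface (channel names are hypothetical
 * and depend on the driver's channel specification): a channel such as
 * "in_voltage0" gains in_voltage0_en, in_voltage0_index and in_voltage0_type
 * entries in the scan_elements group created below, backed by the handlers
 * above.  iio_show_fixed_type() formats the _type attribute as
 * <endian>:<sign><realbits>/<storagebits>>><shift>; a signed, little-endian,
 * 12-bit value stored in 16 bits with no shift reads back as "le:s12/16>>0".
 * Writing "1" to an _en attribute (iio_scan_el_store) adds that channel to
 * the buffer's scan mask, and is rejected with -EBUSY while the buffer is
 * active.
 */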

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
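
/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * would typically use iio_buffer_register()/iio_buffer_unregister().
 * indio_dev->buffer must already point at an allocated buffer
 * implementation; my_allocate_buffer() is a placeholder for whatever the
 * driver uses.
 *
 *	static int my_probe(...)
 *	{
 *		...
 *		indio_dev->buffer = my_allocate_buffer(indio_dev);
 *		ret = iio_buffer_register(indio_dev, indio_dev->channels,
 *					  indio_dev->num_channels);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 *
 *	static int my_remove(...)
 *	{
 *		...
 *		iio_buffer_unregister(indio_dev);
 *		...
 *	}
 */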

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
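
/*
 * Worked example for iio_compute_scan_bytes(): with one enabled channel of
 * storagebits = 16 (2 bytes) plus the timestamp (storagebits = 64, 8 bytes),
 * the loop yields bytes = 2, the timestamp is then aligned up to offset 8
 * and appended, giving 8 + 8 = 16 bytes per scan.  iio_update_buffers()
 * below stores this result in indio_dev->scan_bytes.
 */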

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		list_del_init(&buffer->buffer_list);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del(&insert_buffer->buffer_list);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
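
/*
 * Illustrative sketch (not part of this file): a device that can sample only
 * one channel at a time would typically wire the helper above into its
 * buffer setup ops, which iio_validate_scan_mask() below consults whenever a
 * scan element is enabled.  The ops structure shown assumes the usual
 * struct iio_buffer_setup_ops.
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable		= &iio_sw_buffer_preenable,
 *		.validate_scan_mask	= &iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */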

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
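
/*
 * Illustrative sketch (not part of this file): a hypothetical triggered
 * capture path feeding iio_push_to_buffers().  The handler name, data source
 * and fixed-size scratch buffer are assumptions; a real driver sizes one
 * scan using indio_dev->scan_bytes.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 data[32];
 *
 *		... fill 'data' with one scan laid out per active_scan_mask ...
 *		iio_push_to_buffers(indio_dev, data);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */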

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);