// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}
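
/*
 * Illustrative device tree snippet (hypothetical node names) for the binding
 * these OF helpers parse: the provider declares how many cells an
 * "io-channels" specifier carries, and each consumer specifier is translated
 * to a channel index by the driver's of_xlate callback, or by
 * __of_iio_simple_xlate() below for the common 1:1 case:
 *
 *	adc: adc@48 {
 *		compatible = "vendor,some-adc";
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 3>;		// channel index 3 of the ADC
 *		io-channel-names = "battery";
 *	};
 */
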
/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case
 * of 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * that the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}
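
/*
 * Illustrative snippet (hypothetical nodes): when a node does not carry
 * "io-channels" itself, the lookup below walks up the tree as long as the
 * parent node has an "io-channel-ranges" property, so channels can be
 * inherited from an ancestor:
 *
 *	parent {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vdd", "vbat";
 *		io-channel-ranges;
 *
 *		child {
 *			// of_iio_channel_get_by_name(child, "vbat")
 *			// resolves to <&adc 1> via the parent node.
 *		};
 *	};
 */
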
struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
					       const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name, index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
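
/*
 * Illustrative non-DT mapping, as consumed by iio_channel_get_sys() above;
 * device and label names are hypothetical. A sentinel entry with a NULL
 * consumer_dev_name terminates the array:
 *
 *	static struct iio_map board_adc_maps[] = {
 *		{
 *			.consumer_dev_name = "some-consumer.0",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "CH0",	// datasheet_name
 *		},
 *		{ },
 *	};
 *
 *	// In the ADC driver, typically before iio_device_register():
 *	ret = iio_map_array_register(indio_dev, board_adc_maps);
 */
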
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
						    struct device_node *np,
						    const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = of_iio_channel_get_by_name(np, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}
	if (channel == NULL) {
		/*
		 * of_iio_channel_get_by_name() returns NULL on "not found";
		 * convert to an error so callers can rely on IS_ERR().
		 */
		devres_free(ptr);
		return ERR_PTR(-ENODEV);
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (!name || strcmp(name, c->map->consumer_dev_name) == 0)
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only drop the references actually taken above */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
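
/*
 * Illustrative consumer usage (hypothetical driver code). The array returned
 * by iio_channel_get_all() is terminated by an entry whose indio_dev is NULL,
 * which is also how iio_channel_release_all() below finds the end:
 *
 *	struct iio_channel *chans = iio_channel_get_all(dev);
 *	int i;
 *
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (i = 0; chans[i].indio_dev; i++)
 *		;	// use chans[i]
 *	iio_channel_release_all(chans);
 */
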
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
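
/*
 * Worked example (made-up numbers) of the conversion implemented below:
 * with raw = 100, a channel offset of 2 and an IIO_VAL_FRACTIONAL scale of
 * 1800/4096 (e.g. a 12-bit ADC with a 1.8 V reference, scale in mV), a
 * conversion with consumer scale factor 1 yields
 *
 *	processed = (100 + 2) * 1800 / 4096 = 44 mV (truncated)
 */
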
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available, apply the consumer
		 * scale to the raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
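
/*
 * Illustrative use of the "available" helpers that follow (hypothetical
 * variables): for IIO_CHAN_INFO_RAW the list is plain integers, so
 * iio_read_avail_channel_raw() can be used directly:
 *
 *	const int *vals;
 *	int len, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &len);
 *	if (!ret)
 *		;	// vals[0..len-1] are the possible raw values
 */
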
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
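
/*
 * Illustrative write path (hypothetical DAC consumer): raw writes go through
 * iio_write_channel_attribute()/iio_write_channel_raw() below:
 *
 *	ret = iio_write_channel_raw(dac_chan, code);
 */
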
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
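
/*
 * Illustrative ext_info access (hypothetical attribute name) using the
 * helpers above. The buffer must be able to hold a full sysfs attribute
 * value, i.e. up to PAGE_SIZE bytes:
 *
 *	char *buf = (char *)get_zeroed_page(GFP_KERNEL);
 *	ssize_t len = iio_read_channel_ext_info(chan, "powerdown", buf);
 */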