// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (!maps)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb,
					indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
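
/*
 * Example usage of the map registration API above (a hypothetical sketch,
 * not taken from an in-tree driver; the consumer name and channel labels
 * are invented):
 *
 *	static struct iio_map adc_default_maps[] = {
 *		{
 *			.consumer_dev_name = "some-battery-monitor",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "VIN0",
 *		},
 *		{ }	// sentinel: consumer_dev_name == NULL ends the array
 *	};
 *
 * In the provider's probe(), tying unregistration to the device lifetime:
 *
 *	ret = devm_iio_map_array_register(dev, indio_dev, adc_default_maps);
 *	if (ret)
 *		return ret;
 */
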
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case
 * of 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}
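
/*
 * For orientation, the firmware layout the lookup above expects, as an
 * illustrative devicetree sketch (node names, compatible string and cell
 * values are invented; the argument count is fixed by "#io-channel-cells"):
 *
 *	adc: adc@48 {
 *		compatible = "vendor,adc";	// hypothetical binding
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 3>;
 *		io-channel-names = "vin", "temp";
 *	};
 *
 * fwnode_property_get_reference_args() resolves entry @index of
 * "io-channels", and the argument cell (0 or 3 here) is mapped to a channel
 * by the provider's ->fwnode_xlate() or by __fwnode_iio_simple_xlate().
 */
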
static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;
	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not fall
			 * back to any other lookup. Hence, explicitly return
			 * -EINVAL (maybe not the best error code) so that the
			 * caller won't do a system-wide lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args()
		 * fails with -EINVAL or -ENOENT (ACPI case), which is
		 * expected. We should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, we should only continue the lookup if
		 * fwnode_property_get_reference_args() returned -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}

struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		/* Look the name up on the parent, not the original node */
		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
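
/*
 * A minimal consumer-side sketch for the named lookup above (hypothetical
 * driver code; "vin" must appear in the consumer's "io-channel-names", and
 * devm_fwnode_iio_channel_get_by_name() is defined later in this file):
 *
 *	struct iio_channel *chan;
 *
 *	chan = devm_fwnode_iio_channel_get_by_name(dev, dev_fwnode(dev),
 *						   "vin");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 */
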
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (!c)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
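
/*
 * Typical consumer usage of the above (an illustrative sketch; the channel
 * name "vbat" would come from "io-channel-names" or a registered iio_map):
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = devm_iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_processed(chan, &val);
 */
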
struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (!name || strcmp(name, c->map->consumer_dev_name) == 0)
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only drop the references that were actually taken above */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
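
/*
 * The array returned by iio_channel_get_all() is sentinel-terminated, so a
 * consumer can walk it without carrying a count (illustrative sketch):
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = devm_iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		dev_dbg(dev, "channel type %d\n", chan->channel->type);
 */
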
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * the raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
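
/*
 * Worked example for the conversion above (numbers invented for
 * illustration): with raw = 100, offset = 8 (IIO_VAL_INT), a channel scale
 * of 610/1000 (IIO_VAL_FRACTIONAL: scale_val = 610, scale_val2 = 1000) and
 * a consumer scale of 1, the result is
 *
 *	processed = div_s64((100 + 8) * 610 * 1, 1000) = 65
 *
 * in the channel's processed unit (e.g. mV for a voltage channel).
 */
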
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
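
/*
 * Example for iio_read_channel_processed_scale() (hypothetical consumer):
 * processed voltage values are conventionally in millivolts, so a consumer
 * scale of 1000 yields microvolts, assuming the provider follows that
 * convention:
 *
 *	int uv, ret;
 *
 *	ret = iio_read_channel_processed_scale(chan, &uv, 1000);
 *	if (ret < 0)
 *		return ret;
 */
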
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
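
/*
 * Layout notes for the "available" helpers above: IIO_AVAIL_RANGE describes
 * [min, step, max], i.e. three plain ints for IIO_VAL_INT (max at vals[2])
 * or three (val, val2) pairs otherwise (max pair at vals[4]/vals[5]);
 * IIO_AVAIL_LIST is a flat array scanned for its largest element. A
 * consumer sketch:
 *
 *	const int *raw_vals;
 *	int n, i, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &raw_vals, &n);
 *	if (ret == IIO_AVAIL_LIST)
 *		for (i = 0; i < n; i++)
 *			pr_debug("raw value %d available\n", raw_vals[i]);
 */
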
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
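
/*
 * ext_info attributes are driver-defined, string-typed extensions that
 * follow sysfs conventions, so the read buffer should be a full page. A
 * consumer sketch (the attribute name "powerdown" is invented and must
 * match an entry in the provider's ext_info table):
 *
 *	char *buf = (char *)get_zeroed_page(GFP_KERNEL);
 *	ssize_t len;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	len = iio_read_channel_ext_info(chan, "powerdown", buf);
 *	if (len > 0)
 *		iio_write_channel_ext_info(chan, "powerdown", "1\n", 2);
 *	free_page((unsigned long)buf);
 */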