xref: /openbmc/linux/drivers/iio/inkern.c (revision 887069f4)
// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, in-kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
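
/*
 * Example (illustrative, hypothetical names): an ADC driver can expose its
 * channels to a named consumer by registering a map array from probe().
 * Registration stops at the first entry with a NULL consumer_dev_name, so
 * the array must be terminated by an empty entry, and adc_channel_label
 * must match a channel's datasheet_name:
 *
 *	static struct iio_map adc_maps[] = {
 *		{
 *			.consumer_dev_name = "some-consumer.0",
 *			.consumer_channel = "vcc",
 *			.adc_channel_label = "channel0",
 *		},
 *		{ }
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 */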

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common
 * case of 1:1 mapped channels in IIO chips. It performs only one sanity
 * check: that the IIO index is less than num_channels (as specified in
 * the iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
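
/*
 * Illustrative device tree fragment (hypothetical nodes): with
 * "#io-channel-cells = <1>", the single cell is the channel index that
 * __of_iio_simple_xlate() validates and returns:
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>;
 *	};
 */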

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
					       const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name, index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
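
/*
 * Illustrative binding (hypothetical nodes): names are resolved through the
 * "io-channel-names" property, so with
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vbat", "vdd";
 *	};
 *
 * of_iio_channel_get_by_name(np, "vdd") returns the channel behind
 * <&adc 1>.
 */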

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
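
/*
 * Example (sketch, hypothetical channel name): a consumer pairs
 * iio_channel_get() with iio_channel_release(), or uses the devm_ variant
 * below to drop the reference automatically:
 *
 *	struct iio_channel *chan;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	iio_channel_release(chan);
 */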

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
						    struct device_node *np,
						    const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = of_iio_channel_get_by_name(np, channel_name);
	if (IS_ERR(channel))
		return channel;
	/* of_iio_channel_get_by_name() returns NULL if not found */
	if (channel == NULL)
		return ERR_PTR(-ENODEV);

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		nummaps++;
	}

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only drop the references taken in the fill loop above */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
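
/*
 * Example (sketch): the array returned by the *_get_all() variants is
 * terminated by an entry with a NULL indio_dev, so it can be walked
 * without a separate count:
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = devm_iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		...;
 */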
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply the consumer
		 * scale to the raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
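
/*
 * Worked example (illustrative numbers): with raw = 100, no offset, an
 * IIO_VAL_FRACTIONAL scale of scale_val = 2500, scale_val2 = 4096 (i.e.
 * 2500/4096 mV per bit) and a consumer scale of 1000, the result is
 * 100 * 2500 * 1000 / 4096 = 61035, i.e. the reading in microvolts.
 */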

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
							scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
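
/*
 * Example (sketch): a consumer that wants microvolts directly can pass a
 * scale factor of 1000, assuming the channel's processed value follows
 * the usual IIO convention of millivolts:
 *
 *	int uv;
 *
 *	ret = iio_read_channel_processed_scale(chan, &uv, 1000);
 */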

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
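			/* vals[] is { min, step, max } */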
			*val = vals[2];
			break;
		default:
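			/* two-part values: { min, min2, step, step2, max, max2 } */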
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;

	/* Need to verify underlying driver has not gone away */
	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
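
/*
 * Example (sketch, hypothetical channel): writing a raw value to a DAC
 * channel obtained earlier via iio_channel_get(); val2 is unused for
 * IIO_CHAN_INFO_RAW writes:
 *
 *	ret = iio_write_channel_raw(dac_chan, 2048);
 */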

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
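
/*
 * Example (sketch, hypothetical attribute name): ext_info attributes are
 * addressed by name, and the read callback formats into a sysfs-style
 * buffer supplied by the caller (one page, as with sysfs show):
 *
 *	ssize_t len;
 *
 *	len = iio_read_channel_ext_info(chan, "powerdown", buf);
 */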

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
955