/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);


/* Assumes the exact same array (i.e. the same memory locations) is passed
 * at unregistration as was used at registration, rather than doing any
 * more complex checking of contents.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev,
			     struct iio_map *maps)
{
	int i = 0, ret = 0;
	bool found_it;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		found_it = false;
		list_for_each_entry(mapi, &iio_map_list, l)
			if (&maps[i] == mapi->map) {
				list_del(&mapi->l);
				kfree(mapi);
				found_it = true;
				break;
			}
		if (!found_it) {
			ret = -ENODEV;
			goto error_ret;
		}
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
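
/*
 * Usage sketch (illustration only, not part of this file): board or driver
 * code hands a NULL-terminated array of maps to the calls above. All the
 * "foo" names below are hypothetical.
 *
 *	static struct iio_map foo_adc_maps[] = {
 *		{
 *			.consumer_dev_name = "foo-battery",
 *			.consumer_channel = "battery_voltage",
 *			.adc_channel_label = "vin0",
 *		},
 *		{ },
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, foo_adc_maps);
 *	...
 *	iio_map_array_unregister(indio_dev, foo_adc_maps);
 *
 * The same array pointer must be used for both calls, per the comment above
 * iio_map_array_unregister().
 */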

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}


struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
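
/*
 * Consumer usage sketch (illustration only): obtaining and releasing a
 * single mapped channel. The names are hypothetical and would normally
 * come from an iio_map array registered as in the example above.
 *
 *	struct iio_channel *chan;
 *
 *	chan = iio_channel_get("foo-battery", "battery_voltage");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	iio_channel_release(chan);
 */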

struct iio_channel *iio_channel_get_all(const char *name)
{
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (name == NULL)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL-terminated array to save passing size */
	chans = kzalloc(sizeof(*chans) * (nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map, fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
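
/*
 * Usage sketch (illustration only): walking the NULL-terminated array
 * returned by iio_channel_get_all(). "foo-battery" is a hypothetical
 * consumer name.
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all("foo-battery");
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		... use chan ...
 *	iio_channel_release_all(chans);
 */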

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;

	if (val2 == NULL)
		val2 = &unused;

	return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
						val, val2, info);
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
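
/*
 * Usage sketch (illustration only): reading the unscaled device-level value
 * from a channel obtained via iio_channel_get().
 *
 *	int raw, ret;
 *
 *	ret = iio_read_channel_raw(chan, &raw);
 *	if (ret < 0)
 *		return ret;
 *
 * On success, raw holds the value before offset and scale are applied; the
 * conversion helpers below turn it into a processed value in real units.
 */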

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	/* Apply any offset the driver reports before scaling. */
	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
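
/*
 * Worked example with hypothetical numbers: a 12-bit ADC with a 2500 mV
 * reference might report IIO_VAL_FRACTIONAL with scale_val = 2500 and
 * scale_val2 = 4096, i.e. 2500/4096 mV per code. For raw = 1024, no offset
 * and scale = 1:
 *
 *	processed = 1024 * 2500 * 1 / 4096 = 625	(millivolts)
 *
 * Passing scale = 1000 instead yields 625000, i.e. microvolts; the scale
 * argument is how callers choose the unit of the result.
 */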

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
							scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
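
/*
 * Usage sketch (illustration only): fetching a value in the channel type's
 * base unit (e.g. millivolts for a voltage channel), whether the driver
 * provides IIO_CHAN_INFO_PROCESSED directly or only raw plus scale.
 *
 *	int mv, ret;
 *
 *	ret = iio_read_channel_processed(chan, &mv);
 *	if (ret < 0)
 *		return ret;
 */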

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
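
/*
 * Decoding sketch (illustration only): on success the return value is the
 * scale type from the driver's read_raw(), which tells the caller how to
 * combine val and val2; a negative return is an error.
 *
 *	int val, val2, ret;
 *
 *	ret = iio_read_channel_scale(chan, &val, &val2);
 *	switch (ret) {
 *	case IIO_VAL_INT:
 *		scale = val;
 *		break;
 *	case IIO_VAL_INT_PLUS_MICRO:
 *		scale = val + val2 / 1000000.0;
 *		break;
 *	...
 *	}
 */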

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
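
/*
 * Usage sketch (illustration only): a consumer can check what quantity a
 * channel measures before deciding how to interpret its values.
 *
 *	enum iio_chan_type type;
 *
 *	ret = iio_get_channel_type(chan, &type);
 *	if (ret < 0)
 *		return ret;
 *	if (type == IIO_VOLTAGE)
 *		... processed values are in millivolts ...
 */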