/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

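/**
 * iio_buffer_data_available() - check whether any data is ready for reading
 * @buf: the buffer to check
 *
 * Uses the buffer implementation's data_available() callback when one is
 * provided; older implementations fall back to the stufftoread flag.
 */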
static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure pointer for the chrdev
 * @buf:	destination buffer in userspace
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: the number of bytes read, or a negative error code.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

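	/*
	 * Loop until the underlying buffer hands back some data. A reader
	 * with O_NONBLOCK set gets -EAGAIN instead of sleeping; otherwise we
	 * sleep until data arrives or the device disappears.
	 */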
	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	file structure pointer for device access
 * @wait:	poll table structure pointer to which the wait queue is added
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading, 0 otherwise.
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	/* poll() must return an event mask, not an error code */
	if (!indio_dev->info || !rb)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the buffer waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

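/**
 * iio_buffer_init() - initialize the fields common to all buffer implementations
 * @buffer: the buffer to be initialized
 *
 * Sets up the demux and buffer-list heads, the poll wait queue and the
 * reference count. Buffer implementations must call this before handing the
 * buffer to the core.
 */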
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

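/*
 * Format of the scan_elements "type" attribute:
 * [be|le]:[s|u]<realbits>/<storagebits>>><shift>, e.g. "le:s12/16>>4"
 * describes a little-endian signed 12-bit value stored in 16 bits that must
 * be shifted right by 4 before use.
 */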
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

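/*
 * Create the scan_elements attributes for one channel: its scan index, its
 * type description and an enable switch (which toggles the timestamp for the
 * IIO_TIMESTAMP channel). Returns the number of attributes added or a
 * negative error code.
 */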
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;

	return attrcount;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Set up the scan element sysfs entries for each channel */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length &&
	    val == buffer->access->get_length(buffer))
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/*
 * Find the first entry in the zero-terminated array of available scan masks
 * that is a superset of the requested mask. NULL is used as the error
 * indicator (no match, or an empty request) as returning a mask doesn't
 * make sense in that case.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

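/*
 * Compute how many bytes one demuxed scan occupies: each enabled channel's
 * sample is naturally aligned to its own storage size, and the timestamp
 * (when enabled) is appended last with the same alignment rule.
 */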
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

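/*
 * Force every attached buffer out of the active list and drop back to direct
 * mode, running the predisable/postdisable hooks around the teardown. Used
 * when the device goes away (typically at unregister) rather than through
 * the normal enable/disable path.
 */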
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

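/*
 * Apply a buffer list change while the device is (potentially) running:
 * tear down the current capture, insert/remove the requested buffers,
 * recompute the composite scan mask and demux tables, then bring capture
 * back up. Called with mlock held; on failure everything is wound down and
 * the old scan mask is restored where possible.
 */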
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				return -EINVAL;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

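/**
 * iio_update_buffers() - add or remove a buffer from the active set
 * @indio_dev:		the device the buffers are attached to
 * @insert_buffer:	buffer to insert, may be NULL
 * @remove_buffer:	buffer to remove, may be NULL
 *
 * Takes the necessary locks, drops requests that are already satisfied (or
 * can no longer be satisfied because the device has gone away) and hands
 * the real work over to __iio_update_buffers().
 */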
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
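
/*
 * Illustrative example (placeholder names, not taken from a specific driver):
 * a device that can only convert one channel per scan would typically hook
 * this helper up through its buffer setup ops so that attempts to enable a
 * second scan element are rejected:
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.preenable		= foo_buffer_preenable,
 *		.postenable		= foo_buffer_postenable,
 *		.validate_scan_mask	= iio_validate_scan_mask_onehot,
 *	};
 */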

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

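/*
 * A buffer may have enabled only a subset of the channels the device is
 * currently scanning. If so, iio_demux() uses the precomputed demux table to
 * copy just the wanted samples into the bounce buffer; otherwise the incoming
 * scan is passed through untouched.
 */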
static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

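/**
 * iio_push_to_buffers() - push one complete scan to all attached buffers
 * @indio_dev: iio_dev structure for the device
 * @data: the full scan, laid out according to the active scan mask
 *
 * Typically called from a trigger handler or interrupt routine once a whole
 * scan (plus timestamp, if enabled) has been assembled.
 */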
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

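/*
 * Rebuild the demux table for one buffer: walk the device's active scan mask
 * and, for every channel this buffer wants, record where its (naturally
 * aligned) sample sits in the incoming scan and where it should land in the
 * bounce buffer. If the buffer wants exactly the active scan, no table is
 * needed.
 */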
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		/* Skip over the active channels this buffer does not want */
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_loc += length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);