xref: /openbmc/linux/drivers/iio/industrialio-buffer.c (revision 023e41632e065d49bcbe31b3c4b336217f96a271)
1 /* The industrial I/O core
2  *
3  * Copyright (c) 2008 Jonathan Cameron
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * Handling of buffer allocation / resizing.
10  *
11  *
12  * Things to look at here.
13  * - Better memory allocation techniques?
14  * - Alternative access techniques?
15  */
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/device.h>
19 #include <linux/fs.h>
20 #include <linux/cdev.h>
21 #include <linux/slab.h>
22 #include <linux/poll.h>
23 #include <linux/sched/signal.h>
24 
25 #include <linux/iio/iio.h>
26 #include "iio_core.h"
27 #include <linux/iio/sysfs.h>
28 #include <linux/iio/buffer.h>
29 #include <linux/iio/buffer_impl.h>
30 
31 static const char * const iio_endian_prefix[] = {
32 	[IIO_BE] = "be",
33 	[IIO_LE] = "le",
34 };
35 
36 static bool iio_buffer_is_active(struct iio_buffer *buf)
37 {
38 	return !list_empty(&buf->buffer_list);
39 }
40 
41 static size_t iio_buffer_data_available(struct iio_buffer *buf)
42 {
43 	return buf->access->data_available(buf);
44 }
45 
46 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
47 				   struct iio_buffer *buf, size_t required)
48 {
49 	if (!indio_dev->info->hwfifo_flush_to_buffer)
50 		return -ENODEV;
51 
52 	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
53 }
54 
55 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
56 			     size_t to_wait, int to_flush)
57 {
58 	size_t avail;
59 	int flushed = 0;
60 
61 	/* wakeup if the device was unregistered */
62 	if (!indio_dev->info)
63 		return true;
64 
65 	/* drain the buffer if it was disabled */
66 	if (!iio_buffer_is_active(buf)) {
67 		to_wait = min_t(size_t, to_wait, 1);
68 		to_flush = 0;
69 	}
70 
71 	avail = iio_buffer_data_available(buf);
72 
73 	if (avail >= to_wait) {
74 		/* force a flush for non-blocking reads */
75 		if (!to_wait && avail < to_flush)
76 			iio_buffer_flush_hwfifo(indio_dev, buf,
77 						to_flush - avail);
78 		return true;
79 	}
80 
81 	if (to_flush)
82 		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
83 						  to_wait - avail);
84 	if (flushed <= 0)
85 		return false;
86 
87 	if (avail + flushed >= to_wait)
88 		return true;
89 
90 	return false;
91 }
92 
93 /**
94  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
95  * @filp:	File structure pointer for the char device
96  * @buf:	Destination buffer for iio buffer read
97  * @n:		Maximum number of bytes to read
98  * @f_ps:	Long offset provided by the user as a seek position
99  *
100  * This function relies on all buffer implementations having an
101  * iio_buffer as their first element.
102  *
103  * Return: negative error code on failure, otherwise the number of bytes
104  *	   read; any non-zero return value ends the read loop
105  **/
106 ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
107 				      size_t n, loff_t *f_ps)
108 {
109 	struct iio_dev *indio_dev = filp->private_data;
110 	struct iio_buffer *rb = indio_dev->buffer;
111 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
112 	size_t datum_size;
113 	size_t to_wait;
114 	int ret = 0;
115 
116 	if (!indio_dev->info)
117 		return -ENODEV;
118 
119 	if (!rb || !rb->access->read_first_n)
120 		return -EINVAL;
121 
122 	datum_size = rb->bytes_per_datum;
123 
124 	/*
125 	 * If datum_size is 0 there will never be anything to read from the
126 	 * buffer, so signal end of file now.
127 	 */
128 	if (!datum_size)
129 		return 0;
130 
131 	if (filp->f_flags & O_NONBLOCK)
132 		to_wait = 0;
133 	else
134 		to_wait = min_t(size_t, n / datum_size, rb->watermark);
135 
136 	add_wait_queue(&rb->pollq, &wait);
137 	do {
138 		if (!indio_dev->info) {
139 			ret = -ENODEV;
140 			break;
141 		}
142 
143 		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
144 			if (signal_pending(current)) {
145 				ret = -ERESTARTSYS;
146 				break;
147 			}
148 
149 			wait_woken(&wait, TASK_INTERRUPTIBLE,
150 				   MAX_SCHEDULE_TIMEOUT);
151 			continue;
152 		}
153 
154 		ret = rb->access->read_first_n(rb, n, buf);
155 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
156 			ret = -EAGAIN;
157 	} while (ret == 0);
158 	remove_wait_queue(&rb->pollq, &wait);
159 
160 	return ret;
161 }
162 
163 /**
164  * iio_buffer_poll() - poll the buffer to find out if it has data
165  * @filp:	File structure pointer for device access
166  * @wait:	Poll table structure pointer for which the driver adds
167  *		a wait queue
168  *
169  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
170  *	   or 0 for other cases
171  */
172 __poll_t iio_buffer_poll(struct file *filp,
173 			     struct poll_table_struct *wait)
174 {
175 	struct iio_dev *indio_dev = filp->private_data;
176 	struct iio_buffer *rb = indio_dev->buffer;
177 
178 	if (!indio_dev->info || rb == NULL)
179 		return 0;
180 
181 	poll_wait(filp, &rb->pollq, wait);
182 	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
183 		return EPOLLIN | EPOLLRDNORM;
184 	return 0;
185 }
186 
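/*
 * Illustrative userspace sketch (not part of the kernel; device path and
 * buffer sizing are assumptions): a reader typically polls the character
 * device and then reads whole scans once data is signalled:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	read(fd, scan, scan_bytes);	// scan_bytes = size of one full scan
 */
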
187 /**
188  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
189  * @indio_dev: The IIO device
190  *
191  * Wakes up the buffer waitqueue used for poll(). Should usually
192  * be called when the device is unregistered.
193  */
194 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
195 {
196 	if (!indio_dev->buffer)
197 		return;
198 
199 	wake_up(&indio_dev->buffer->pollq);
200 }
201 
202 void iio_buffer_init(struct iio_buffer *buffer)
203 {
204 	INIT_LIST_HEAD(&buffer->demux_list);
205 	INIT_LIST_HEAD(&buffer->buffer_list);
206 	init_waitqueue_head(&buffer->pollq);
207 	kref_init(&buffer->ref);
208 	if (!buffer->watermark)
209 		buffer->watermark = 1;
210 }
211 EXPORT_SYMBOL(iio_buffer_init);
212 
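/*
 * Usage sketch (hypothetical buffer implementation, not from this file):
 * implementations embed struct iio_buffer as their first member and call
 * iio_buffer_init() right after allocation, before filling in access ops:
 *
 *	struct my_fifo {
 *		struct iio_buffer buffer;	// must stay first
 *		struct kfifo kf;
 *	};
 *
 *	static struct iio_buffer *my_fifo_allocate(void)
 *	{
 *		struct my_fifo *fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
 *
 *		if (!fifo)
 *			return NULL;
 *		iio_buffer_init(&fifo->buffer);
 *		fifo->buffer.access = &my_fifo_access_funcs;	// hypothetical ops
 *		return &fifo->buffer;
 *	}
 */
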
213 /**
214  * iio_buffer_set_attrs - Set buffer specific attributes
215  * @buffer: The buffer for which we are setting attributes
216  * @attrs: Pointer to a NULL-terminated array of pointers to attributes
217  */
218 void iio_buffer_set_attrs(struct iio_buffer *buffer,
219 			 const struct attribute **attrs)
220 {
221 	buffer->attrs = attrs;
222 }
223 EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
224 
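/*
 * Usage sketch (hypothetical driver code): the array handed over must be
 * NULL terminated and is typically built from static device attributes:
 *
 *	static const struct attribute *my_buffer_attrs[] = {
 *		&dev_attr_hwfifo_enabled.attr,	// hypothetical attribute
 *		NULL,
 *	};
 *
 *	iio_buffer_set_attrs(indio_dev->buffer, my_buffer_attrs);
 */
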
225 static ssize_t iio_show_scan_index(struct device *dev,
226 				   struct device_attribute *attr,
227 				   char *buf)
228 {
229 	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
230 }
231 
232 static ssize_t iio_show_fixed_type(struct device *dev,
233 				   struct device_attribute *attr,
234 				   char *buf)
235 {
236 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
237 	u8 type = this_attr->c->scan_type.endianness;
238 
239 	if (type == IIO_CPU) {
240 #ifdef __LITTLE_ENDIAN
241 		type = IIO_LE;
242 #else
243 		type = IIO_BE;
244 #endif
245 	}
246 	if (this_attr->c->scan_type.repeat > 1)
247 		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
248 		       iio_endian_prefix[type],
249 		       this_attr->c->scan_type.sign,
250 		       this_attr->c->scan_type.realbits,
251 		       this_attr->c->scan_type.storagebits,
252 		       this_attr->c->scan_type.repeat,
253 		       this_attr->c->scan_type.shift);
254 	else
255 		return sprintf(buf, "%s:%c%d/%d>>%u\n",
256 		       iio_endian_prefix[type],
257 		       this_attr->c->scan_type.sign,
258 		       this_attr->c->scan_type.realbits,
259 		       this_attr->c->scan_type.storagebits,
260 		       this_attr->c->scan_type.shift);
261 }
262 
263 static ssize_t iio_scan_el_show(struct device *dev,
264 				struct device_attribute *attr,
265 				char *buf)
266 {
267 	int ret;
268 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
269 
270 	/* Ensure ret is 0 or 1. */
271 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
272 		       indio_dev->buffer->scan_mask);
273 
274 	return sprintf(buf, "%d\n", ret);
275 }
276 
277 /* Note: NULL is used as the error indicator, since a NULL mask makes no sense. */
278 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
279 					  unsigned int masklength,
280 					  const unsigned long *mask,
281 					  bool strict)
282 {
283 	if (bitmap_empty(mask, masklength))
284 		return NULL;
285 	while (*av_masks) {
286 		if (strict) {
287 			if (bitmap_equal(mask, av_masks, masklength))
288 				return av_masks;
289 		} else {
290 			if (bitmap_subset(mask, av_masks, masklength))
291 				return av_masks;
292 		}
293 		av_masks += BITS_TO_LONGS(masklength);
294 	}
295 	return NULL;
296 }
297 
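/*
 * For reference, available_scan_masks is a zero-terminated array of bitmaps,
 * each spanning BITS_TO_LONGS(masklength) longs. A sketch for a hypothetical
 * device whose hardware can only sample channel pairs (assumes masklength
 * fits in a single long, so each mask is one array entry):
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),
 *		BIT(2) | BIT(3),
 *		BIT(0) | BIT(1) | BIT(2) | BIT(3),
 *		0,
 *	};
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 */
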
298 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
299 	const unsigned long *mask)
300 {
301 	if (!indio_dev->setup_ops->validate_scan_mask)
302 		return true;
303 
304 	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
305 }
306 
307 /**
308  * iio_scan_mask_set() - set particular bit in the scan mask
309  * @indio_dev: the iio device
310  * @buffer: the buffer whose scan mask we are interested in
311  * @bit: the bit to be set.
312  *
313  * Note that at this point we have no way of knowing what other
314  * buffers might request, hence this code only verifies that the
315  * individual buffer's request is plausible.
316  */
317 static int iio_scan_mask_set(struct iio_dev *indio_dev,
318 		      struct iio_buffer *buffer, int bit)
319 {
320 	const unsigned long *mask;
321 	unsigned long *trialmask;
322 
323 	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
324 	if (trialmask == NULL)
325 		return -ENOMEM;
326 	if (!indio_dev->masklength) {
327 		WARN(1, "Trying to set scanmask prior to registering buffer\n");
328 		goto err_invalid_mask;
329 	}
330 	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
331 	set_bit(bit, trialmask);
332 
333 	if (!iio_validate_scan_mask(indio_dev, trialmask))
334 		goto err_invalid_mask;
335 
336 	if (indio_dev->available_scan_masks) {
337 		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
338 					   indio_dev->masklength,
339 					   trialmask, false);
340 		if (!mask)
341 			goto err_invalid_mask;
342 	}
343 	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
344 
345 	bitmap_free(trialmask);
346 
347 	return 0;
348 
349 err_invalid_mask:
350 	bitmap_free(trialmask);
351 	return -EINVAL;
352 }
353 
354 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
355 {
356 	clear_bit(bit, buffer->scan_mask);
357 	return 0;
358 }
359 
360 static int iio_scan_mask_query(struct iio_dev *indio_dev,
361 			       struct iio_buffer *buffer, int bit)
362 {
363 	if (bit > indio_dev->masklength)
364 		return -EINVAL;
365 
366 	if (!buffer->scan_mask)
367 		return 0;
368 
369 	/* Ensure return value is 0 or 1. */
370 	return !!test_bit(bit, buffer->scan_mask);
371 }
372 
373 static ssize_t iio_scan_el_store(struct device *dev,
374 				 struct device_attribute *attr,
375 				 const char *buf,
376 				 size_t len)
377 {
378 	int ret;
379 	bool state;
380 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
381 	struct iio_buffer *buffer = indio_dev->buffer;
382 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
383 
384 	ret = strtobool(buf, &state);
385 	if (ret < 0)
386 		return ret;
387 	mutex_lock(&indio_dev->mlock);
388 	if (iio_buffer_is_active(indio_dev->buffer)) {
389 		ret = -EBUSY;
390 		goto error_ret;
391 	}
392 	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
393 	if (ret < 0)
394 		goto error_ret;
395 	if (!state && ret) {
396 		ret = iio_scan_mask_clear(buffer, this_attr->address);
397 		if (ret)
398 			goto error_ret;
399 	} else if (state && !ret) {
400 		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
401 		if (ret)
402 			goto error_ret;
403 	}
404 
405 error_ret:
406 	mutex_unlock(&indio_dev->mlock);
407 
408 	return ret < 0 ? ret : len;
409 
410 }
411 
412 static ssize_t iio_scan_el_ts_show(struct device *dev,
413 				   struct device_attribute *attr,
414 				   char *buf)
415 {
416 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
417 	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
418 }
419 
420 static ssize_t iio_scan_el_ts_store(struct device *dev,
421 				    struct device_attribute *attr,
422 				    const char *buf,
423 				    size_t len)
424 {
425 	int ret;
426 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
427 	bool state;
428 
429 	ret = strtobool(buf, &state);
430 	if (ret < 0)
431 		return ret;
432 
433 	mutex_lock(&indio_dev->mlock);
434 	if (iio_buffer_is_active(indio_dev->buffer)) {
435 		ret = -EBUSY;
436 		goto error_ret;
437 	}
438 	indio_dev->buffer->scan_timestamp = state;
439 error_ret:
440 	mutex_unlock(&indio_dev->mlock);
441 
442 	return ret ? ret : len;
443 }
444 
445 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
446 					const struct iio_chan_spec *chan)
447 {
448 	int ret, attrcount = 0;
449 	struct iio_buffer *buffer = indio_dev->buffer;
450 
451 	ret = __iio_add_chan_devattr("index",
452 				     chan,
453 				     &iio_show_scan_index,
454 				     NULL,
455 				     0,
456 				     IIO_SEPARATE,
457 				     &indio_dev->dev,
458 				     &buffer->scan_el_dev_attr_list);
459 	if (ret)
460 		return ret;
461 	attrcount++;
462 	ret = __iio_add_chan_devattr("type",
463 				     chan,
464 				     &iio_show_fixed_type,
465 				     NULL,
466 				     0,
467 				     0,
468 				     &indio_dev->dev,
469 				     &buffer->scan_el_dev_attr_list);
470 	if (ret)
471 		return ret;
472 	attrcount++;
473 	if (chan->type != IIO_TIMESTAMP)
474 		ret = __iio_add_chan_devattr("en",
475 					     chan,
476 					     &iio_scan_el_show,
477 					     &iio_scan_el_store,
478 					     chan->scan_index,
479 					     0,
480 					     &indio_dev->dev,
481 					     &buffer->scan_el_dev_attr_list);
482 	else
483 		ret = __iio_add_chan_devattr("en",
484 					     chan,
485 					     &iio_scan_el_ts_show,
486 					     &iio_scan_el_ts_store,
487 					     chan->scan_index,
488 					     0,
489 					     &indio_dev->dev,
490 					     &buffer->scan_el_dev_attr_list);
491 	if (ret)
492 		return ret;
493 	attrcount++;
494 	ret = attrcount;
495 	return ret;
496 }
497 
498 static ssize_t iio_buffer_read_length(struct device *dev,
499 				      struct device_attribute *attr,
500 				      char *buf)
501 {
502 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
503 	struct iio_buffer *buffer = indio_dev->buffer;
504 
505 	return sprintf(buf, "%d\n", buffer->length);
506 }
507 
508 static ssize_t iio_buffer_write_length(struct device *dev,
509 				       struct device_attribute *attr,
510 				       const char *buf, size_t len)
511 {
512 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
513 	struct iio_buffer *buffer = indio_dev->buffer;
514 	unsigned int val;
515 	int ret;
516 
517 	ret = kstrtouint(buf, 10, &val);
518 	if (ret)
519 		return ret;
520 
521 	if (val == buffer->length)
522 		return len;
523 
524 	mutex_lock(&indio_dev->mlock);
525 	if (iio_buffer_is_active(indio_dev->buffer)) {
526 		ret = -EBUSY;
527 	} else {
528 		buffer->access->set_length(buffer, val);
529 		ret = 0;
530 	}
531 	if (ret)
532 		goto out;
533 	if (buffer->length && buffer->length < buffer->watermark)
534 		buffer->watermark = buffer->length;
535 out:
536 	mutex_unlock(&indio_dev->mlock);
537 
538 	return ret ? ret : len;
539 }
540 
541 static ssize_t iio_buffer_show_enable(struct device *dev,
542 				      struct device_attribute *attr,
543 				      char *buf)
544 {
545 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
546 	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
547 }
548 
549 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
550 					     unsigned int scan_index)
551 {
552 	const struct iio_chan_spec *ch;
553 	unsigned int bytes;
554 
555 	ch = iio_find_channel_from_si(indio_dev, scan_index);
556 	bytes = ch->scan_type.storagebits / 8;
557 	if (ch->scan_type.repeat > 1)
558 		bytes *= ch->scan_type.repeat;
559 	return bytes;
560 }
561 
562 static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
563 {
564 	return iio_storage_bytes_for_si(indio_dev,
565 					indio_dev->scan_index_timestamp);
566 }
567 
568 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
569 				const unsigned long *mask, bool timestamp)
570 {
571 	unsigned bytes = 0;
572 	int length, i;
573 
574 	/* How much space will the demuxed element take? */
575 	for_each_set_bit(i, mask,
576 			 indio_dev->masklength) {
577 		length = iio_storage_bytes_for_si(indio_dev, i);
578 		bytes = ALIGN(bytes, length);
579 		bytes += length;
580 	}
581 
582 	if (timestamp) {
583 		length = iio_storage_bytes_for_timestamp(indio_dev);
584 		bytes = ALIGN(bytes, length);
585 		bytes += length;
586 	}
587 	return bytes;
588 }
589 
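/*
 * Worked example (hypothetical scan, not taken from a real driver): two
 * enabled channels with 16-bit storagebits plus a 64-bit timestamp. The
 * loop above gives bytes = ALIGN(0, 2) + 2 = 2, then ALIGN(2, 2) + 2 = 4;
 * the timestamp is then aligned to 8 bytes: ALIGN(4, 8) + 8 = 16. The scan
 * is therefore 16 bytes long, with 4 bytes of padding before the timestamp.
 */
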
590 static void iio_buffer_activate(struct iio_dev *indio_dev,
591 	struct iio_buffer *buffer)
592 {
593 	iio_buffer_get(buffer);
594 	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
595 }
596 
597 static void iio_buffer_deactivate(struct iio_buffer *buffer)
598 {
599 	list_del_init(&buffer->buffer_list);
600 	wake_up_interruptible(&buffer->pollq);
601 	iio_buffer_put(buffer);
602 }
603 
604 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
605 {
606 	struct iio_buffer *buffer, *_buffer;
607 
608 	list_for_each_entry_safe(buffer, _buffer,
609 			&indio_dev->buffer_list, buffer_list)
610 		iio_buffer_deactivate(buffer);
611 }
612 
613 static int iio_buffer_enable(struct iio_buffer *buffer,
614 	struct iio_dev *indio_dev)
615 {
616 	if (!buffer->access->enable)
617 		return 0;
618 	return buffer->access->enable(buffer, indio_dev);
619 }
620 
621 static int iio_buffer_disable(struct iio_buffer *buffer,
622 	struct iio_dev *indio_dev)
623 {
624 	if (!buffer->access->disable)
625 		return 0;
626 	return buffer->access->disable(buffer, indio_dev);
627 }
628 
629 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
630 	struct iio_buffer *buffer)
631 {
632 	unsigned int bytes;
633 
634 	if (!buffer->access->set_bytes_per_datum)
635 		return;
636 
637 	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
638 		buffer->scan_timestamp);
639 
640 	buffer->access->set_bytes_per_datum(buffer, bytes);
641 }
642 
643 static int iio_buffer_request_update(struct iio_dev *indio_dev,
644 	struct iio_buffer *buffer)
645 {
646 	int ret;
647 
648 	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
649 	if (buffer->access->request_update) {
650 		ret = buffer->access->request_update(buffer);
651 		if (ret) {
652 			dev_dbg(&indio_dev->dev,
653 			       "Buffer not started: buffer parameter update failed (%d)\n",
654 				ret);
655 			return ret;
656 		}
657 	}
658 
659 	return 0;
660 }
661 
662 static void iio_free_scan_mask(struct iio_dev *indio_dev,
663 	const unsigned long *mask)
664 {
665 	/* If the mask is dynamically allocated, free it; otherwise do nothing */
666 	if (!indio_dev->available_scan_masks)
667 		bitmap_free(mask);
668 }
669 
670 struct iio_device_config {
671 	unsigned int mode;
672 	unsigned int watermark;
673 	const unsigned long *scan_mask;
674 	unsigned int scan_bytes;
675 	bool scan_timestamp;
676 };
677 
678 static int iio_verify_update(struct iio_dev *indio_dev,
679 	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
680 	struct iio_device_config *config)
681 {
682 	unsigned long *compound_mask;
683 	const unsigned long *scan_mask;
684 	bool strict_scanmask = false;
685 	struct iio_buffer *buffer;
686 	bool scan_timestamp;
687 	unsigned int modes;
688 
689 	memset(config, 0, sizeof(*config));
690 	config->watermark = ~0;
691 
692 	/*
693 	 * If there is just one buffer and we are removing it, there is nothing
694 	 * to verify.
695 	 */
696 	if (remove_buffer && !insert_buffer &&
697 		list_is_singular(&indio_dev->buffer_list))
698 			return 0;
699 
700 	modes = indio_dev->modes;
701 
702 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
703 		if (buffer == remove_buffer)
704 			continue;
705 		modes &= buffer->access->modes;
706 		config->watermark = min(config->watermark, buffer->watermark);
707 	}
708 
709 	if (insert_buffer) {
710 		modes &= insert_buffer->access->modes;
711 		config->watermark = min(config->watermark,
712 			insert_buffer->watermark);
713 	}
714 
715 	/* Definitely possible for devices to support both of these. */
716 	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
717 		config->mode = INDIO_BUFFER_TRIGGERED;
718 	} else if (modes & INDIO_BUFFER_HARDWARE) {
719 		/*
720 		 * Keep things simple for now and only allow a single buffer to
721 		 * be connected in hardware mode.
722 		 */
723 		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
724 			return -EINVAL;
725 		config->mode = INDIO_BUFFER_HARDWARE;
726 		strict_scanmask = true;
727 	} else if (modes & INDIO_BUFFER_SOFTWARE) {
728 		config->mode = INDIO_BUFFER_SOFTWARE;
729 	} else {
730 		/* Can only occur on first buffer */
731 		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
732 			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
733 		return -EINVAL;
734 	}
735 
736 	/* What scan mask do we actually have? */
737 	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
738 	if (compound_mask == NULL)
739 		return -ENOMEM;
740 
741 	scan_timestamp = false;
742 
743 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
744 		if (buffer == remove_buffer)
745 			continue;
746 		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
747 			  indio_dev->masklength);
748 		scan_timestamp |= buffer->scan_timestamp;
749 	}
750 
751 	if (insert_buffer) {
752 		bitmap_or(compound_mask, compound_mask,
753 			  insert_buffer->scan_mask, indio_dev->masklength);
754 		scan_timestamp |= insert_buffer->scan_timestamp;
755 	}
756 
757 	if (indio_dev->available_scan_masks) {
758 		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
759 				    indio_dev->masklength,
760 				    compound_mask,
761 				    strict_scanmask);
762 		bitmap_free(compound_mask);
763 		if (scan_mask == NULL)
764 			return -EINVAL;
765 	} else {
766 		scan_mask = compound_mask;
767 	}
768 
769 	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
770 				    scan_mask, scan_timestamp);
771 	config->scan_mask = scan_mask;
772 	config->scan_timestamp = scan_timestamp;
773 
774 	return 0;
775 }
776 
777 /**
778  * struct iio_demux_table - table describing demux memcpy ops
779  * @from:	index to copy from
780  * @to:		index to copy to
781  * @length:	how many bytes to copy
782  * @l:		list head used for management
783  */
784 struct iio_demux_table {
785 	unsigned from;
786 	unsigned to;
787 	unsigned length;
788 	struct list_head l;
789 };
790 
791 static void iio_buffer_demux_free(struct iio_buffer *buffer)
792 {
793 	struct iio_demux_table *p, *q;
794 	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
795 		list_del(&p->l);
796 		kfree(p);
797 	}
798 }
799 
800 static int iio_buffer_add_demux(struct iio_buffer *buffer,
801 	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
802 	unsigned int length)
803 {
804 
805 	if (*p && (*p)->from + (*p)->length == in_loc &&
806 		(*p)->to + (*p)->length == out_loc) {
807 		(*p)->length += length;
808 	} else {
809 		*p = kmalloc(sizeof(**p), GFP_KERNEL);
810 		if (*p == NULL)
811 			return -ENOMEM;
812 		(*p)->from = in_loc;
813 		(*p)->to = out_loc;
814 		(*p)->length = length;
815 		list_add_tail(&(*p)->l, &buffer->demux_list);
816 	}
817 
818 	return 0;
819 }
820 
821 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
822 				   struct iio_buffer *buffer)
823 {
824 	int ret, in_ind = -1, out_ind, length;
825 	unsigned in_loc = 0, out_loc = 0;
826 	struct iio_demux_table *p = NULL;
827 
828 	/* Clear out any old demux */
829 	iio_buffer_demux_free(buffer);
830 	kfree(buffer->demux_bounce);
831 	buffer->demux_bounce = NULL;
832 
833 	/* First work out which scan mode we will actually have */
834 	if (bitmap_equal(indio_dev->active_scan_mask,
835 			 buffer->scan_mask,
836 			 indio_dev->masklength))
837 		return 0;
838 
839 	/* Now we have the two masks, work from the least significant bit up and build the sizes */
840 	for_each_set_bit(out_ind,
841 			 buffer->scan_mask,
842 			 indio_dev->masklength) {
843 		in_ind = find_next_bit(indio_dev->active_scan_mask,
844 				       indio_dev->masklength,
845 				       in_ind + 1);
846 		while (in_ind != out_ind) {
847 			in_ind = find_next_bit(indio_dev->active_scan_mask,
848 					       indio_dev->masklength,
849 					       in_ind + 1);
850 			length = iio_storage_bytes_for_si(indio_dev, in_ind);
851 			/* Make sure we are aligned */
852 			in_loc = roundup(in_loc, length) + length;
853 		}
854 		length = iio_storage_bytes_for_si(indio_dev, in_ind);
855 		out_loc = roundup(out_loc, length);
856 		in_loc = roundup(in_loc, length);
857 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
858 		if (ret)
859 			goto error_clear_mux_table;
860 		out_loc += length;
861 		in_loc += length;
862 	}
863 	/* Relies on scan_timestamp being last */
864 	if (buffer->scan_timestamp) {
865 		length = iio_storage_bytes_for_timestamp(indio_dev);
866 		out_loc = roundup(out_loc, length);
867 		in_loc = roundup(in_loc, length);
868 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
869 		if (ret)
870 			goto error_clear_mux_table;
871 		out_loc += length;
872 		in_loc += length;
873 	}
874 	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
875 	if (buffer->demux_bounce == NULL) {
876 		ret = -ENOMEM;
877 		goto error_clear_mux_table;
878 	}
879 	return 0;
880 
881 error_clear_mux_table:
882 	iio_buffer_demux_free(buffer);
883 
884 	return ret;
885 }
886 
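/*
 * Worked example (assuming three 16-bit channels, purely illustrative): if
 * the device captures channels {0, 1, 2} (active_scan_mask) but this buffer
 * only requested {0, 2} (scan_mask), the table built above ends up with two
 * entries: copy bytes 0-1 of each incoming scan to bytes 0-1 of demux_bounce,
 * and bytes 4-5 to bytes 2-3, silently dropping channel 1.
 */
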
887 static int iio_update_demux(struct iio_dev *indio_dev)
888 {
889 	struct iio_buffer *buffer;
890 	int ret;
891 
892 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
893 		ret = iio_buffer_update_demux(indio_dev, buffer);
894 		if (ret < 0)
895 			goto error_clear_mux_table;
896 	}
897 	return 0;
898 
899 error_clear_mux_table:
900 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
901 		iio_buffer_demux_free(buffer);
902 
903 	return ret;
904 }
905 
906 static int iio_enable_buffers(struct iio_dev *indio_dev,
907 	struct iio_device_config *config)
908 {
909 	struct iio_buffer *buffer;
910 	int ret;
911 
912 	indio_dev->active_scan_mask = config->scan_mask;
913 	indio_dev->scan_timestamp = config->scan_timestamp;
914 	indio_dev->scan_bytes = config->scan_bytes;
915 
916 	iio_update_demux(indio_dev);
917 
918 	/* Wind up again */
919 	if (indio_dev->setup_ops->preenable) {
920 		ret = indio_dev->setup_ops->preenable(indio_dev);
921 		if (ret) {
922 			dev_dbg(&indio_dev->dev,
923 			       "Buffer not started: buffer preenable failed (%d)\n", ret);
924 			goto err_undo_config;
925 		}
926 	}
927 
928 	if (indio_dev->info->update_scan_mode) {
929 		ret = indio_dev->info
930 			->update_scan_mode(indio_dev,
931 					   indio_dev->active_scan_mask);
932 		if (ret < 0) {
933 			dev_dbg(&indio_dev->dev,
934 				"Buffer not started: update scan mode failed (%d)\n",
935 				ret);
936 			goto err_run_postdisable;
937 		}
938 	}
939 
940 	if (indio_dev->info->hwfifo_set_watermark)
941 		indio_dev->info->hwfifo_set_watermark(indio_dev,
942 			config->watermark);
943 
944 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
945 		ret = iio_buffer_enable(buffer, indio_dev);
946 		if (ret)
947 			goto err_disable_buffers;
948 	}
949 
950 	indio_dev->currentmode = config->mode;
951 
952 	if (indio_dev->setup_ops->postenable) {
953 		ret = indio_dev->setup_ops->postenable(indio_dev);
954 		if (ret) {
955 			dev_dbg(&indio_dev->dev,
956 			       "Buffer not started: postenable failed (%d)\n", ret);
957 			goto err_disable_buffers;
958 		}
959 	}
960 
961 	return 0;
962 
963 err_disable_buffers:
964 	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
965 					     buffer_list)
966 		iio_buffer_disable(buffer, indio_dev);
967 err_run_postdisable:
968 	indio_dev->currentmode = INDIO_DIRECT_MODE;
969 	if (indio_dev->setup_ops->postdisable)
970 		indio_dev->setup_ops->postdisable(indio_dev);
971 err_undo_config:
972 	indio_dev->active_scan_mask = NULL;
973 
974 	return ret;
975 }
976 
977 static int iio_disable_buffers(struct iio_dev *indio_dev)
978 {
979 	struct iio_buffer *buffer;
980 	int ret = 0;
981 	int ret2;
982 
983 	/* Wind down existing buffers - iff there are any */
984 	if (list_empty(&indio_dev->buffer_list))
985 		return 0;
986 
987 	/*
988 	 * If things go wrong at some step in disable we still need to continue
989 	 * to perform the other steps, otherwise we leave the device in an
990 	 * inconsistent state. We return the error code for the first error we
991 	 * encountered.
992 	 */
993 
994 	if (indio_dev->setup_ops->predisable) {
995 		ret2 = indio_dev->setup_ops->predisable(indio_dev);
996 		if (ret2 && !ret)
997 			ret = ret2;
998 	}
999 
1000 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
1001 		ret2 = iio_buffer_disable(buffer, indio_dev);
1002 		if (ret2 && !ret)
1003 			ret = ret2;
1004 	}
1005 
1006 	indio_dev->currentmode = INDIO_DIRECT_MODE;
1007 
1008 	if (indio_dev->setup_ops->postdisable) {
1009 		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1010 		if (ret2 && !ret)
1011 			ret = ret2;
1012 	}
1013 
1014 	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1015 	indio_dev->active_scan_mask = NULL;
1016 
1017 	return ret;
1018 }
1019 
1020 static int __iio_update_buffers(struct iio_dev *indio_dev,
1021 		       struct iio_buffer *insert_buffer,
1022 		       struct iio_buffer *remove_buffer)
1023 {
1024 	struct iio_device_config new_config;
1025 	int ret;
1026 
1027 	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1028 		&new_config);
1029 	if (ret)
1030 		return ret;
1031 
1032 	if (insert_buffer) {
1033 		ret = iio_buffer_request_update(indio_dev, insert_buffer);
1034 		if (ret)
1035 			goto err_free_config;
1036 	}
1037 
1038 	ret = iio_disable_buffers(indio_dev);
1039 	if (ret)
1040 		goto err_deactivate_all;
1041 
1042 	if (remove_buffer)
1043 		iio_buffer_deactivate(remove_buffer);
1044 	if (insert_buffer)
1045 		iio_buffer_activate(indio_dev, insert_buffer);
1046 
1047 	/* If no buffers in list, we are done */
1048 	if (list_empty(&indio_dev->buffer_list))
1049 		return 0;
1050 
1051 	ret = iio_enable_buffers(indio_dev, &new_config);
1052 	if (ret)
1053 		goto err_deactivate_all;
1054 
1055 	return 0;
1056 
1057 err_deactivate_all:
1058 	/*
1059 	 * We've already verified that the config is valid earlier. If things go
1060 	 * wrong in either enable or disable the most likely reason is an IO
1061 	 * error from the device. In this case there is no good recovery
1062 	 * strategy. Just make sure to disable everything and leave the device
1063 	 * in a sane state.  With a bit of luck the device might come back to
1064 	 * life again later and userspace can try again.
1065 	 */
1066 	iio_buffer_deactivate_all(indio_dev);
1067 
1068 err_free_config:
1069 	iio_free_scan_mask(indio_dev, new_config.scan_mask);
1070 	return ret;
1071 }
1072 
1073 int iio_update_buffers(struct iio_dev *indio_dev,
1074 		       struct iio_buffer *insert_buffer,
1075 		       struct iio_buffer *remove_buffer)
1076 {
1077 	int ret;
1078 
1079 	if (insert_buffer == remove_buffer)
1080 		return 0;
1081 
1082 	mutex_lock(&indio_dev->info_exist_lock);
1083 	mutex_lock(&indio_dev->mlock);
1084 
1085 	if (insert_buffer && iio_buffer_is_active(insert_buffer))
1086 		insert_buffer = NULL;
1087 
1088 	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1089 		remove_buffer = NULL;
1090 
1091 	if (!insert_buffer && !remove_buffer) {
1092 		ret = 0;
1093 		goto out_unlock;
1094 	}
1095 
1096 	if (indio_dev->info == NULL) {
1097 		ret = -ENODEV;
1098 		goto out_unlock;
1099 	}
1100 
1101 	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1102 
1103 out_unlock:
1104 	mutex_unlock(&indio_dev->mlock);
1105 	mutex_unlock(&indio_dev->info_exist_lock);
1106 
1107 	return ret;
1108 }
1109 EXPORT_SYMBOL_GPL(iio_update_buffers);
1110 
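/*
 * Usage sketch (hedged; buffer name is hypothetical): an in-kernel consumer
 * attaches or detaches its own buffer like this:
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	// attach
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);	// detach
 */
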
1111 void iio_disable_all_buffers(struct iio_dev *indio_dev)
1112 {
1113 	iio_disable_buffers(indio_dev);
1114 	iio_buffer_deactivate_all(indio_dev);
1115 }
1116 
1117 static ssize_t iio_buffer_store_enable(struct device *dev,
1118 				       struct device_attribute *attr,
1119 				       const char *buf,
1120 				       size_t len)
1121 {
1122 	int ret;
1123 	bool requested_state;
1124 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1125 	bool inlist;
1126 
1127 	ret = strtobool(buf, &requested_state);
1128 	if (ret < 0)
1129 		return ret;
1130 
1131 	mutex_lock(&indio_dev->mlock);
1132 
1133 	/* Find out if it is in the list */
1134 	inlist = iio_buffer_is_active(indio_dev->buffer);
1135 	/* Already in desired state */
1136 	if (inlist == requested_state)
1137 		goto done;
1138 
1139 	if (requested_state)
1140 		ret = __iio_update_buffers(indio_dev,
1141 					 indio_dev->buffer, NULL);
1142 	else
1143 		ret = __iio_update_buffers(indio_dev,
1144 					 NULL, indio_dev->buffer);
1145 
1146 done:
1147 	mutex_unlock(&indio_dev->mlock);
1148 	return (ret < 0) ? ret : len;
1149 }
1150 
1151 static const char * const iio_scan_elements_group_name = "scan_elements";
1152 
1153 static ssize_t iio_buffer_show_watermark(struct device *dev,
1154 					 struct device_attribute *attr,
1155 					 char *buf)
1156 {
1157 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1158 	struct iio_buffer *buffer = indio_dev->buffer;
1159 
1160 	return sprintf(buf, "%u\n", buffer->watermark);
1161 }
1162 
1163 static ssize_t iio_buffer_store_watermark(struct device *dev,
1164 					  struct device_attribute *attr,
1165 					  const char *buf,
1166 					  size_t len)
1167 {
1168 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1169 	struct iio_buffer *buffer = indio_dev->buffer;
1170 	unsigned int val;
1171 	int ret;
1172 
1173 	ret = kstrtouint(buf, 10, &val);
1174 	if (ret)
1175 		return ret;
1176 	if (!val)
1177 		return -EINVAL;
1178 
1179 	mutex_lock(&indio_dev->mlock);
1180 
1181 	if (val > buffer->length) {
1182 		ret = -EINVAL;
1183 		goto out;
1184 	}
1185 
1186 	if (iio_buffer_is_active(indio_dev->buffer)) {
1187 		ret = -EBUSY;
1188 		goto out;
1189 	}
1190 
1191 	buffer->watermark = val;
1192 out:
1193 	mutex_unlock(&indio_dev->mlock);
1194 
1195 	return ret ? ret : len;
1196 }
1197 
1198 static ssize_t iio_dma_show_data_available(struct device *dev,
1199 						struct device_attribute *attr,
1200 						char *buf)
1201 {
1202 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1203 	size_t bytes;
1204 
1205 	bytes = iio_buffer_data_available(indio_dev->buffer);
1206 
1207 	return sprintf(buf, "%zu\n", bytes);
1208 }
1209 
1210 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
1211 		   iio_buffer_write_length);
1212 static struct device_attribute dev_attr_length_ro = __ATTR(length,
1213 	S_IRUGO, iio_buffer_read_length, NULL);
1214 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
1215 		   iio_buffer_show_enable, iio_buffer_store_enable);
1216 static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
1217 		   iio_buffer_show_watermark, iio_buffer_store_watermark);
1218 static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
1219 	S_IRUGO, iio_buffer_show_watermark, NULL);
1220 static DEVICE_ATTR(data_available, S_IRUGO,
1221 		iio_dma_show_data_available, NULL);
1222 
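/*
 * Typical sysfs usage of the attributes above (illustrative shell commands,
 * device number assumed):
 *
 *	echo 128 > /sys/bus/iio/devices/iio:device0/buffer/length
 *	echo 64  > /sys/bus/iio/devices/iio:device0/buffer/watermark
 *	echo 1   > /sys/bus/iio/devices/iio:device0/buffer/enable
 *	cat /sys/bus/iio/devices/iio:device0/buffer/data_available
 */
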
1223 static struct attribute *iio_buffer_attrs[] = {
1224 	&dev_attr_length.attr,
1225 	&dev_attr_enable.attr,
1226 	&dev_attr_watermark.attr,
1227 	&dev_attr_data_available.attr,
1228 };
1229 
1230 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1231 {
1232 	struct iio_dev_attr *p;
1233 	struct attribute **attr;
1234 	struct iio_buffer *buffer = indio_dev->buffer;
1235 	int ret, i, attrn, attrcount, attrcount_orig = 0;
1236 	const struct iio_chan_spec *channels;
1237 
1238 	channels = indio_dev->channels;
1239 	if (channels) {
1240 		int ml = indio_dev->masklength;
1241 
1242 		for (i = 0; i < indio_dev->num_channels; i++)
1243 			ml = max(ml, channels[i].scan_index + 1);
1244 		indio_dev->masklength = ml;
1245 	}
1246 
1247 	if (!buffer)
1248 		return 0;
1249 
1250 	attrcount = 0;
1251 	if (buffer->attrs) {
1252 		while (buffer->attrs[attrcount] != NULL)
1253 			attrcount++;
1254 	}
1255 
1256 	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
1257 		       sizeof(struct attribute *), GFP_KERNEL);
1258 	if (!attr)
1259 		return -ENOMEM;
1260 
1261 	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1262 	if (!buffer->access->set_length)
1263 		attr[0] = &dev_attr_length_ro.attr;
1264 
1265 	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1266 		attr[2] = &dev_attr_watermark_ro.attr;
1267 
1268 	if (buffer->attrs)
1269 		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
1270 		       sizeof(struct attribute *) * attrcount);
1271 
1272 	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;
1273 
1274 	buffer->buffer_group.name = "buffer";
1275 	buffer->buffer_group.attrs = attr;
1276 
1277 	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
1278 
1279 	if (buffer->scan_el_attrs != NULL) {
1280 		attr = buffer->scan_el_attrs->attrs;
1281 		while (*attr++ != NULL)
1282 			attrcount_orig++;
1283 	}
1284 	attrcount = attrcount_orig;
1285 	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
1286 	channels = indio_dev->channels;
1287 	if (channels) {
1288 		/* Add scan element attributes for each channel with a valid scan index */
1289 		for (i = 0; i < indio_dev->num_channels; i++) {
1290 			if (channels[i].scan_index < 0)
1291 				continue;
1292 
1293 			ret = iio_buffer_add_channel_sysfs(indio_dev,
1294 							 &channels[i]);
1295 			if (ret < 0)
1296 				goto error_cleanup_dynamic;
1297 			attrcount += ret;
1298 			if (channels[i].type == IIO_TIMESTAMP)
1299 				indio_dev->scan_index_timestamp =
1300 					channels[i].scan_index;
1301 		}
1302 		if (indio_dev->masklength && buffer->scan_mask == NULL) {
1303 			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1304 							  GFP_KERNEL);
1305 			if (buffer->scan_mask == NULL) {
1306 				ret = -ENOMEM;
1307 				goto error_cleanup_dynamic;
1308 			}
1309 		}
1310 	}
1311 
1312 	buffer->scan_el_group.name = iio_scan_elements_group_name;
1313 
1314 	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
1315 					      sizeof(buffer->scan_el_group.attrs[0]),
1316 					      GFP_KERNEL);
1317 	if (buffer->scan_el_group.attrs == NULL) {
1318 		ret = -ENOMEM;
1319 		goto error_free_scan_mask;
1320 	}
1321 	if (buffer->scan_el_attrs)
1322 		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
1323 		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
1324 	attrn = attrcount_orig;
1325 
1326 	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
1327 		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
1328 	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
1329 
1330 	return 0;
1331 
1332 error_free_scan_mask:
1333 	bitmap_free(buffer->scan_mask);
1334 error_cleanup_dynamic:
1335 	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
1336 	kfree(indio_dev->buffer->buffer_group.attrs);
1337 
1338 	return ret;
1339 }
1340 
1341 void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
1342 {
1343 	if (!indio_dev->buffer)
1344 		return;
1345 
1346 	bitmap_free(indio_dev->buffer->scan_mask);
1347 	kfree(indio_dev->buffer->buffer_group.attrs);
1348 	kfree(indio_dev->buffer->scan_el_group.attrs);
1349 	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
1350 }
1351 
1352 /**
1353  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1354  * @indio_dev: the iio device
1355  * @mask: scan mask to be checked
1356  *
1357  * Return true if exactly one bit is set in the scan mask, false otherwise. It
1358  * can be used for devices where only one channel can be active for sampling at
1359  * a time.
1360  */
1361 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1362 	const unsigned long *mask)
1363 {
1364 	return bitmap_weight(mask, indio_dev->masklength) == 1;
1365 }
1366 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
1367 
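/*
 * Usage sketch (hypothetical driver): this helper is hooked up through the
 * buffer setup ops when the hardware can only sample a single channel:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */
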
1368 static const void *iio_demux(struct iio_buffer *buffer,
1369 				 const void *datain)
1370 {
1371 	struct iio_demux_table *t;
1372 
1373 	if (list_empty(&buffer->demux_list))
1374 		return datain;
1375 	list_for_each_entry(t, &buffer->demux_list, l)
1376 		memcpy(buffer->demux_bounce + t->to,
1377 		       datain + t->from, t->length);
1378 
1379 	return buffer->demux_bounce;
1380 }
1381 
1382 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1383 {
1384 	const void *dataout = iio_demux(buffer, data);
1385 	int ret;
1386 
1387 	ret = buffer->access->store_to(buffer, dataout);
1388 	if (ret)
1389 		return ret;
1390 
1391 	/*
1392 	 * We can't just test for watermark to decide if we wake the poll queue
1393 	 * because a read may request fewer samples than the watermark.
1394 	 */
1395 	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1396 	return 0;
1397 }
1398 
1399 /**
1400  * iio_push_to_buffers() - push to a registered buffer.
1401  * @indio_dev:		iio_dev structure for device.
1402  * @data:		Full scan.
1403  */
1404 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1405 {
1406 	int ret;
1407 	struct iio_buffer *buf;
1408 
1409 	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
1410 		ret = iio_push_to_buffer(buf, data);
1411 		if (ret < 0)
1412 			return ret;
1413 	}
1414 
1415 	return 0;
1416 }
1417 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
1418 
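/*
 * Usage sketch (hypothetical trigger handler, error handling omitted): a
 * driver pushes one complete scan, laid out according to active_scan_mask,
 * from its pollfunc:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u16 scan_buf[4] = { 0 };	// hypothetical scan storage
 *
 *		// fill scan_buf with one scan worth of samples here
 *		iio_push_to_buffers(indio_dev, scan_buf);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
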
1419 /**
1420  * iio_buffer_release() - Free a buffer's resources
1421  * @ref: Pointer to the kref embedded in the iio_buffer struct
1422  *
1423  * This function is called when the last reference to the buffer has been
1424  * dropped. It will typically free all resources allocated by the buffer. Do not
1425  * call this function manually, always use iio_buffer_put() when done using a
1426  * buffer.
1427  */
1428 static void iio_buffer_release(struct kref *ref)
1429 {
1430 	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1431 
1432 	buffer->access->release(buffer);
1433 }
1434 
1435 /**
1436  * iio_buffer_get() - Grab a reference to the buffer
1437  * @buffer: The buffer to grab a reference for, may be NULL
1438  *
1439  * Returns the pointer to the buffer that was passed into the function.
1440  */
1441 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1442 {
1443 	if (buffer)
1444 		kref_get(&buffer->ref);
1445 
1446 	return buffer;
1447 }
1448 EXPORT_SYMBOL_GPL(iio_buffer_get);
1449 
1450 /**
1451  * iio_buffer_put() - Release the reference to the buffer
1452  * @buffer: The buffer to release the reference for, may be NULL
1453  */
1454 void iio_buffer_put(struct iio_buffer *buffer)
1455 {
1456 	if (buffer)
1457 		kref_put(&buffer->ref, iio_buffer_release);
1458 }
1459 EXPORT_SYMBOL_GPL(iio_buffer_put);
1460 
1461 /**
1462  * iio_device_attach_buffer - Attach a buffer to an IIO device
1463  * @indio_dev: The device the buffer should be attached to
1464  * @buffer: The buffer to attach to the device
1465  *
1466  * This function attaches a buffer to an IIO device. The buffer stays attached to
1467  * the device until the device is freed. The function should be called at
1468  * most once per device.
1469  */
1470 void iio_device_attach_buffer(struct iio_dev *indio_dev,
1471 			      struct iio_buffer *buffer)
1472 {
1473 	indio_dev->buffer = iio_buffer_get(buffer);
1474 }
1475 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
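/*
 * Usage sketch (hypothetical probe path): a driver allocates a software
 * buffer with the kfifo helpers, attaches it, and advertises buffered mode:
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 */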
1476