1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/kernel.h>
13 #include <linux/export.h>
14 #include <linux/device.h>
15 #include <linux/fs.h>
16 #include <linux/cdev.h>
17 #include <linux/slab.h>
18 #include <linux/poll.h>
19 #include <linux/sched/signal.h>
20 
21 #include <linux/iio/iio.h>
22 #include "iio_core.h"
23 #include <linux/iio/sysfs.h>
24 #include <linux/iio/buffer.h>
25 #include <linux/iio/buffer_impl.h>
26 
27 static const char * const iio_endian_prefix[] = {
28 	[IIO_BE] = "be",
29 	[IIO_LE] = "le",
30 };
31 
32 static bool iio_buffer_is_active(struct iio_buffer *buf)
33 {
34 	return !list_empty(&buf->buffer_list);
35 }
36 
37 static size_t iio_buffer_data_available(struct iio_buffer *buf)
38 {
39 	return buf->access->data_available(buf);
40 }
41 
42 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
43 				   struct iio_buffer *buf, size_t required)
44 {
45 	if (!indio_dev->info->hwfifo_flush_to_buffer)
46 		return -ENODEV;
47 
48 	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
49 }
50 
51 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
52 			     size_t to_wait, int to_flush)
53 {
54 	size_t avail;
55 	int flushed = 0;
56 
57 	/* wakeup if the device was unregistered */
58 	if (!indio_dev->info)
59 		return true;
60 
61 	/* drain the buffer if it was disabled */
62 	if (!iio_buffer_is_active(buf)) {
63 		to_wait = min_t(size_t, to_wait, 1);
64 		to_flush = 0;
65 	}
66 
67 	avail = iio_buffer_data_available(buf);
68 
69 	if (avail >= to_wait) {
70 		/* force a flush for non-blocking reads */
71 		if (!to_wait && avail < to_flush)
72 			iio_buffer_flush_hwfifo(indio_dev, buf,
73 						to_flush - avail);
74 		return true;
75 	}
76 
77 	if (to_flush)
78 		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
79 						  to_wait - avail);
80 	if (flushed <= 0)
81 		return false;
82 
83 	if (avail + flushed >= to_wait)
84 		return true;
85 
86 	return false;
87 }
88 
89 /**
90  * iio_buffer_read_outer() - chrdev read for buffer access
91  * @filp:	File structure pointer for the char device
92  * @buf:	Destination buffer for iio buffer read
93  * @n:		Maximum number of bytes to read
94  * @f_ps:	Long offset provided by the user as a seek position
95  *
96  * This function relies on all buffer implementations having an
97  * iio_buffer as their first element.
98  *
99  * Return: number of bytes read on success, 0 to signal end of file (nothing
100  *	   will ever be available to read), or a negative error code
101  **/
102 ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
103 			      size_t n, loff_t *f_ps)
104 {
105 	struct iio_dev *indio_dev = filp->private_data;
106 	struct iio_buffer *rb = indio_dev->buffer;
107 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
108 	size_t datum_size;
109 	size_t to_wait;
110 	int ret = 0;
111 
112 	if (!indio_dev->info)
113 		return -ENODEV;
114 
115 	if (!rb || !rb->access->read)
116 		return -EINVAL;
117 
118 	datum_size = rb->bytes_per_datum;
119 
120 	/*
121 	 * If datum_size is 0 there will never be anything to read from the
122 	 * buffer, so signal end of file now.
123 	 */
124 	if (!datum_size)
125 		return 0;
126 
127 	if (filp->f_flags & O_NONBLOCK)
128 		to_wait = 0;
129 	else
130 		to_wait = min_t(size_t, n / datum_size, rb->watermark);
131 
132 	add_wait_queue(&rb->pollq, &wait);
133 	do {
134 		if (!indio_dev->info) {
135 			ret = -ENODEV;
136 			break;
137 		}
138 
139 		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
140 			if (signal_pending(current)) {
141 				ret = -ERESTARTSYS;
142 				break;
143 			}
144 
145 			wait_woken(&wait, TASK_INTERRUPTIBLE,
146 				   MAX_SCHEDULE_TIMEOUT);
147 			continue;
148 		}
149 
150 		ret = rb->access->read(rb, n, buf);
151 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
152 			ret = -EAGAIN;
153 	} while (ret == 0);
154 	remove_wait_queue(&rb->pollq, &wait);
155 
156 	return ret;
157 }
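
/*
 * Example (illustrative sketch, not part of this file): a user-space reader
 * pulling whole scans from the buffer character device. The device path and
 * the 16-byte scan size are assumptions for the example; the real scan size
 * follows from the enabled scan elements (see iio_compute_scan_bytes()).
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[16];
 *	ssize_t ret;
 *
 *	while ((ret = read(fd, scan, sizeof(scan))) > 0)
 *		process_scan(scan, ret);	// hypothetical consumer
 *	close(fd);
 */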
158 
159 /**
160  * iio_buffer_poll() - poll the buffer to find out if it has data
161  * @filp:	File structure pointer for device access
162  * @wait:	Poll table structure pointer for which the driver adds
163  *		a wait queue
164  *
165  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
166  *	   or 0 for other cases
167  */
168 __poll_t iio_buffer_poll(struct file *filp,
169 			     struct poll_table_struct *wait)
170 {
171 	struct iio_dev *indio_dev = filp->private_data;
172 	struct iio_buffer *rb = indio_dev->buffer;
173 
174 	if (!indio_dev->info || rb == NULL)
175 		return 0;
176 
177 	poll_wait(filp, &rb->pollq, wait);
178 	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
179 		return EPOLLIN | EPOLLRDNORM;
180 	return 0;
181 }
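
/*
 * Example (sketch): waiting for the watermark with poll() before reading.
 * poll() reports POLLIN once at least `watermark` samples are queued (or the
 * buffer is disabled); with O_NONBLOCK set on the descriptor the subsequent
 * read() returns whatever is available without sleeping. Descriptor setup as
 * in the read example above; error handling omitted.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		ret = read(fd, scan, sizeof(scan));
 */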
182 
183 /**
184  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
185  * @indio_dev: The IIO device
186  *
187  * Wakes up the buffer waitqueue used for poll(). Should usually
188  * be called when the device is unregistered.
189  */
190 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
191 {
192 	struct iio_buffer *buffer = indio_dev->buffer;
193 
194 	if (!buffer)
195 		return;
196 
197 	wake_up(&buffer->pollq);
198 }
199 
200 void iio_buffer_init(struct iio_buffer *buffer)
201 {
202 	INIT_LIST_HEAD(&buffer->demux_list);
203 	INIT_LIST_HEAD(&buffer->buffer_list);
204 	init_waitqueue_head(&buffer->pollq);
205 	kref_init(&buffer->ref);
206 	if (!buffer->watermark)
207 		buffer->watermark = 1;
208 }
209 EXPORT_SYMBOL(iio_buffer_init);
210 
211 /**
212  * iio_buffer_set_attrs - Set buffer specific attributes
213  * @buffer: The buffer for which we are setting attributes
214  * @attrs: Pointer to a null terminated list of pointers to attributes
215  */
216 void iio_buffer_set_attrs(struct iio_buffer *buffer,
217 			 const struct attribute **attrs)
218 {
219 	buffer->attrs = attrs;
220 }
221 EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
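
/*
 * Example (illustrative sketch): a driver exposing an extra, driver-specific
 * buffer attribute. The attribute name and show callback are hypothetical.
 *
 *	static IIO_DEVICE_ATTR(hwfifo_watermark_min, 0444,
 *			       my_drv_wm_min_show, NULL, 0);
 *
 *	static const struct attribute *my_drv_buffer_attrs[] = {
 *		&iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
 *		NULL,
 *	};
 *
 *	iio_buffer_set_attrs(indio_dev->buffer, my_drv_buffer_attrs);
 */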
222 
223 static ssize_t iio_show_scan_index(struct device *dev,
224 				   struct device_attribute *attr,
225 				   char *buf)
226 {
227 	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
228 }
229 
230 static ssize_t iio_show_fixed_type(struct device *dev,
231 				   struct device_attribute *attr,
232 				   char *buf)
233 {
234 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
235 	u8 type = this_attr->c->scan_type.endianness;
236 
237 	if (type == IIO_CPU) {
238 #ifdef __LITTLE_ENDIAN
239 		type = IIO_LE;
240 #else
241 		type = IIO_BE;
242 #endif
243 	}
244 	if (this_attr->c->scan_type.repeat > 1)
245 		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
246 		       iio_endian_prefix[type],
247 		       this_attr->c->scan_type.sign,
248 		       this_attr->c->scan_type.realbits,
249 		       this_attr->c->scan_type.storagebits,
250 		       this_attr->c->scan_type.repeat,
251 		       this_attr->c->scan_type.shift);
252 	else
253 		return sprintf(buf, "%s:%c%d/%d>>%u\n",
254 		       iio_endian_prefix[type],
255 		       this_attr->c->scan_type.sign,
256 		       this_attr->c->scan_type.realbits,
257 		       this_attr->c->scan_type.storagebits,
258 		       this_attr->c->scan_type.shift);
259 }
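
/*
 * Example output of the per-channel "type" attribute built above: a
 * little-endian, signed, 12-bit value stored in 16 bits and shifted right by
 * four reads "le:s12/16>>4"; with a repeat count of 2 it reads
 * "le:s12/16X2>>4".
 */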
260 
261 static ssize_t iio_scan_el_show(struct device *dev,
262 				struct device_attribute *attr,
263 				char *buf)
264 {
265 	int ret;
266 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
267 	struct iio_buffer *buffer = indio_dev->buffer;
268 
269 	/* Ensure ret is 0 or 1. */
270 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
271 		       buffer->scan_mask);
272 
273 	return sprintf(buf, "%d\n", ret);
274 }
275 
276 /* Note: NULL is used as the error indicator, as a valid mask can never be NULL. */
277 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
278 					  unsigned int masklength,
279 					  const unsigned long *mask,
280 					  bool strict)
281 {
282 	if (bitmap_empty(mask, masklength))
283 		return NULL;
284 	while (*av_masks) {
285 		if (strict) {
286 			if (bitmap_equal(mask, av_masks, masklength))
287 				return av_masks;
288 		} else {
289 			if (bitmap_subset(mask, av_masks, masklength))
290 				return av_masks;
291 		}
292 		av_masks += BITS_TO_LONGS(masklength);
293 	}
294 	return NULL;
295 }
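
/*
 * Example (illustrative sketch): the layout iio_scan_mask_match() walks. A
 * driver whose hardware can capture either channel 0 alone or all three
 * channels together would provide something like the following at
 * registration time, where the trailing zero entry terminates the list
 * (masklength is assumed to fit in a single long):
 *
 *	static const unsigned long my_drv_scan_masks[] = {
 *		BIT(0),
 *		BIT(0) | BIT(1) | BIT(2),
 *		0,
 *	};
 *
 *	indio_dev->available_scan_masks = my_drv_scan_masks;
 */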
296 
297 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
298 	const unsigned long *mask)
299 {
300 	if (!indio_dev->setup_ops->validate_scan_mask)
301 		return true;
302 
303 	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
304 }
305 
306 /**
307  * iio_scan_mask_set() - set particular bit in the scan mask
308  * @indio_dev: the iio device
309  * @buffer: the buffer whose scan mask we are interested in
310  * @bit: the bit to be set.
311  *
312  * Note that at this point we have no way of knowing what other
313  * buffers might request, hence this code only verifies that the
314  * individual buffer's request is plausible.
315  */
316 static int iio_scan_mask_set(struct iio_dev *indio_dev,
317 		      struct iio_buffer *buffer, int bit)
318 {
319 	const unsigned long *mask;
320 	unsigned long *trialmask;
321 
322 	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
323 	if (trialmask == NULL)
324 		return -ENOMEM;
325 	if (!indio_dev->masklength) {
326 		WARN(1, "Trying to set scanmask prior to registering buffer\n");
327 		goto err_invalid_mask;
328 	}
329 	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
330 	set_bit(bit, trialmask);
331 
332 	if (!iio_validate_scan_mask(indio_dev, trialmask))
333 		goto err_invalid_mask;
334 
335 	if (indio_dev->available_scan_masks) {
336 		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
337 					   indio_dev->masklength,
338 					   trialmask, false);
339 		if (!mask)
340 			goto err_invalid_mask;
341 	}
342 	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
343 
344 	bitmap_free(trialmask);
345 
346 	return 0;
347 
348 err_invalid_mask:
349 	bitmap_free(trialmask);
350 	return -EINVAL;
351 }
352 
353 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
354 {
355 	clear_bit(bit, buffer->scan_mask);
356 	return 0;
357 }
358 
359 static int iio_scan_mask_query(struct iio_dev *indio_dev,
360 			       struct iio_buffer *buffer, int bit)
361 {
362 	if (bit > indio_dev->masklength)
363 		return -EINVAL;
364 
365 	if (!buffer->scan_mask)
366 		return 0;
367 
368 	/* Ensure return value is 0 or 1. */
369 	return !!test_bit(bit, buffer->scan_mask);
370 }
371 
372 static ssize_t iio_scan_el_store(struct device *dev,
373 				 struct device_attribute *attr,
374 				 const char *buf,
375 				 size_t len)
376 {
377 	int ret;
378 	bool state;
379 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
380 	struct iio_buffer *buffer = indio_dev->buffer;
381 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
382 
383 	ret = strtobool(buf, &state);
384 	if (ret < 0)
385 		return ret;
386 	mutex_lock(&indio_dev->mlock);
387 	if (iio_buffer_is_active(buffer)) {
388 		ret = -EBUSY;
389 		goto error_ret;
390 	}
391 	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
392 	if (ret < 0)
393 		goto error_ret;
394 	if (!state && ret) {
395 		ret = iio_scan_mask_clear(buffer, this_attr->address);
396 		if (ret)
397 			goto error_ret;
398 	} else if (state && !ret) {
399 		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
400 		if (ret)
401 			goto error_ret;
402 	}
403 
404 error_ret:
405 	mutex_unlock(&indio_dev->mlock);
406 
407 	return ret < 0 ? ret : len;
408 
409 }
410 
411 static ssize_t iio_scan_el_ts_show(struct device *dev,
412 				   struct device_attribute *attr,
413 				   char *buf)
414 {
415 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
416 	struct iio_buffer *buffer = indio_dev->buffer;
417 
418 	return sprintf(buf, "%d\n", buffer->scan_timestamp);
419 }
420 
421 static ssize_t iio_scan_el_ts_store(struct device *dev,
422 				    struct device_attribute *attr,
423 				    const char *buf,
424 				    size_t len)
425 {
426 	int ret;
427 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
428 	struct iio_buffer *buffer = indio_dev->buffer;
429 	bool state;
430 
431 	ret = strtobool(buf, &state);
432 	if (ret < 0)
433 		return ret;
434 
435 	mutex_lock(&indio_dev->mlock);
436 	if (iio_buffer_is_active(buffer)) {
437 		ret = -EBUSY;
438 		goto error_ret;
439 	}
440 	buffer->scan_timestamp = state;
441 error_ret:
442 	mutex_unlock(&indio_dev->mlock);
443 
444 	return ret ? ret : len;
445 }
446 
447 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
448 					struct iio_buffer *buffer,
449 					const struct iio_chan_spec *chan)
450 {
451 	int ret, attrcount = 0;
452 
453 	ret = __iio_add_chan_devattr("index",
454 				     chan,
455 				     &iio_show_scan_index,
456 				     NULL,
457 				     0,
458 				     IIO_SEPARATE,
459 				     &indio_dev->dev,
460 				     &buffer->scan_el_dev_attr_list);
461 	if (ret)
462 		return ret;
463 	attrcount++;
464 	ret = __iio_add_chan_devattr("type",
465 				     chan,
466 				     &iio_show_fixed_type,
467 				     NULL,
468 				     0,
469 				     0,
470 				     &indio_dev->dev,
471 				     &buffer->scan_el_dev_attr_list);
472 	if (ret)
473 		return ret;
474 	attrcount++;
475 	if (chan->type != IIO_TIMESTAMP)
476 		ret = __iio_add_chan_devattr("en",
477 					     chan,
478 					     &iio_scan_el_show,
479 					     &iio_scan_el_store,
480 					     chan->scan_index,
481 					     0,
482 					     &indio_dev->dev,
483 					     &buffer->scan_el_dev_attr_list);
484 	else
485 		ret = __iio_add_chan_devattr("en",
486 					     chan,
487 					     &iio_scan_el_ts_show,
488 					     &iio_scan_el_ts_store,
489 					     chan->scan_index,
490 					     0,
491 					     &indio_dev->dev,
492 					     &buffer->scan_el_dev_attr_list);
493 	if (ret)
494 		return ret;
495 	attrcount++;
496 	ret = attrcount;
497 	return ret;
498 }
499 
500 static ssize_t iio_buffer_read_length(struct device *dev,
501 				      struct device_attribute *attr,
502 				      char *buf)
503 {
504 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
505 	struct iio_buffer *buffer = indio_dev->buffer;
506 
507 	return sprintf(buf, "%d\n", buffer->length);
508 }
509 
510 static ssize_t iio_buffer_write_length(struct device *dev,
511 				       struct device_attribute *attr,
512 				       const char *buf, size_t len)
513 {
514 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
515 	struct iio_buffer *buffer = indio_dev->buffer;
516 	unsigned int val;
517 	int ret;
518 
519 	ret = kstrtouint(buf, 10, &val);
520 	if (ret)
521 		return ret;
522 
523 	if (val == buffer->length)
524 		return len;
525 
526 	mutex_lock(&indio_dev->mlock);
527 	if (iio_buffer_is_active(buffer)) {
528 		ret = -EBUSY;
529 	} else {
530 		buffer->access->set_length(buffer, val);
531 		ret = 0;
532 	}
533 	if (ret)
534 		goto out;
535 	if (buffer->length && buffer->length < buffer->watermark)
536 		buffer->watermark = buffer->length;
537 out:
538 	mutex_unlock(&indio_dev->mlock);
539 
540 	return ret ? ret : len;
541 }
542 
543 static ssize_t iio_buffer_show_enable(struct device *dev,
544 				      struct device_attribute *attr,
545 				      char *buf)
546 {
547 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
548 	struct iio_buffer *buffer = indio_dev->buffer;
549 
550 	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
551 }
552 
553 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
554 					     unsigned int scan_index)
555 {
556 	const struct iio_chan_spec *ch;
557 	unsigned int bytes;
558 
559 	ch = iio_find_channel_from_si(indio_dev, scan_index);
560 	bytes = ch->scan_type.storagebits / 8;
561 	if (ch->scan_type.repeat > 1)
562 		bytes *= ch->scan_type.repeat;
563 	return bytes;
564 }
565 
566 static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
567 {
568 	return iio_storage_bytes_for_si(indio_dev,
569 					indio_dev->scan_index_timestamp);
570 }
571 
572 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
573 				const unsigned long *mask, bool timestamp)
574 {
575 	unsigned bytes = 0;
576 	int length, i, largest = 0;
577 
578 	/* How much space will the demuxed element take? */
579 	for_each_set_bit(i, mask,
580 			 indio_dev->masklength) {
581 		length = iio_storage_bytes_for_si(indio_dev, i);
582 		bytes = ALIGN(bytes, length);
583 		bytes += length;
584 		largest = max(largest, length);
585 	}
586 
587 	if (timestamp) {
588 		length = iio_storage_bytes_for_timestamp(indio_dev);
589 		bytes = ALIGN(bytes, length);
590 		bytes += length;
591 		largest = max(largest, length);
592 	}
593 
594 	bytes = ALIGN(bytes, largest);
595 	return bytes;
596 }
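
/*
 * Worked example for iio_compute_scan_bytes(): two enabled channels stored in
 * 16 bits each plus a 64-bit timestamp. Channel 0 lands at offset 0, channel 1
 * at offset 2 (4 bytes so far); the timestamp is aligned to 8 bytes, so it
 * lands at offset 8 and the running total becomes 16. The final ALIGN() to the
 * largest element (8) leaves the scan size at 16 bytes per sample set.
 */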
597 
598 static void iio_buffer_activate(struct iio_dev *indio_dev,
599 	struct iio_buffer *buffer)
600 {
601 	iio_buffer_get(buffer);
602 	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
603 }
604 
605 static void iio_buffer_deactivate(struct iio_buffer *buffer)
606 {
607 	list_del_init(&buffer->buffer_list);
608 	wake_up_interruptible(&buffer->pollq);
609 	iio_buffer_put(buffer);
610 }
611 
612 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
613 {
614 	struct iio_buffer *buffer, *_buffer;
615 
616 	list_for_each_entry_safe(buffer, _buffer,
617 			&indio_dev->buffer_list, buffer_list)
618 		iio_buffer_deactivate(buffer);
619 }
620 
621 static int iio_buffer_enable(struct iio_buffer *buffer,
622 	struct iio_dev *indio_dev)
623 {
624 	if (!buffer->access->enable)
625 		return 0;
626 	return buffer->access->enable(buffer, indio_dev);
627 }
628 
629 static int iio_buffer_disable(struct iio_buffer *buffer,
630 	struct iio_dev *indio_dev)
631 {
632 	if (!buffer->access->disable)
633 		return 0;
634 	return buffer->access->disable(buffer, indio_dev);
635 }
636 
637 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
638 	struct iio_buffer *buffer)
639 {
640 	unsigned int bytes;
641 
642 	if (!buffer->access->set_bytes_per_datum)
643 		return;
644 
645 	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
646 		buffer->scan_timestamp);
647 
648 	buffer->access->set_bytes_per_datum(buffer, bytes);
649 }
650 
651 static int iio_buffer_request_update(struct iio_dev *indio_dev,
652 	struct iio_buffer *buffer)
653 {
654 	int ret;
655 
656 	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
657 	if (buffer->access->request_update) {
658 		ret = buffer->access->request_update(buffer);
659 		if (ret) {
660 			dev_dbg(&indio_dev->dev,
661 			       "Buffer not started: buffer parameter update failed (%d)\n",
662 				ret);
663 			return ret;
664 		}
665 	}
666 
667 	return 0;
668 }
669 
670 static void iio_free_scan_mask(struct iio_dev *indio_dev,
671 	const unsigned long *mask)
672 {
673 	/* If the mask is dynamically allocated, free it; otherwise do nothing */
674 	if (!indio_dev->available_scan_masks)
675 		bitmap_free(mask);
676 }
677 
678 struct iio_device_config {
679 	unsigned int mode;
680 	unsigned int watermark;
681 	const unsigned long *scan_mask;
682 	unsigned int scan_bytes;
683 	bool scan_timestamp;
684 };
685 
686 static int iio_verify_update(struct iio_dev *indio_dev,
687 	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
688 	struct iio_device_config *config)
689 {
690 	unsigned long *compound_mask;
691 	const unsigned long *scan_mask;
692 	bool strict_scanmask = false;
693 	struct iio_buffer *buffer;
694 	bool scan_timestamp;
695 	unsigned int modes;
696 
697 	if (insert_buffer &&
698 	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
699 		dev_dbg(&indio_dev->dev,
700 			"At least one scan element must be enabled first\n");
701 		return -EINVAL;
702 	}
703 
704 	memset(config, 0, sizeof(*config));
705 	config->watermark = ~0;
706 
707 	/*
708 	 * If there is just one buffer and we are removing it there is nothing
709 	 * to verify.
710 	 */
711 	if (remove_buffer && !insert_buffer &&
712 	    list_is_singular(&indio_dev->buffer_list))
713 		return 0;
714 
715 	modes = indio_dev->modes;
716 
717 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
718 		if (buffer == remove_buffer)
719 			continue;
720 		modes &= buffer->access->modes;
721 		config->watermark = min(config->watermark, buffer->watermark);
722 	}
723 
724 	if (insert_buffer) {
725 		modes &= insert_buffer->access->modes;
726 		config->watermark = min(config->watermark,
727 			insert_buffer->watermark);
728 	}
729 
730 	/* Definitely possible for devices to support both of these. */
731 	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
732 		config->mode = INDIO_BUFFER_TRIGGERED;
733 	} else if (modes & INDIO_BUFFER_HARDWARE) {
734 		/*
735 		 * Keep things simple for now and only allow a single buffer to
736 		 * be connected in hardware mode.
737 		 */
738 		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
739 			return -EINVAL;
740 		config->mode = INDIO_BUFFER_HARDWARE;
741 		strict_scanmask = true;
742 	} else if (modes & INDIO_BUFFER_SOFTWARE) {
743 		config->mode = INDIO_BUFFER_SOFTWARE;
744 	} else {
745 		/* Can only occur on first buffer */
746 		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
747 			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
748 		return -EINVAL;
749 	}
750 
751 	/* What scan mask do we actually have? */
752 	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
753 	if (compound_mask == NULL)
754 		return -ENOMEM;
755 
756 	scan_timestamp = false;
757 
758 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
759 		if (buffer == remove_buffer)
760 			continue;
761 		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
762 			  indio_dev->masklength);
763 		scan_timestamp |= buffer->scan_timestamp;
764 	}
765 
766 	if (insert_buffer) {
767 		bitmap_or(compound_mask, compound_mask,
768 			  insert_buffer->scan_mask, indio_dev->masklength);
769 		scan_timestamp |= insert_buffer->scan_timestamp;
770 	}
771 
772 	if (indio_dev->available_scan_masks) {
773 		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
774 				    indio_dev->masklength,
775 				    compound_mask,
776 				    strict_scanmask);
777 		bitmap_free(compound_mask);
778 		if (scan_mask == NULL)
779 			return -EINVAL;
780 	} else {
781 		scan_mask = compound_mask;
782 	}
783 
784 	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
785 				    scan_mask, scan_timestamp);
786 	config->scan_mask = scan_mask;
787 	config->scan_timestamp = scan_timestamp;
788 
789 	return 0;
790 }
791 
792 /**
793  * struct iio_demux_table - table describing demux memcpy ops
794  * @from:	index to copy from
795  * @to:		index to copy to
796  * @length:	how many bytes to copy
797  * @l:		list head used for management
798  */
799 struct iio_demux_table {
800 	unsigned from;
801 	unsigned to;
802 	unsigned length;
803 	struct list_head l;
804 };
805 
806 static void iio_buffer_demux_free(struct iio_buffer *buffer)
807 {
808 	struct iio_demux_table *p, *q;
809 	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
810 		list_del(&p->l);
811 		kfree(p);
812 	}
813 }
814 
815 static int iio_buffer_add_demux(struct iio_buffer *buffer,
816 	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
817 	unsigned int length)
818 {
819 
820 	if (*p && (*p)->from + (*p)->length == in_loc &&
821 		(*p)->to + (*p)->length == out_loc) {
822 		(*p)->length += length;
823 	} else {
824 		*p = kmalloc(sizeof(**p), GFP_KERNEL);
825 		if (*p == NULL)
826 			return -ENOMEM;
827 		(*p)->from = in_loc;
828 		(*p)->to = out_loc;
829 		(*p)->length = length;
830 		list_add_tail(&(*p)->l, &buffer->demux_list);
831 	}
832 
833 	return 0;
834 }
835 
836 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
837 				   struct iio_buffer *buffer)
838 {
839 	int ret, in_ind = -1, out_ind, length;
840 	unsigned in_loc = 0, out_loc = 0;
841 	struct iio_demux_table *p = NULL;
842 
843 	/* Clear out any old demux */
844 	iio_buffer_demux_free(buffer);
845 	kfree(buffer->demux_bounce);
846 	buffer->demux_bounce = NULL;
847 
848 	/* First work out which scan mode we will actually have */
849 	if (bitmap_equal(indio_dev->active_scan_mask,
850 			 buffer->scan_mask,
851 			 indio_dev->masklength))
852 		return 0;
853 
854 	/* Now we have the two masks; work up from the least significant bit, building sizes */
855 	for_each_set_bit(out_ind,
856 			 buffer->scan_mask,
857 			 indio_dev->masklength) {
858 		in_ind = find_next_bit(indio_dev->active_scan_mask,
859 				       indio_dev->masklength,
860 				       in_ind + 1);
861 		while (in_ind != out_ind) {
862 			/* Skip a captured but unrequested channel, keeping alignment */
863 			length = iio_storage_bytes_for_si(indio_dev, in_ind);
864 			in_loc = roundup(in_loc, length) + length;
865 			in_ind = find_next_bit(indio_dev->active_scan_mask,
866 					       indio_dev->masklength,
867 					       in_ind + 1);
868 		}
869 		length = iio_storage_bytes_for_si(indio_dev, in_ind);
870 		out_loc = roundup(out_loc, length);
871 		in_loc = roundup(in_loc, length);
872 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
873 		if (ret)
874 			goto error_clear_mux_table;
875 		out_loc += length;
876 		in_loc += length;
877 	}
878 	/* Relies on scan_timestamp being last */
879 	if (buffer->scan_timestamp) {
880 		length = iio_storage_bytes_for_timestamp(indio_dev);
881 		out_loc = roundup(out_loc, length);
882 		in_loc = roundup(in_loc, length);
883 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
884 		if (ret)
885 			goto error_clear_mux_table;
886 		out_loc += length;
887 		in_loc += length;
888 	}
889 	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
890 	if (buffer->demux_bounce == NULL) {
891 		ret = -ENOMEM;
892 		goto error_clear_mux_table;
893 	}
894 	return 0;
895 
896 error_clear_mux_table:
897 	iio_buffer_demux_free(buffer);
898 
899 	return ret;
900 }
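
/*
 * Worked example for the demux table built above: the device captures
 * channels 0 and 1 (16 bits each) but this buffer only requested channel 1.
 * The source scan holds channel 0 at offset 0 and channel 1 at offset 2, so a
 * single table entry { .from = 2, .to = 0, .length = 2 } is created and each
 * pushed scan is repacked into the 2-byte demux_bounce area.
 */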
901 
902 static int iio_update_demux(struct iio_dev *indio_dev)
903 {
904 	struct iio_buffer *buffer;
905 	int ret;
906 
907 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
908 		ret = iio_buffer_update_demux(indio_dev, buffer);
909 		if (ret < 0)
910 			goto error_clear_mux_table;
911 	}
912 	return 0;
913 
914 error_clear_mux_table:
915 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
916 		iio_buffer_demux_free(buffer);
917 
918 	return ret;
919 }
920 
921 static int iio_enable_buffers(struct iio_dev *indio_dev,
922 	struct iio_device_config *config)
923 {
924 	struct iio_buffer *buffer;
925 	int ret;
926 
927 	indio_dev->active_scan_mask = config->scan_mask;
928 	indio_dev->scan_timestamp = config->scan_timestamp;
929 	indio_dev->scan_bytes = config->scan_bytes;
930 	indio_dev->currentmode = config->mode;
931 
932 	iio_update_demux(indio_dev);
933 
934 	/* Wind up again */
935 	if (indio_dev->setup_ops->preenable) {
936 		ret = indio_dev->setup_ops->preenable(indio_dev);
937 		if (ret) {
938 			dev_dbg(&indio_dev->dev,
939 			       "Buffer not started: buffer preenable failed (%d)\n", ret);
940 			goto err_undo_config;
941 		}
942 	}
943 
944 	if (indio_dev->info->update_scan_mode) {
945 		ret = indio_dev->info
946 			->update_scan_mode(indio_dev,
947 					   indio_dev->active_scan_mask);
948 		if (ret < 0) {
949 			dev_dbg(&indio_dev->dev,
950 				"Buffer not started: update scan mode failed (%d)\n",
951 				ret);
952 			goto err_run_postdisable;
953 		}
954 	}
955 
956 	if (indio_dev->info->hwfifo_set_watermark)
957 		indio_dev->info->hwfifo_set_watermark(indio_dev,
958 			config->watermark);
959 
960 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
961 		ret = iio_buffer_enable(buffer, indio_dev);
962 		if (ret)
963 			goto err_disable_buffers;
964 	}
965 
966 	if (indio_dev->setup_ops->postenable) {
967 		ret = indio_dev->setup_ops->postenable(indio_dev);
968 		if (ret) {
969 			dev_dbg(&indio_dev->dev,
970 			       "Buffer not started: postenable failed (%d)\n", ret);
971 			goto err_disable_buffers;
972 		}
973 	}
974 
975 	return 0;
976 
977 err_disable_buffers:
978 	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
979 					     buffer_list)
980 		iio_buffer_disable(buffer, indio_dev);
981 err_run_postdisable:
982 	if (indio_dev->setup_ops->postdisable)
983 		indio_dev->setup_ops->postdisable(indio_dev);
984 err_undo_config:
985 	indio_dev->currentmode = INDIO_DIRECT_MODE;
986 	indio_dev->active_scan_mask = NULL;
987 
988 	return ret;
989 }
990 
991 static int iio_disable_buffers(struct iio_dev *indio_dev)
992 {
993 	struct iio_buffer *buffer;
994 	int ret = 0;
995 	int ret2;
996 
997 	/* Wind down existing buffers - if there are any */
998 	if (list_empty(&indio_dev->buffer_list))
999 		return 0;
1000 
1001 	/*
1002 	 * If things go wrong at some step in disable we still need to continue
1003 	 * to perform the other steps, otherwise we leave the device in an
1004 	 * inconsistent state. We return the error code for the first error we
1005 	 * encountered.
1006 	 */
1007 
1008 	if (indio_dev->setup_ops->predisable) {
1009 		ret2 = indio_dev->setup_ops->predisable(indio_dev);
1010 		if (ret2 && !ret)
1011 			ret = ret2;
1012 	}
1013 
1014 	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
1015 		ret2 = iio_buffer_disable(buffer, indio_dev);
1016 		if (ret2 && !ret)
1017 			ret = ret2;
1018 	}
1019 
1020 	if (indio_dev->setup_ops->postdisable) {
1021 		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1022 		if (ret2 && !ret)
1023 			ret = ret2;
1024 	}
1025 
1026 	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1027 	indio_dev->active_scan_mask = NULL;
1028 	indio_dev->currentmode = INDIO_DIRECT_MODE;
1029 
1030 	return ret;
1031 }
1032 
1033 static int __iio_update_buffers(struct iio_dev *indio_dev,
1034 		       struct iio_buffer *insert_buffer,
1035 		       struct iio_buffer *remove_buffer)
1036 {
1037 	struct iio_device_config new_config;
1038 	int ret;
1039 
1040 	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1041 		&new_config);
1042 	if (ret)
1043 		return ret;
1044 
1045 	if (insert_buffer) {
1046 		ret = iio_buffer_request_update(indio_dev, insert_buffer);
1047 		if (ret)
1048 			goto err_free_config;
1049 	}
1050 
1051 	ret = iio_disable_buffers(indio_dev);
1052 	if (ret)
1053 		goto err_deactivate_all;
1054 
1055 	if (remove_buffer)
1056 		iio_buffer_deactivate(remove_buffer);
1057 	if (insert_buffer)
1058 		iio_buffer_activate(indio_dev, insert_buffer);
1059 
1060 	/* If no buffers in list, we are done */
1061 	if (list_empty(&indio_dev->buffer_list))
1062 		return 0;
1063 
1064 	ret = iio_enable_buffers(indio_dev, &new_config);
1065 	if (ret)
1066 		goto err_deactivate_all;
1067 
1068 	return 0;
1069 
1070 err_deactivate_all:
1071 	/*
1072 	 * We've already verified that the config is valid earlier. If things go
1073 	 * wrong in either enable or disable the most likely reason is an IO
1074 	 * error from the device. In this case there is no good recovery
1075 	 * strategy. Just make sure to disable everything and leave the device
1076 	 * in a sane state.  With a bit of luck the device might come back to
1077 	 * life again later and userspace can try again.
1078 	 */
1079 	iio_buffer_deactivate_all(indio_dev);
1080 
1081 err_free_config:
1082 	iio_free_scan_mask(indio_dev, new_config.scan_mask);
1083 	return ret;
1084 }
1085 
1086 int iio_update_buffers(struct iio_dev *indio_dev,
1087 		       struct iio_buffer *insert_buffer,
1088 		       struct iio_buffer *remove_buffer)
1089 {
1090 	int ret;
1091 
1092 	if (insert_buffer == remove_buffer)
1093 		return 0;
1094 
1095 	mutex_lock(&indio_dev->info_exist_lock);
1096 	mutex_lock(&indio_dev->mlock);
1097 
1098 	if (insert_buffer && iio_buffer_is_active(insert_buffer))
1099 		insert_buffer = NULL;
1100 
1101 	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1102 		remove_buffer = NULL;
1103 
1104 	if (!insert_buffer && !remove_buffer) {
1105 		ret = 0;
1106 		goto out_unlock;
1107 	}
1108 
1109 	if (indio_dev->info == NULL) {
1110 		ret = -ENODEV;
1111 		goto out_unlock;
1112 	}
1113 
1114 	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1115 
1116 out_unlock:
1117 	mutex_unlock(&indio_dev->mlock);
1118 	mutex_unlock(&indio_dev->info_exist_lock);
1119 
1120 	return ret;
1121 }
1122 EXPORT_SYMBOL_GPL(iio_update_buffers);
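
/*
 * Example (illustrative sketch): an in-kernel consumer enabling its own buffer
 * on a producer device, in the style of the buffer callback helpers. cb_buff
 * is a hypothetical consumer-side structure embedding a struct iio_buffer.
 *
 *	ret = iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer, NULL);
 *	if (ret)
 *		return ret;
 */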
1123 
1124 void iio_disable_all_buffers(struct iio_dev *indio_dev)
1125 {
1126 	iio_disable_buffers(indio_dev);
1127 	iio_buffer_deactivate_all(indio_dev);
1128 }
1129 
1130 static ssize_t iio_buffer_store_enable(struct device *dev,
1131 				       struct device_attribute *attr,
1132 				       const char *buf,
1133 				       size_t len)
1134 {
1135 	int ret;
1136 	bool requested_state;
1137 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1138 	struct iio_buffer *buffer = indio_dev->buffer;
1139 	bool inlist;
1140 
1141 	ret = strtobool(buf, &requested_state);
1142 	if (ret < 0)
1143 		return ret;
1144 
1145 	mutex_lock(&indio_dev->mlock);
1146 
1147 	/* Find out if it is in the list */
1148 	inlist = iio_buffer_is_active(buffer);
1149 	/* Already in desired state */
1150 	if (inlist == requested_state)
1151 		goto done;
1152 
1153 	if (requested_state)
1154 		ret = __iio_update_buffers(indio_dev, buffer, NULL);
1155 	else
1156 		ret = __iio_update_buffers(indio_dev, NULL, buffer);
1157 
1158 done:
1159 	mutex_unlock(&indio_dev->mlock);
1160 	return (ret < 0) ? ret : len;
1161 }
1162 
1163 static const char * const iio_scan_elements_group_name = "scan_elements";
1164 
1165 static ssize_t iio_buffer_show_watermark(struct device *dev,
1166 					 struct device_attribute *attr,
1167 					 char *buf)
1168 {
1169 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1170 	struct iio_buffer *buffer = indio_dev->buffer;
1171 
1172 	return sprintf(buf, "%u\n", buffer->watermark);
1173 }
1174 
1175 static ssize_t iio_buffer_store_watermark(struct device *dev,
1176 					  struct device_attribute *attr,
1177 					  const char *buf,
1178 					  size_t len)
1179 {
1180 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1181 	struct iio_buffer *buffer = indio_dev->buffer;
1182 	unsigned int val;
1183 	int ret;
1184 
1185 	ret = kstrtouint(buf, 10, &val);
1186 	if (ret)
1187 		return ret;
1188 	if (!val)
1189 		return -EINVAL;
1190 
1191 	mutex_lock(&indio_dev->mlock);
1192 
1193 	if (val > buffer->length) {
1194 		ret = -EINVAL;
1195 		goto out;
1196 	}
1197 
1198 	if (iio_buffer_is_active(buffer)) {
1199 		ret = -EBUSY;
1200 		goto out;
1201 	}
1202 
1203 	buffer->watermark = val;
1204 out:
1205 	mutex_unlock(&indio_dev->mlock);
1206 
1207 	return ret ? ret : len;
1208 }
1209 
1210 static ssize_t iio_dma_show_data_available(struct device *dev,
1211 						struct device_attribute *attr,
1212 						char *buf)
1213 {
1214 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1215 	struct iio_buffer *buffer = indio_dev->buffer;
1216 
1217 	return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
1218 }
1219 
1220 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
1221 		   iio_buffer_write_length);
1222 static struct device_attribute dev_attr_length_ro = __ATTR(length,
1223 	S_IRUGO, iio_buffer_read_length, NULL);
1224 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
1225 		   iio_buffer_show_enable, iio_buffer_store_enable);
1226 static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
1227 		   iio_buffer_show_watermark, iio_buffer_store_watermark);
1228 static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
1229 	S_IRUGO, iio_buffer_show_watermark, NULL);
1230 static DEVICE_ATTR(data_available, S_IRUGO,
1231 		iio_dma_show_data_available, NULL);
1232 
1233 static struct attribute *iio_buffer_attrs[] = {
1234 	&dev_attr_length.attr,
1235 	&dev_attr_enable.attr,
1236 	&dev_attr_watermark.attr,
1237 	&dev_attr_data_available.attr,
1238 };
1239 
1240 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1241 {
1242 	struct iio_dev_attr *p;
1243 	struct attribute **attr;
1244 	struct iio_buffer *buffer = indio_dev->buffer;
1245 	int ret, i, attrn, attrcount;
1246 	const struct iio_chan_spec *channels;
1247 
1248 	channels = indio_dev->channels;
1249 	if (channels) {
1250 		int ml = indio_dev->masklength;
1251 
1252 		for (i = 0; i < indio_dev->num_channels; i++)
1253 			ml = max(ml, channels[i].scan_index + 1);
1254 		indio_dev->masklength = ml;
1255 	}
1256 
1257 	if (!buffer)
1258 		return 0;
1259 
1260 	attrcount = 0;
1261 	if (buffer->attrs) {
1262 		while (buffer->attrs[attrcount] != NULL)
1263 			attrcount++;
1264 	}
1265 
1266 	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
1267 		       sizeof(struct attribute *), GFP_KERNEL);
1268 	if (!attr)
1269 		return -ENOMEM;
1270 
1271 	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1272 	if (!buffer->access->set_length)
1273 		attr[0] = &dev_attr_length_ro.attr;
1274 
1275 	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1276 		attr[2] = &dev_attr_watermark_ro.attr;
1277 
1278 	if (buffer->attrs)
1279 		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
1280 		       sizeof(struct attribute *) * attrcount);
1281 
1282 	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;
1283 
1284 	buffer->buffer_group.name = "buffer";
1285 	buffer->buffer_group.attrs = attr;
1286 
1287 	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
1288 
1289 	attrcount = 0;
1290 	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
1291 	channels = indio_dev->channels;
1292 	if (channels) {
1293 		/* Per-channel scan element attributes: index, type and en */
1294 		for (i = 0; i < indio_dev->num_channels; i++) {
1295 			if (channels[i].scan_index < 0)
1296 				continue;
1297 
1298 			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
1299 							 &channels[i]);
1300 			if (ret < 0)
1301 				goto error_cleanup_dynamic;
1302 			attrcount += ret;
1303 			if (channels[i].type == IIO_TIMESTAMP)
1304 				indio_dev->scan_index_timestamp =
1305 					channels[i].scan_index;
1306 		}
1307 		if (indio_dev->masklength && buffer->scan_mask == NULL) {
1308 			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1309 							  GFP_KERNEL);
1310 			if (buffer->scan_mask == NULL) {
1311 				ret = -ENOMEM;
1312 				goto error_cleanup_dynamic;
1313 			}
1314 		}
1315 	}
1316 
1317 	buffer->scan_el_group.name = iio_scan_elements_group_name;
1318 
1319 	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
1320 					      sizeof(buffer->scan_el_group.attrs[0]),
1321 					      GFP_KERNEL);
1322 	if (buffer->scan_el_group.attrs == NULL) {
1323 		ret = -ENOMEM;
1324 		goto error_free_scan_mask;
1325 	}
1326 	attrn = 0;
1327 
1328 	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
1329 		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
1330 	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
1331 
1332 	return 0;
1333 
1334 error_free_scan_mask:
1335 	bitmap_free(buffer->scan_mask);
1336 error_cleanup_dynamic:
1337 	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
1338 	kfree(buffer->buffer_group.attrs);
1339 
1340 	return ret;
1341 }
1342 
1343 void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
1344 {
1345 	struct iio_buffer *buffer = indio_dev->buffer;
1346 
1347 	if (!buffer)
1348 		return;
1349 
1350 	bitmap_free(buffer->scan_mask);
1351 	kfree(buffer->buffer_group.attrs);
1352 	kfree(buffer->scan_el_group.attrs);
1353 	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
1354 }
1355 
1356 /**
1357  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1358  * @indio_dev: the iio device
1359  * @mask: scan mask to be checked
1360  *
1361  * Return true if exactly one bit is set in the scan mask, false otherwise. It
1362  * can be used for devices where only one channel can be active for sampling at
1363  * a time.
1364  */
1365 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1366 	const unsigned long *mask)
1367 {
1368 	return bitmap_weight(mask, indio_dev->masklength) == 1;
1369 }
1370 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
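
/*
 * Example (illustrative sketch): a driver whose hardware can stream only one
 * channel at a time would plug this helper into its buffer setup ops. The ops
 * name is hypothetical.
 *
 *	static const struct iio_buffer_setup_ops my_drv_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_drv_setup_ops;
 */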
1371 
1372 static const void *iio_demux(struct iio_buffer *buffer,
1373 				 const void *datain)
1374 {
1375 	struct iio_demux_table *t;
1376 
1377 	if (list_empty(&buffer->demux_list))
1378 		return datain;
1379 	list_for_each_entry(t, &buffer->demux_list, l)
1380 		memcpy(buffer->demux_bounce + t->to,
1381 		       datain + t->from, t->length);
1382 
1383 	return buffer->demux_bounce;
1384 }
1385 
1386 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1387 {
1388 	const void *dataout = iio_demux(buffer, data);
1389 	int ret;
1390 
1391 	ret = buffer->access->store_to(buffer, dataout);
1392 	if (ret)
1393 		return ret;
1394 
1395 	/*
1396 	 * We can't just test for watermark to decide if we wake the poll queue
1397 	 * because a read may request fewer samples than the watermark.
1398 	 */
1399 	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1400 	return 0;
1401 }
1402 
1403 /**
1404  * iio_push_to_buffers() - push to a registered buffer.
1405  * @indio_dev:		iio_dev structure for device.
1406  * @data:		Full scan.
1407  */
1408 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1409 {
1410 	int ret;
1411 	struct iio_buffer *buf;
1412 
1413 	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
1414 		ret = iio_push_to_buffer(buf, data);
1415 		if (ret < 0)
1416 			return ret;
1417 	}
1418 
1419 	return 0;
1420 }
1421 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
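
/*
 * Example (illustrative sketch): pushing one scan from a trigger handler in a
 * triggered-buffer setup. The scan layout must match the active scan mask (see
 * iio_compute_scan_bytes()); names other than iio_push_to_buffers() and the
 * iio_poll_func/iio_trigger helpers are hypothetical.
 *
 *	static irqreturn_t my_drv_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_drv_state *st = iio_priv(indio_dev);
 *
 *		my_drv_read_scan(st, st->scan);
 *		iio_push_to_buffers(indio_dev, st->scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */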
1422 
1423 /**
1424  * iio_buffer_release() - Free a buffer's resources
1425  * @ref: Pointer to the kref embedded in the iio_buffer struct
1426  *
1427  * This function is called when the last reference to the buffer has been
1428  * dropped. It will typically free all resources allocated by the buffer. Do not
1429  * call this function manually; always use iio_buffer_put() when done using a
1430  * buffer.
1431  */
1432 static void iio_buffer_release(struct kref *ref)
1433 {
1434 	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1435 
1436 	buffer->access->release(buffer);
1437 }
1438 
1439 /**
1440  * iio_buffer_get() - Grab a reference to the buffer
1441  * @buffer: The buffer to grab a reference for, may be NULL
1442  *
1443  * Returns the pointer to the buffer that was passed into the function.
1444  */
1445 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1446 {
1447 	if (buffer)
1448 		kref_get(&buffer->ref);
1449 
1450 	return buffer;
1451 }
1452 EXPORT_SYMBOL_GPL(iio_buffer_get);
1453 
1454 /**
1455  * iio_buffer_put() - Release the reference to the buffer
1456  * @buffer: The buffer to release the reference for, may be NULL
1457  */
1458 void iio_buffer_put(struct iio_buffer *buffer)
1459 {
1460 	if (buffer)
1461 		kref_put(&buffer->ref, iio_buffer_release);
1462 }
1463 EXPORT_SYMBOL_GPL(iio_buffer_put);
1464 
1465 /**
1466  * iio_device_attach_buffer - Attach a buffer to an IIO device
1467  * @indio_dev: The device the buffer should be attached to
1468  * @buffer: The buffer to attach to the device
1469  *
1470  * This function attaches a buffer to an IIO device. The buffer stays attached
1471  * to the device until the device is freed. The function should be called at
1472  * most once per device.
1473  */
1474 void iio_device_attach_buffer(struct iio_dev *indio_dev,
1475 			      struct iio_buffer *buffer)
1476 {
1477 	indio_dev->buffer = iio_buffer_get(buffer);
1478 }
1479 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
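
/*
 * Example (illustrative sketch): attaching a software buffer at probe time,
 * assuming the kfifo buffer implementation from drivers/iio/buffer/kfifo_buf.c
 * is available.
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 */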
1480