/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags, including the busy flag
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
};

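/*
 * The busy bit is set while userspace holds an open event file
 * descriptor, i.e. while there is a consumer for pushed events.
 */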
bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		What event
 * @timestamp:		When the event occurred
 *
 * Note: The caller must ensure that this function is not called
 * concurrently for the same indio_dev.
 *
 * This function may be safely used as soon as a valid reference to iio_dev has
 * been obtained via iio_device_alloc(), but any events that are submitted
 * before iio_device_register() has successfully completed will be silently
 * discarded.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	int copied;

	if (!ev_int)
		return 0;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {
		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, EPOLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading,
 *	   or 0 if no events are queued
 */
static __poll_t iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	__poll_t events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

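/*
 * Blocking (unless O_NONBLOCK) read of whole struct iio_event_data
 * records from the detected-events fifo. Returns the number of bytes
 * copied, -EAGAIN if nothing could be read on a non-blocking descriptor,
 * or a negative error code.
 */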
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster) we return -EAGAIN if the
		 * file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}

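/* Mark the event interface free again and drop our device reference. */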
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

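/*
 * Create an anonymous, read-only file descriptor for the event interface.
 * Only one descriptor may be open at a time; further requests fail with
 * -EBUSY until it is released.
 */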
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&indio_dev->mlock);
	if (fd)
		return fd;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
				indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
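		/* Flush any events left unread by a previous consumer. */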
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&indio_dev->mlock);
	return fd;
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
};

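/*
 * The attribute address packs the index into the channel's event_spec
 * array in its low 16 bits and the iio_event_info type in its high 16
 * bits (see iio_device_add_event() below).
 */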
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;
	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

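	/* Parse "<int>.<fract>" with up to six fractional digits (micro units). */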
	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;
	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}

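/*
 * Create one sysfs attribute per set bit in @mask for the given event
 * spec. Returns the number of attributes created or a negative error code.
 */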
static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&indio_dev->event_interface->dev_attr_list);
		kfree(postfix);

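		/*
		 * Shared attributes may already have been created by another
		 * channel's event spec; -EBUSY is expected in that case and
		 * is not an error.
		 */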
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}

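/*
 * Register separate and shared event attributes for every event spec of
 * a channel. Returns the total attribute count or a negative error code.
 */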
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}

	return attrcount;
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

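/* Does any channel declare event specs that need sysfs attributes? */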
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

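/* Initialize fifo, waitqueue and read lock of a freshly allocated interface. */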
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

static const char *iio_event_group_name = "events";
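/**
 * iio_device_register_eventset() - register the events sysfs interface
 * @indio_dev: The IIO device
 *
 * Allocates the event interface and builds the "events" attribute group
 * from the driver supplied event attributes plus those generated from the
 * channel event specs. Does nothing if the device declares no events.
 */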
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface);
	indio_dev->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset() - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	wake_up(&indio_dev->event_interface->wait);
}

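/**
 * iio_device_unregister_eventset() - release event interface resources
 * @indio_dev: The IIO device
 *
 * Frees the dynamically created attributes, the attribute group's array
 * and the event interface itself. Counterpart of
 * iio_device_register_eventset().
 */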
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}