/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		kfifo of detected events awaiting userspace
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including the busy flag.
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
};

bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		Event code identifying the channel, type and direction
 * @timestamp:		When the event occurred
 *
 * Note: The caller must make sure that this function is not called
 * concurrently for the same indio_dev.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	int copied;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, POLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
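
/*
 * Example (illustrative only, not part of this file): a minimal sketch of how
 * a driver's threshold interrupt handler might push an event.  The handler
 * name, channel number and the iio_get_time_ns() call taking the iio_dev
 * (as in recent kernels) are assumptions made for the sake of the example;
 * <linux/interrupt.h> and <linux/iio/events.h> are assumed to be included.
 *
 *	static irqreturn_t foo_threshold_irq(int irq, void *private)
 *	{
 *		struct iio_dev *indio_dev = private;
 *
 *		iio_push_event(indio_dev,
 *			       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *						    IIO_EV_TYPE_THRESH,
 *						    IIO_EV_DIR_RISING),
 *			       iio_get_time_ns(indio_dev));
 *
 *		return IRQ_HANDLED;
 *	}
 */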

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading,
 *	   otherwise 0
 */
static unsigned int iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster) we return -EAGAIN if the
		 * file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&indio_dev->mlock);
	if (fd)
		return fd;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
				indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&indio_dev->mlock);
	return fd;
}
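
/*
 * Example (illustrative only): the file descriptor returned above reaches
 * userspace through the IIO_GET_EVENT_FD_IOCTL ioctl on the main device
 * node.  A minimal userspace reader, assuming /dev/iio:device0 exists and
 * the uapi <linux/iio/events.h> header is included, might look like this:
 *
 *	int fd, event_fd;
 *	struct iio_event_data event;
 *
 *	fd = open("/dev/iio:device0", O_RDONLY);
 *	ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
 *	while (read(event_fd, &event, sizeof(event)) == sizeof(event))
 *		printf("event id %llu at %lld\n",
 *		       (unsigned long long)event.id,
 *		       (long long)event.timestamp);
 */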

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
};

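/*
 * The sysfs attribute address packs the event description: the lower 16 bits
 * hold the index into the channel's event_spec array and the upper 16 bits
 * hold the iio_event_info item (see the (i << 16) | spec_index packing in
 * iio_device_add_event() below).  The helpers below unpack it again.
 */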
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;
	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;
	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}

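/*
 * One attribute is built per info bit set in @mask.  The name combines the
 * channel specific prefix generated in __iio_add_chan_devattr() with a
 * "<type>_<dir>_<info>" postfix, giving names such as (illustrative)
 * "in_voltage0_thresh_rising_en" or "in_temp_roc_falling_value".
 */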
static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&indio_dev->event_interface->dev_attr_list);
		kfree(postfix);

		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	ret = attrcount;
	return ret;
}
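
/*
 * Example (illustrative only): a hypothetical driver channel definition whose
 * event_spec entries the loop above turns into sysfs attributes.  The names
 * and values are assumptions made for the sake of the example.
 *
 *	static const struct iio_event_spec foo_thresh_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
 *				 BIT(IIO_EV_INFO_VALUE),
 *	};
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.event_spec = &foo_thresh_event,
 *			.num_event_specs = 1,
 *		},
 *	};
 */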

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface);
	indio_dev->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	wake_up(&indio_dev->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}