// SPDX-License-Identifier: GPL-2.0-only
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including the busy flag
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 * @ioctl_handler:	handler for event ioctl() calls
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
	struct iio_ioctl_handler	ioctl_handler;
};

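/*
 * The IIO_BUSY_BIT_POS bit in @flags tracks whether the event chrdev is
 * currently open: it is set atomically in iio_event_getfd() and cleared in
 * iio_event_chrdev_release(), so at most one reader holds the event fd at a
 * time and iio_push_event() can cheaply skip queueing when nobody listens.
 */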
bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add an event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		the event code identifying what happened
 * @timestamp:		when the event occurred
 *
 * Note: The caller must ensure that this function is not called
 * concurrently for the same indio_dev.
 *
 * This function may be safely used as soon as a valid reference to iio_dev has
 * been obtained via iio_device_alloc(), but any events that are submitted
 * before iio_device_register() has successfully completed will be silently
 * discarded.
 */
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	struct iio_event_data ev;
	int copied;

	if (!ev_int)
		return 0;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {
		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, EPOLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
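
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a threshold interrupt handler would typically push an event code built
 * with the helper macros from <linux/iio/events.h>, e.g.
 *
 *	iio_push_event(indio_dev,
 *		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *					    IIO_EV_TYPE_THRESH,
 *					    IIO_EV_DIR_RISING),
 *		       iio_get_time_ns(indio_dev));
 */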

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading,
 *	   otherwise 0.
 */
static __poll_t iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	__poll_t events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster) we return -EAGAIN if the
		 * file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}
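
/*
 * From userspace, each read of this fd yields whole struct iio_event_data
 * records (a 64-bit event id plus a 64-bit timestamp). A minimal consumer
 * sketch, assuming an event fd already obtained via IIO_GET_EVENT_FD_IOCTL:
 *
 *	struct iio_event_data ev;
 *	if (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("event id %llx at %lld\n", ev.id, ev.timestamp);
 */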

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.poll = iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
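
/*
 * Note that there is no .open handler: the file is only ever instantiated
 * through anon_inode_getfd() in iio_event_getfd() below, with
 * filep->private_data pointing at the struct iio_dev.
 */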
191  
iio_event_getfd(struct iio_dev * indio_dev)192  static int iio_event_getfd(struct iio_dev *indio_dev)
193  {
194  	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
195  	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
196  	int fd;
197  
198  	if (ev_int == NULL)
199  		return -ENODEV;
200  
201  	fd = mutex_lock_interruptible(&iio_dev_opaque->mlock);
202  	if (fd)
203  		return fd;
204  
205  	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
206  		fd = -EBUSY;
207  		goto unlock;
208  	}
209  
210  	iio_device_get(indio_dev);
211  
212  	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
213  				indio_dev, O_RDONLY | O_CLOEXEC);
214  	if (fd < 0) {
215  		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
216  		iio_device_put(indio_dev);
217  	} else {
218  		kfifo_reset_out(&ev_int->det_events);
219  	}
220  
221  unlock:
222  	mutex_unlock(&iio_dev_opaque->mlock);
223  	return fd;
224  }
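
/*
 * Userspace reaches iio_event_getfd() through the IIO_GET_EVENT_FD_IOCTL
 * handled in iio_event_ioctl() below. A minimal sketch, assuming the
 * /dev/iio:deviceX chrdev is already open as dev_fd:
 *
 *	int event_fd;
 *	if (ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) == 0)
 *		... read struct iio_event_data records from event_fd ...
 */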

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
	[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
	[IIO_EV_TYPE_GESTURE] = "gesture",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling",
	[IIO_EV_DIR_SINGLETAP] = "singletap",
	[IIO_EV_DIR_DOUBLETAP] = "doubletap",
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
	[IIO_EV_INFO_TIMEOUT] = "timeout",
	[IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout",
	[IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay",
	[IIO_EV_INFO_RUNNING_PERIOD] = "runningperiod",
	[IIO_EV_INFO_RUNNING_COUNT] = "runningcount",
};

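/*
 * Event attributes encode their origin in iio_dev_attr.address:
 * iio_device_add_event() below stores the event_spec index in the low
 * 16 bits and the iio_event_info index in bits 16..31, which the three
 * accessors that follow unpack again.
 */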
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (!indio_dev->info->write_event_config)
		return -EINVAL;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	if (!indio_dev->info->read_event_config)
		return -EINVAL;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;

	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	if (!indio_dev->info->read_event_value)
		return -EINVAL;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;
	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

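/*
 * Values are parsed as fixed-point numbers with a micro-unit fractional
 * part: with a fract_mult of 100000, a string such as "1.5" is split into
 * val = 1 and val2 = 500000, matching the IIO_VAL_INT_PLUS_MICRO
 * convention used by iio_format_value() on the read side.
 */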
static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;
	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}

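/*
 * The attribute names built below combine the string tables above, giving
 * sysfs entries such as "in_voltage0_thresh_rising_value" once
 * __iio_add_chan_devattr() has prepended the channel part to the
 * "<type>_<dir>_<info>" postfix generated here.
 */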
static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
		char *buf);
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			 NULL,
			&iio_dev_opaque->event_interface->dev_attr_list);
		kfree(postfix);

		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}

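/*
 * Each event spec can expose attributes at four sharing levels. Assuming
 * the usual IIO sysfs naming, a separate attribute looks like
 * "in_voltage0_thresh_rising_en", while the shared variants progressively
 * drop the channel number, the channel type and finally the "in_" prefix
 * as the scope widens.
 */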
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}

	return attrcount;
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

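/*
 * Only IIO_GET_EVENT_FD_IOCTL is handled here; anything else returns
 * IIO_IOCTL_UNHANDLED so the IIO core can offer the command to the other
 * registered ioctl handlers (e.g. the buffer interface).
 */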
static long iio_event_ioctl(struct iio_dev *indio_dev, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (fd < 0)
			return fd;
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}

	return IIO_IOCTL_UNHANDLED;
}

static const char *iio_event_group_name = "events";
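
/*
 * The "events" sysfs group is assembled in two parts: any static
 * attributes supplied via info->event_attrs are copied first, then the
 * dynamically generated per-channel attributes are appended. Allocating
 * attrcount + 1 entries with kcalloc() keeps the array NULL-terminated,
 * as the attribute_group code expects.
 */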
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int;
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (ev_int == NULL)
		return -ENOMEM;

	iio_dev_opaque->event_interface = ev_int;

	INIT_LIST_HEAD(&ev_int->dev_attr_list);

	iio_setup_ev_int(ev_int);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	ev_int->group.name = iio_event_group_name;
	ev_int->group.attrs = kcalloc(attrcount + 1,
				      sizeof(ev_int->group.attrs[0]),
				      GFP_KERNEL);
	if (ev_int->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(ev_int->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(ev_int->group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &ev_int->dev_attr_list, l)
		ev_int->group.attrs[attrn++] = &p->dev_attr.attr;

	ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
	if (ret)
		goto error_free_group_attrs;

	ev_int->ioctl_handler.ioctl = iio_event_ioctl;
	iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
					  &ev_int->ioctl_handler);

	return 0;

error_free_group_attrs:
	kfree(ev_int->group.attrs);
error_free_setup_event_lines:
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;
	wake_up(&iio_dev_opaque->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	if (ev_int == NULL)
		return;

	iio_device_ioctl_handler_unregister(&ev_int->ioctl_handler);
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int->group.attrs);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
}
617