/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};

int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	unsigned long flags;
	int copied;

	/* Does anyone care? */
	spin_lock_irqsave(&ev_int->wait.lock, flags);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock_irqrestore(&ev_int->wait.lock, flags);

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
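
/*
 * Illustrative sketch only (driver names are hypothetical): the typical
 * caller is a driver's event interrupt handler, which encodes the event
 * with one of the IIO_*_EVENT_CODE() macros from <linux/iio/events.h>
 * and timestamps it at detection time:
 *
 *	static irqreturn_t foo_threshold_irq(int irq, void *private)
 *	{
 *		struct iio_dev *indio_dev = private;
 *
 *		iio_push_event(indio_dev,
 *			       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *						    IIO_EV_TYPE_THRESH,
 *						    IIO_EV_DIR_RISING),
 *			       iio_get_time_ns());
 *		return IRQ_HANDLED;
 *	}
 *
 * If no process has the event chrdev open, the event is dropped in
 * iio_push_event() itself, so drivers need not check for a listener.
 */
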
/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or a negative error code on failure
 */
static unsigned int iio_event_poll(struct file *filep,
				   struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int events = 0;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filep, &ev_int->wait, wait);

	spin_lock_irq(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
	spin_unlock_irq(&ev_int->wait.lock);

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock_irq(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible_locked_irq(ev_int->wait,
				!kfifo_is_empty(&ev_int->det_events) ||
				indio_dev->info == NULL);
		if (ret)
			goto error_unlock;
		if (indio_dev->info == NULL) {
			ret = -ENODEV;
			goto error_unlock;
		}
		/* Single access device so no one else can get the data */
	}

	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock_irq(&ev_int->wait.lock);

	return ret ? ret : copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;

	spin_lock_irq(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new iio_push_event calls running.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock_irq(&ev_int->wait.lock);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =		iio_event_chrdev_read,
	.poll =		iio_event_poll,
	.release =	iio_event_chrdev_release,
	.owner =	THIS_MODULE,
	.llseek =	noop_llseek,
};

int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	spin_lock_irq(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock_irq(&ev_int->wait.lock);
	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock_irq(&ev_int->wait.lock);
		iio_device_put(indio_dev);
	}
	return fd;
}
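
/*
 * Userspace side, as a rough sketch (device path illustrative; assumes
 * <sys/ioctl.h>, <fcntl.h>, <unistd.h>, <stdio.h> and the uapi
 * <linux/iio/events.h>): the event fd is obtained from the device
 * chrdev with IIO_GET_EVENT_FD_IOCTL and read in units of
 * struct iio_event_data:
 *
 *	int devfd = open("/dev/iio:device0", O_RDONLY);
 *	struct iio_event_data ev;
 *	int evfd;
 *
 *	if (ioctl(devfd, IIO_GET_EVENT_FD_IOCTL, &evfd) < 0)
 *		return -1;
 *	while (read(evfd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("event 0x%llx at %lld\n",
 *		       (unsigned long long)ev.id, (long long)ev.timestamp);
 *
 * Only one event fd may exist per device: until it is closed, further
 * IIO_GET_EVENT_FD_IOCTL calls fail with EBUSY via the busy bit above.
 */
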
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
};

/*
 * For new-style event attributes, ->address packs the iio_event_info
 * element in the upper 16 bits and the index into the channel's
 * event_spec array in the lower 16 bits; see iio_device_add_event().
 */
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (indio_dev->info->write_event_config)
		ret = indio_dev->info->write_event_config(indio_dev,
			this_attr->address, val);
	else
		ret = indio_dev->info->write_event_config_new(indio_dev,
			this_attr->c, iio_ev_attr_type(this_attr),
			iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	if (indio_dev->info->read_event_config)
		val = indio_dev->info->read_event_config(indio_dev,
			this_attr->address);
	else
		val = indio_dev->info->read_event_config_new(indio_dev,
			this_attr->c, iio_ev_attr_type(this_attr),
			iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (indio_dev->info->read_event_value) {
		ret = indio_dev->info->read_event_value(indio_dev,
			this_attr->address, &val);
		if (ret < 0)
			return ret;
		return sprintf(buf, "%d\n", val);
	} else {
		ret = indio_dev->info->read_event_value_new(indio_dev,
			this_attr->c, iio_ev_attr_type(this_attr),
			iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
			&val, &val2);
		if (ret < 0)
			return ret;
		return iio_format_value(buf, ret, val, val2);
	}
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value &&
	    !indio_dev->info->write_event_value_new)
		return -EINVAL;

	if (indio_dev->info->write_event_value) {
		ret = kstrtoint(buf, 10, &val);
		if (ret)
			return ret;
		ret = indio_dev->info->write_event_value(indio_dev,
			this_attr->address, val);
	} else {
		ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
		if (ret)
			return ret;
		ret = indio_dev->info->write_event_value_new(indio_dev,
			this_attr->c, iio_ev_attr_type(this_attr),
			iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
			val, val2);
	}
	if (ret < 0)
		return ret;

	return len;
}
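
/*
 * A minimal sketch (the "foo" driver is hypothetical) of the new-style
 * declaration these handlers serve. A channel carrying an event spec
 * such as:
 *
 *	static const struct iio_event_spec foo_thresh_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
 *				 BIT(IIO_EV_INFO_VALUE),
 *	};
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.event_spec = &foo_thresh_event,
 *			.num_event_specs = 1,
 *		},
 *	};
 *
 * results in "in_voltage0_thresh_rising_en" and
 * "in_voltage0_thresh_rising_value" appearing in the events group, each
 * routed through the show/store handlers above.
 */
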
static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
				iio_ev_type_text[type], iio_ev_dir_text[dir],
				iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&indio_dev->event_interface->dev_attr_list);
		kfree(postfix);

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			goto error_ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			goto error_ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			goto error_ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			goto error_ret;
		attrcount += ret;
	}
	ret = attrcount;

error_ret:
	return ret;
}

static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;

	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
						dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
						dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	if (chan->event_mask)
		return iio_device_add_event_sysfs_old(indio_dev, chan);
	else
		return iio_device_add_event_sysfs_new(indio_dev, chan);
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}
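
/*
 * Driver-side counterpart, again as a hypothetical "foo" sketch: the
 * new-style callbacks receive the channel, event type and direction
 * directly, while old-style drivers keyed everything off the packed
 * event code passed in ->address:
 *
 *	static int foo_read_event_config(struct iio_dev *indio_dev,
 *					 const struct iio_chan_spec *chan,
 *					 enum iio_event_type type,
 *					 enum iio_event_direction dir)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return st->thresh_en;
 *	}
 *
 *	static const struct iio_info foo_info = {
 *		.driver_module = THIS_MODULE,
 *		.read_event_config_new = foo_read_event_config,
 *		.write_event_config_new = foo_write_event_config,
 *		.read_event_value_new = foo_read_event_value,
 *		.write_event_value_new = foo_write_event_value,
 *	};
 */
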
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].event_mask != 0)
			return true;
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
		sizeof(indio_dev->event_interface->group.attrs[0]),
		GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	wake_up(&indio_dev->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}
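
/*
 * For reference, a consumer (kernel or userspace) can decode the 64-bit
 * code carried in struct iio_event_data with the extraction macros from
 * <linux/iio/events.h>, matching the encoding applied by the
 * IIO_*_EVENT_CODE() macros when the event was pushed:
 *
 *	enum iio_chan_type ct = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(ev.id);
 *	enum iio_event_type type = IIO_EVENT_CODE_EXTRACT_TYPE(ev.id);
 *	enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(ev.id);
 */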