1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/anon_inodes.h> 4 #include <linux/atomic.h> 5 #include <linux/bitmap.h> 6 #include <linux/build_bug.h> 7 #include <linux/cdev.h> 8 #include <linux/compat.h> 9 #include <linux/compiler.h> 10 #include <linux/device.h> 11 #include <linux/err.h> 12 #include <linux/file.h> 13 #include <linux/gpio.h> 14 #include <linux/gpio/driver.h> 15 #include <linux/hte.h> 16 #include <linux/interrupt.h> 17 #include <linux/irqreturn.h> 18 #include <linux/kernel.h> 19 #include <linux/kfifo.h> 20 #include <linux/module.h> 21 #include <linux/mutex.h> 22 #include <linux/pinctrl/consumer.h> 23 #include <linux/poll.h> 24 #include <linux/seq_file.h> 25 #include <linux/spinlock.h> 26 #include <linux/timekeeping.h> 27 #include <linux/uaccess.h> 28 #include <linux/workqueue.h> 29 30 #include <uapi/linux/gpio.h> 31 32 #include "gpiolib.h" 33 #include "gpiolib-cdev.h" 34 35 /* 36 * Array sizes must ensure 64-bit alignment and not create holes in the 37 * struct packing. 38 */ 39 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2)); 40 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8)); 41 42 /* 43 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility 44 */ 45 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8)); 46 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8)); 47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8)); 48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8)); 49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8)); 50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8)); 51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8)); 52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); 53 54 /* Character device interface to GPIO. 55 * 56 * The GPIO character device, /dev/gpiochipN, provides userspace an 57 * interface to gpiolib GPIOs via ioctl()s. 58 */ 59 60 typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); 61 typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); 62 typedef ssize_t (*read_fn)(struct file *, char __user *, 63 size_t count, loff_t *); 64 65 static __poll_t call_poll_locked(struct file *file, 66 struct poll_table_struct *wait, 67 struct gpio_device *gdev, poll_fn func) 68 { 69 __poll_t ret; 70 71 down_read(&gdev->sem); 72 ret = func(file, wait); 73 up_read(&gdev->sem); 74 75 return ret; 76 } 77 78 static long call_ioctl_locked(struct file *file, unsigned int cmd, 79 unsigned long arg, struct gpio_device *gdev, 80 ioctl_fn func) 81 { 82 long ret; 83 84 down_read(&gdev->sem); 85 ret = func(file, cmd, arg); 86 up_read(&gdev->sem); 87 88 return ret; 89 } 90 91 static ssize_t call_read_locked(struct file *file, char __user *buf, 92 size_t count, loff_t *f_ps, 93 struct gpio_device *gdev, read_fn func) 94 { 95 ssize_t ret; 96 97 down_read(&gdev->sem); 98 ret = func(file, buf, count, f_ps); 99 up_read(&gdev->sem); 100 101 return ret; 102 } 103 104 /* 105 * GPIO line handle management 106 */ 107 108 #ifdef CONFIG_GPIO_CDEV_V1 109 /** 110 * struct linehandle_state - contains the state of a userspace handle 111 * @gdev: the GPIO device the handle pertains to 112 * @label: consumer label used to tag descriptors 113 * @descs: the GPIO descriptors held by this handle 114 * @num_descs: the number of descriptors held in the descs array 115 */ 116 struct linehandle_state { 117 struct gpio_device *gdev; 118 const char *label; 119 struct gpio_desc *descs[GPIOHANDLES_MAX]; 120 u32 num_descs; 121 }; 122 123 #define GPIOHANDLE_REQUEST_VALID_FLAGS \ 124 (GPIOHANDLE_REQUEST_INPUT | \ 125 GPIOHANDLE_REQUEST_OUTPUT | \ 126 GPIOHANDLE_REQUEST_ACTIVE_LOW | \ 127 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \ 128 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \ 129 GPIOHANDLE_REQUEST_BIAS_DISABLE | \ 130 GPIOHANDLE_REQUEST_OPEN_DRAIN | \ 131 GPIOHANDLE_REQUEST_OPEN_SOURCE) 132 133 static int linehandle_validate_flags(u32 flags) 134 { 135 /* Return an error if an unknown flag is set */ 136 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) 137 return -EINVAL; 138 139 /* 140 * Do not allow both INPUT & OUTPUT flags to be set as they are 141 * contradictory. 142 */ 143 if ((flags & GPIOHANDLE_REQUEST_INPUT) && 144 (flags & GPIOHANDLE_REQUEST_OUTPUT)) 145 return -EINVAL; 146 147 /* 148 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If 149 * the hardware actually supports enabling both at the same time the 150 * electrical result would be disastrous. 151 */ 152 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) && 153 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) 154 return -EINVAL; 155 156 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */ 157 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) && 158 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || 159 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))) 160 return -EINVAL; 161 162 /* Bias flags only allowed for input or output mode. */ 163 if (!((flags & GPIOHANDLE_REQUEST_INPUT) || 164 (flags & GPIOHANDLE_REQUEST_OUTPUT)) && 165 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) || 166 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) || 167 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN))) 168 return -EINVAL; 169 170 /* Only one bias flag can be set. */ 171 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) && 172 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | 173 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) || 174 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) && 175 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP))) 176 return -EINVAL; 177 178 return 0; 179 } 180 181 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp) 182 { 183 assign_bit(FLAG_ACTIVE_LOW, flagsp, 184 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW); 185 assign_bit(FLAG_OPEN_DRAIN, flagsp, 186 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN); 187 assign_bit(FLAG_OPEN_SOURCE, flagsp, 188 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE); 189 assign_bit(FLAG_PULL_UP, flagsp, 190 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP); 191 assign_bit(FLAG_PULL_DOWN, flagsp, 192 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN); 193 assign_bit(FLAG_BIAS_DISABLE, flagsp, 194 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE); 195 } 196 197 static long linehandle_set_config(struct linehandle_state *lh, 198 void __user *ip) 199 { 200 struct gpiohandle_config gcnf; 201 struct gpio_desc *desc; 202 int i, ret; 203 u32 lflags; 204 205 if (copy_from_user(&gcnf, ip, sizeof(gcnf))) 206 return -EFAULT; 207 208 lflags = gcnf.flags; 209 ret = linehandle_validate_flags(lflags); 210 if (ret) 211 return ret; 212 213 for (i = 0; i < lh->num_descs; i++) { 214 desc = lh->descs[i]; 215 linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags); 216 217 /* 218 * Lines have to be requested explicitly for input 219 * or output, else the line will be treated "as is". 220 */ 221 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { 222 int val = !!gcnf.default_values[i]; 223 224 ret = gpiod_direction_output(desc, val); 225 if (ret) 226 return ret; 227 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { 228 ret = gpiod_direction_input(desc); 229 if (ret) 230 return ret; 231 } 232 233 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG); 234 } 235 return 0; 236 } 237 238 static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd, 239 unsigned long arg) 240 { 241 struct linehandle_state *lh = file->private_data; 242 void __user *ip = (void __user *)arg; 243 struct gpiohandle_data ghd; 244 DECLARE_BITMAP(vals, GPIOHANDLES_MAX); 245 unsigned int i; 246 int ret; 247 248 if (!lh->gdev->chip) 249 return -ENODEV; 250 251 switch (cmd) { 252 case GPIOHANDLE_GET_LINE_VALUES_IOCTL: 253 /* NOTE: It's okay to read values of output lines */ 254 ret = gpiod_get_array_value_complex(false, true, 255 lh->num_descs, lh->descs, 256 NULL, vals); 257 if (ret) 258 return ret; 259 260 memset(&ghd, 0, sizeof(ghd)); 261 for (i = 0; i < lh->num_descs; i++) 262 ghd.values[i] = test_bit(i, vals); 263 264 if (copy_to_user(ip, &ghd, sizeof(ghd))) 265 return -EFAULT; 266 267 return 0; 268 case GPIOHANDLE_SET_LINE_VALUES_IOCTL: 269 /* 270 * All line descriptors were created at once with the same 271 * flags so just check if the first one is really output. 272 */ 273 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags)) 274 return -EPERM; 275 276 if (copy_from_user(&ghd, ip, sizeof(ghd))) 277 return -EFAULT; 278 279 /* Clamp all values to [0,1] */ 280 for (i = 0; i < lh->num_descs; i++) 281 __assign_bit(i, vals, ghd.values[i]); 282 283 /* Reuse the array setting function */ 284 return gpiod_set_array_value_complex(false, 285 true, 286 lh->num_descs, 287 lh->descs, 288 NULL, 289 vals); 290 case GPIOHANDLE_SET_CONFIG_IOCTL: 291 return linehandle_set_config(lh, ip); 292 default: 293 return -EINVAL; 294 } 295 } 296 297 static long linehandle_ioctl(struct file *file, unsigned int cmd, 298 unsigned long arg) 299 { 300 struct linehandle_state *lh = file->private_data; 301 302 return call_ioctl_locked(file, cmd, arg, lh->gdev, 303 linehandle_ioctl_unlocked); 304 } 305 306 #ifdef CONFIG_COMPAT 307 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd, 308 unsigned long arg) 309 { 310 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 311 } 312 #endif 313 314 static void linehandle_free(struct linehandle_state *lh) 315 { 316 int i; 317 318 for (i = 0; i < lh->num_descs; i++) 319 if (lh->descs[i]) 320 gpiod_free(lh->descs[i]); 321 kfree(lh->label); 322 gpio_device_put(lh->gdev); 323 kfree(lh); 324 } 325 326 static int linehandle_release(struct inode *inode, struct file *file) 327 { 328 linehandle_free(file->private_data); 329 return 0; 330 } 331 332 static const struct file_operations linehandle_fileops = { 333 .release = linehandle_release, 334 .owner = THIS_MODULE, 335 .llseek = noop_llseek, 336 .unlocked_ioctl = linehandle_ioctl, 337 #ifdef CONFIG_COMPAT 338 .compat_ioctl = linehandle_ioctl_compat, 339 #endif 340 }; 341 342 static int linehandle_create(struct gpio_device *gdev, void __user *ip) 343 { 344 struct gpiohandle_request handlereq; 345 struct linehandle_state *lh; 346 struct file *file; 347 int fd, i, ret; 348 u32 lflags; 349 350 if (copy_from_user(&handlereq, ip, sizeof(handlereq))) 351 return -EFAULT; 352 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX)) 353 return -EINVAL; 354 355 lflags = handlereq.flags; 356 357 ret = linehandle_validate_flags(lflags); 358 if (ret) 359 return ret; 360 361 lh = kzalloc(sizeof(*lh), GFP_KERNEL); 362 if (!lh) 363 return -ENOMEM; 364 lh->gdev = gpio_device_get(gdev); 365 366 if (handlereq.consumer_label[0] != '\0') { 367 /* label is only initialized if consumer_label is set */ 368 lh->label = kstrndup(handlereq.consumer_label, 369 sizeof(handlereq.consumer_label) - 1, 370 GFP_KERNEL); 371 if (!lh->label) { 372 ret = -ENOMEM; 373 goto out_free_lh; 374 } 375 } 376 377 lh->num_descs = handlereq.lines; 378 379 /* Request each GPIO */ 380 for (i = 0; i < handlereq.lines; i++) { 381 u32 offset = handlereq.lineoffsets[i]; 382 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset); 383 384 if (IS_ERR(desc)) { 385 ret = PTR_ERR(desc); 386 goto out_free_lh; 387 } 388 389 ret = gpiod_request_user(desc, lh->label); 390 if (ret) 391 goto out_free_lh; 392 lh->descs[i] = desc; 393 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags); 394 395 ret = gpiod_set_transitory(desc, false); 396 if (ret < 0) 397 goto out_free_lh; 398 399 /* 400 * Lines have to be requested explicitly for input 401 * or output, else the line will be treated "as is". 402 */ 403 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { 404 int val = !!handlereq.default_values[i]; 405 406 ret = gpiod_direction_output(desc, val); 407 if (ret) 408 goto out_free_lh; 409 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { 410 ret = gpiod_direction_input(desc); 411 if (ret) 412 goto out_free_lh; 413 } 414 415 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); 416 417 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", 418 offset); 419 } 420 421 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 422 if (fd < 0) { 423 ret = fd; 424 goto out_free_lh; 425 } 426 427 file = anon_inode_getfile("gpio-linehandle", 428 &linehandle_fileops, 429 lh, 430 O_RDONLY | O_CLOEXEC); 431 if (IS_ERR(file)) { 432 ret = PTR_ERR(file); 433 goto out_put_unused_fd; 434 } 435 436 handlereq.fd = fd; 437 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { 438 /* 439 * fput() will trigger the release() callback, so do not go onto 440 * the regular error cleanup path here. 441 */ 442 fput(file); 443 put_unused_fd(fd); 444 return -EFAULT; 445 } 446 447 fd_install(fd, file); 448 449 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 450 lh->num_descs); 451 452 return 0; 453 454 out_put_unused_fd: 455 put_unused_fd(fd); 456 out_free_lh: 457 linehandle_free(lh); 458 return ret; 459 } 460 #endif /* CONFIG_GPIO_CDEV_V1 */ 461 462 /** 463 * struct line - contains the state of a requested line 464 * @desc: the GPIO descriptor for this line. 465 * @req: the corresponding line request 466 * @irq: the interrupt triggered in response to events on this GPIO 467 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or 468 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied 469 * @timestamp_ns: cache for the timestamp storing it between hardirq and 470 * IRQ thread, used to bring the timestamp close to the actual event 471 * @req_seqno: the seqno for the current edge event in the sequence of 472 * events for the corresponding line request. This is drawn from the @req. 473 * @line_seqno: the seqno for the current edge event in the sequence of 474 * events for this line. 475 * @work: the worker that implements software debouncing 476 * @sw_debounced: flag indicating if the software debouncer is active 477 * @level: the current debounced physical level of the line 478 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor 479 * @raw_level: the line level at the time of event 480 * @total_discard_seq: the running counter of the discarded events 481 * @last_seqno: the last sequence number before debounce period expires 482 */ 483 struct line { 484 struct gpio_desc *desc; 485 /* 486 * -- edge detector specific fields -- 487 */ 488 struct linereq *req; 489 unsigned int irq; 490 /* 491 * The flags for the active edge detector configuration. 492 * 493 * edflags is set by linereq_create(), linereq_free(), and 494 * linereq_set_config_unlocked(), which are themselves mutually 495 * exclusive, and is accessed by edge_irq_thread(), 496 * process_hw_ts_thread() and debounce_work_func(), 497 * which can all live with a slightly stale value. 498 */ 499 u64 edflags; 500 /* 501 * timestamp_ns and req_seqno are accessed only by 502 * edge_irq_handler() and edge_irq_thread(), which are themselves 503 * mutually exclusive, so no additional protection is necessary. 504 */ 505 u64 timestamp_ns; 506 u32 req_seqno; 507 /* 508 * line_seqno is accessed by either edge_irq_thread() or 509 * debounce_work_func(), which are themselves mutually exclusive, 510 * so no additional protection is necessary. 511 */ 512 u32 line_seqno; 513 /* 514 * -- debouncer specific fields -- 515 */ 516 struct delayed_work work; 517 /* 518 * sw_debounce is accessed by linereq_set_config(), which is the 519 * only setter, and linereq_get_values(), which can live with a 520 * slightly stale value. 521 */ 522 unsigned int sw_debounced; 523 /* 524 * level is accessed by debounce_work_func(), which is the only 525 * setter, and linereq_get_values() which can live with a slightly 526 * stale value. 527 */ 528 unsigned int level; 529 #ifdef CONFIG_HTE 530 struct hte_ts_desc hdesc; 531 /* 532 * HTE provider sets line level at the time of event. The valid 533 * value is 0 or 1 and negative value for an error. 534 */ 535 int raw_level; 536 /* 537 * when sw_debounce is set on HTE enabled line, this is running 538 * counter of the discarded events. 539 */ 540 u32 total_discard_seq; 541 /* 542 * when sw_debounce is set on HTE enabled line, this variable records 543 * last sequence number before debounce period expires. 544 */ 545 u32 last_seqno; 546 #endif /* CONFIG_HTE */ 547 }; 548 549 /** 550 * struct linereq - contains the state of a userspace line request 551 * @gdev: the GPIO device the line request pertains to 552 * @label: consumer label used to tag GPIO descriptors 553 * @num_lines: the number of lines in the lines array 554 * @wait: wait queue that handles blocking reads of events 555 * @device_unregistered_nb: notifier block for receiving gdev unregister events 556 * @event_buffer_size: the number of elements allocated in @events 557 * @events: KFIFO for the GPIO events 558 * @seqno: the sequence number for edge events generated on all lines in 559 * this line request. Note that this is not used when @num_lines is 1, as 560 * the line_seqno is then the same and is cheaper to calculate. 561 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency 562 * of configuration, particularly multi-step accesses to desc flags. 563 * @lines: the lines held by this line request, with @num_lines elements. 564 */ 565 struct linereq { 566 struct gpio_device *gdev; 567 const char *label; 568 u32 num_lines; 569 wait_queue_head_t wait; 570 struct notifier_block device_unregistered_nb; 571 u32 event_buffer_size; 572 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event); 573 atomic_t seqno; 574 struct mutex config_mutex; 575 struct line lines[]; 576 }; 577 578 #define GPIO_V2_LINE_BIAS_FLAGS \ 579 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \ 580 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \ 581 GPIO_V2_LINE_FLAG_BIAS_DISABLED) 582 583 #define GPIO_V2_LINE_DIRECTION_FLAGS \ 584 (GPIO_V2_LINE_FLAG_INPUT | \ 585 GPIO_V2_LINE_FLAG_OUTPUT) 586 587 #define GPIO_V2_LINE_DRIVE_FLAGS \ 588 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \ 589 GPIO_V2_LINE_FLAG_OPEN_SOURCE) 590 591 #define GPIO_V2_LINE_EDGE_FLAGS \ 592 (GPIO_V2_LINE_FLAG_EDGE_RISING | \ 593 GPIO_V2_LINE_FLAG_EDGE_FALLING) 594 595 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS 596 597 #define GPIO_V2_LINE_VALID_FLAGS \ 598 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \ 599 GPIO_V2_LINE_DIRECTION_FLAGS | \ 600 GPIO_V2_LINE_DRIVE_FLAGS | \ 601 GPIO_V2_LINE_EDGE_FLAGS | \ 602 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \ 603 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \ 604 GPIO_V2_LINE_BIAS_FLAGS) 605 606 /* subset of flags relevant for edge detector configuration */ 607 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \ 608 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \ 609 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \ 610 GPIO_V2_LINE_EDGE_FLAGS) 611 612 static int linereq_unregistered_notify(struct notifier_block *nb, 613 unsigned long action, void *data) 614 { 615 struct linereq *lr = container_of(nb, struct linereq, 616 device_unregistered_nb); 617 618 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR); 619 620 return NOTIFY_OK; 621 } 622 623 static void linereq_put_event(struct linereq *lr, 624 struct gpio_v2_line_event *le) 625 { 626 bool overflow = false; 627 628 spin_lock(&lr->wait.lock); 629 if (kfifo_is_full(&lr->events)) { 630 overflow = true; 631 kfifo_skip(&lr->events); 632 } 633 kfifo_in(&lr->events, le, 1); 634 spin_unlock(&lr->wait.lock); 635 if (!overflow) 636 wake_up_poll(&lr->wait, EPOLLIN); 637 else 638 pr_debug_ratelimited("event FIFO is full - event dropped\n"); 639 } 640 641 static u64 line_event_timestamp(struct line *line) 642 { 643 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags)) 644 return ktime_get_real_ns(); 645 else if (IS_ENABLED(CONFIG_HTE) && 646 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) 647 return line->timestamp_ns; 648 649 return ktime_get_ns(); 650 } 651 652 static u32 line_event_id(int level) 653 { 654 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE : 655 GPIO_V2_LINE_EVENT_FALLING_EDGE; 656 } 657 658 #ifdef CONFIG_HTE 659 660 static enum hte_return process_hw_ts_thread(void *p) 661 { 662 struct line *line; 663 struct linereq *lr; 664 struct gpio_v2_line_event le; 665 u64 edflags; 666 int level; 667 668 if (!p) 669 return HTE_CB_HANDLED; 670 671 line = p; 672 lr = line->req; 673 674 memset(&le, 0, sizeof(le)); 675 676 le.timestamp_ns = line->timestamp_ns; 677 edflags = READ_ONCE(line->edflags); 678 679 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) { 680 case GPIO_V2_LINE_FLAG_EDGE_BOTH: 681 level = (line->raw_level >= 0) ? 682 line->raw_level : 683 gpiod_get_raw_value_cansleep(line->desc); 684 685 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) 686 level = !level; 687 688 le.id = line_event_id(level); 689 break; 690 case GPIO_V2_LINE_FLAG_EDGE_RISING: 691 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE; 692 break; 693 case GPIO_V2_LINE_FLAG_EDGE_FALLING: 694 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE; 695 break; 696 default: 697 return HTE_CB_HANDLED; 698 } 699 le.line_seqno = line->line_seqno; 700 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno; 701 le.offset = gpio_chip_hwgpio(line->desc); 702 703 linereq_put_event(lr, &le); 704 705 return HTE_CB_HANDLED; 706 } 707 708 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p) 709 { 710 struct line *line; 711 struct linereq *lr; 712 int diff_seqno = 0; 713 714 if (!ts || !p) 715 return HTE_CB_HANDLED; 716 717 line = p; 718 line->timestamp_ns = ts->tsc; 719 line->raw_level = ts->raw_level; 720 lr = line->req; 721 722 if (READ_ONCE(line->sw_debounced)) { 723 line->total_discard_seq++; 724 line->last_seqno = ts->seq; 725 mod_delayed_work(system_wq, &line->work, 726 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us))); 727 } else { 728 if (unlikely(ts->seq < line->line_seqno)) 729 return HTE_CB_HANDLED; 730 731 diff_seqno = ts->seq - line->line_seqno; 732 line->line_seqno = ts->seq; 733 if (lr->num_lines != 1) 734 line->req_seqno = atomic_add_return(diff_seqno, 735 &lr->seqno); 736 737 return HTE_RUN_SECOND_CB; 738 } 739 740 return HTE_CB_HANDLED; 741 } 742 743 static int hte_edge_setup(struct line *line, u64 eflags) 744 { 745 int ret; 746 unsigned long flags = 0; 747 struct hte_ts_desc *hdesc = &line->hdesc; 748 749 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING) 750 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 751 HTE_FALLING_EDGE_TS : 752 HTE_RISING_EDGE_TS; 753 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING) 754 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 755 HTE_RISING_EDGE_TS : 756 HTE_FALLING_EDGE_TS; 757 758 line->total_discard_seq = 0; 759 760 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL, 761 line->desc); 762 763 ret = hte_ts_get(NULL, hdesc, 0); 764 if (ret) 765 return ret; 766 767 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread, 768 line); 769 } 770 771 #else 772 773 static int hte_edge_setup(struct line *line, u64 eflags) 774 { 775 return 0; 776 } 777 #endif /* CONFIG_HTE */ 778 779 static irqreturn_t edge_irq_thread(int irq, void *p) 780 { 781 struct line *line = p; 782 struct linereq *lr = line->req; 783 struct gpio_v2_line_event le; 784 785 /* Do not leak kernel stack to userspace */ 786 memset(&le, 0, sizeof(le)); 787 788 if (line->timestamp_ns) { 789 le.timestamp_ns = line->timestamp_ns; 790 } else { 791 /* 792 * We may be running from a nested threaded interrupt in 793 * which case we didn't get the timestamp from 794 * edge_irq_handler(). 795 */ 796 le.timestamp_ns = line_event_timestamp(line); 797 if (lr->num_lines != 1) 798 line->req_seqno = atomic_inc_return(&lr->seqno); 799 } 800 line->timestamp_ns = 0; 801 802 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) { 803 case GPIO_V2_LINE_FLAG_EDGE_BOTH: 804 le.id = line_event_id(gpiod_get_value_cansleep(line->desc)); 805 break; 806 case GPIO_V2_LINE_FLAG_EDGE_RISING: 807 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE; 808 break; 809 case GPIO_V2_LINE_FLAG_EDGE_FALLING: 810 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE; 811 break; 812 default: 813 return IRQ_NONE; 814 } 815 line->line_seqno++; 816 le.line_seqno = line->line_seqno; 817 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno; 818 le.offset = gpio_chip_hwgpio(line->desc); 819 820 linereq_put_event(lr, &le); 821 822 return IRQ_HANDLED; 823 } 824 825 static irqreturn_t edge_irq_handler(int irq, void *p) 826 { 827 struct line *line = p; 828 struct linereq *lr = line->req; 829 830 /* 831 * Just store the timestamp in hardirq context so we get it as 832 * close in time as possible to the actual event. 833 */ 834 line->timestamp_ns = line_event_timestamp(line); 835 836 if (lr->num_lines != 1) 837 line->req_seqno = atomic_inc_return(&lr->seqno); 838 839 return IRQ_WAKE_THREAD; 840 } 841 842 /* 843 * returns the current debounced logical value. 844 */ 845 static bool debounced_value(struct line *line) 846 { 847 bool value; 848 849 /* 850 * minor race - debouncer may be stopped here, so edge_detector_stop() 851 * must leave the value unchanged so the following will read the level 852 * from when the debouncer was last running. 853 */ 854 value = READ_ONCE(line->level); 855 856 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags)) 857 value = !value; 858 859 return value; 860 } 861 862 static irqreturn_t debounce_irq_handler(int irq, void *p) 863 { 864 struct line *line = p; 865 866 mod_delayed_work(system_wq, &line->work, 867 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us))); 868 869 return IRQ_HANDLED; 870 } 871 872 static void debounce_work_func(struct work_struct *work) 873 { 874 struct gpio_v2_line_event le; 875 struct line *line = container_of(work, struct line, work.work); 876 struct linereq *lr; 877 u64 eflags, edflags = READ_ONCE(line->edflags); 878 int level = -1; 879 #ifdef CONFIG_HTE 880 int diff_seqno; 881 882 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) 883 level = line->raw_level; 884 #endif 885 if (level < 0) 886 level = gpiod_get_raw_value_cansleep(line->desc); 887 if (level < 0) { 888 pr_debug_ratelimited("debouncer failed to read line value\n"); 889 return; 890 } 891 892 if (READ_ONCE(line->level) == level) 893 return; 894 895 WRITE_ONCE(line->level, level); 896 897 /* -- edge detection -- */ 898 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; 899 if (!eflags) 900 return; 901 902 /* switch from physical level to logical - if they differ */ 903 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) 904 level = !level; 905 906 /* ignore edges that are not being monitored */ 907 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) || 908 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level)) 909 return; 910 911 /* Do not leak kernel stack to userspace */ 912 memset(&le, 0, sizeof(le)); 913 914 lr = line->req; 915 le.timestamp_ns = line_event_timestamp(line); 916 le.offset = gpio_chip_hwgpio(line->desc); 917 #ifdef CONFIG_HTE 918 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) { 919 /* discard events except the last one */ 920 line->total_discard_seq -= 1; 921 diff_seqno = line->last_seqno - line->total_discard_seq - 922 line->line_seqno; 923 line->line_seqno = line->last_seqno - line->total_discard_seq; 924 le.line_seqno = line->line_seqno; 925 le.seqno = (lr->num_lines == 1) ? 926 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno); 927 } else 928 #endif /* CONFIG_HTE */ 929 { 930 line->line_seqno++; 931 le.line_seqno = line->line_seqno; 932 le.seqno = (lr->num_lines == 1) ? 933 le.line_seqno : atomic_inc_return(&lr->seqno); 934 } 935 936 le.id = line_event_id(level); 937 938 linereq_put_event(lr, &le); 939 } 940 941 static int debounce_setup(struct line *line, unsigned int debounce_period_us) 942 { 943 unsigned long irqflags; 944 int ret, level, irq; 945 946 /* try hardware */ 947 ret = gpiod_set_debounce(line->desc, debounce_period_us); 948 if (!ret) { 949 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us); 950 return ret; 951 } 952 if (ret != -ENOTSUPP) 953 return ret; 954 955 if (debounce_period_us) { 956 /* setup software debounce */ 957 level = gpiod_get_raw_value_cansleep(line->desc); 958 if (level < 0) 959 return level; 960 961 if (!(IS_ENABLED(CONFIG_HTE) && 962 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) { 963 irq = gpiod_to_irq(line->desc); 964 if (irq < 0) 965 return -ENXIO; 966 967 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING; 968 ret = request_irq(irq, debounce_irq_handler, irqflags, 969 line->req->label, line); 970 if (ret) 971 return ret; 972 line->irq = irq; 973 } else { 974 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH); 975 if (ret) 976 return ret; 977 } 978 979 WRITE_ONCE(line->level, level); 980 WRITE_ONCE(line->sw_debounced, 1); 981 } 982 return 0; 983 } 984 985 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc, 986 unsigned int line_idx) 987 { 988 unsigned int i; 989 u64 mask = BIT_ULL(line_idx); 990 991 for (i = 0; i < lc->num_attrs; i++) { 992 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) && 993 (lc->attrs[i].mask & mask)) 994 return true; 995 } 996 return false; 997 } 998 999 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc, 1000 unsigned int line_idx) 1001 { 1002 unsigned int i; 1003 u64 mask = BIT_ULL(line_idx); 1004 1005 for (i = 0; i < lc->num_attrs; i++) { 1006 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) && 1007 (lc->attrs[i].mask & mask)) 1008 return lc->attrs[i].attr.debounce_period_us; 1009 } 1010 return 0; 1011 } 1012 1013 static void edge_detector_stop(struct line *line) 1014 { 1015 if (line->irq) { 1016 free_irq(line->irq, line); 1017 line->irq = 0; 1018 } 1019 1020 #ifdef CONFIG_HTE 1021 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) 1022 hte_ts_put(&line->hdesc); 1023 #endif 1024 1025 cancel_delayed_work_sync(&line->work); 1026 WRITE_ONCE(line->sw_debounced, 0); 1027 WRITE_ONCE(line->edflags, 0); 1028 if (line->desc) 1029 WRITE_ONCE(line->desc->debounce_period_us, 0); 1030 /* do not change line->level - see comment in debounced_value() */ 1031 } 1032 1033 static int edge_detector_setup(struct line *line, 1034 struct gpio_v2_line_config *lc, 1035 unsigned int line_idx, u64 edflags) 1036 { 1037 u32 debounce_period_us; 1038 unsigned long irqflags = 0; 1039 u64 eflags; 1040 int irq, ret; 1041 1042 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; 1043 if (eflags && !kfifo_initialized(&line->req->events)) { 1044 ret = kfifo_alloc(&line->req->events, 1045 line->req->event_buffer_size, GFP_KERNEL); 1046 if (ret) 1047 return ret; 1048 } 1049 if (gpio_v2_line_config_debounced(lc, line_idx)) { 1050 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx); 1051 ret = debounce_setup(line, debounce_period_us); 1052 if (ret) 1053 return ret; 1054 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us); 1055 } 1056 1057 /* detection disabled or sw debouncer will provide edge detection */ 1058 if (!eflags || READ_ONCE(line->sw_debounced)) 1059 return 0; 1060 1061 if (IS_ENABLED(CONFIG_HTE) && 1062 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) 1063 return hte_edge_setup(line, edflags); 1064 1065 irq = gpiod_to_irq(line->desc); 1066 if (irq < 0) 1067 return -ENXIO; 1068 1069 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING) 1070 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 1071 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; 1072 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING) 1073 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 1074 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; 1075 irqflags |= IRQF_ONESHOT; 1076 1077 /* Request a thread to read the events */ 1078 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread, 1079 irqflags, line->req->label, line); 1080 if (ret) 1081 return ret; 1082 1083 line->irq = irq; 1084 return 0; 1085 } 1086 1087 static int edge_detector_update(struct line *line, 1088 struct gpio_v2_line_config *lc, 1089 unsigned int line_idx, u64 edflags) 1090 { 1091 u64 active_edflags = READ_ONCE(line->edflags); 1092 unsigned int debounce_period_us = 1093 gpio_v2_line_config_debounce_period(lc, line_idx); 1094 1095 if ((active_edflags == edflags) && 1096 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)) 1097 return 0; 1098 1099 /* sw debounced and still will be...*/ 1100 if (debounce_period_us && READ_ONCE(line->sw_debounced)) { 1101 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us); 1102 return 0; 1103 } 1104 1105 /* reconfiguring edge detection or sw debounce being disabled */ 1106 if ((line->irq && !READ_ONCE(line->sw_debounced)) || 1107 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) || 1108 (!debounce_period_us && READ_ONCE(line->sw_debounced))) 1109 edge_detector_stop(line); 1110 1111 return edge_detector_setup(line, lc, line_idx, edflags); 1112 } 1113 1114 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc, 1115 unsigned int line_idx) 1116 { 1117 unsigned int i; 1118 u64 mask = BIT_ULL(line_idx); 1119 1120 for (i = 0; i < lc->num_attrs; i++) { 1121 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) && 1122 (lc->attrs[i].mask & mask)) 1123 return lc->attrs[i].attr.flags; 1124 } 1125 return lc->flags; 1126 } 1127 1128 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc, 1129 unsigned int line_idx) 1130 { 1131 unsigned int i; 1132 u64 mask = BIT_ULL(line_idx); 1133 1134 for (i = 0; i < lc->num_attrs; i++) { 1135 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) && 1136 (lc->attrs[i].mask & mask)) 1137 return !!(lc->attrs[i].attr.values & mask); 1138 } 1139 return 0; 1140 } 1141 1142 static int gpio_v2_line_flags_validate(u64 flags) 1143 { 1144 /* Return an error if an unknown flag is set */ 1145 if (flags & ~GPIO_V2_LINE_VALID_FLAGS) 1146 return -EINVAL; 1147 1148 if (!IS_ENABLED(CONFIG_HTE) && 1149 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) 1150 return -EOPNOTSUPP; 1151 1152 /* 1153 * Do not allow both INPUT and OUTPUT flags to be set as they are 1154 * contradictory. 1155 */ 1156 if ((flags & GPIO_V2_LINE_FLAG_INPUT) && 1157 (flags & GPIO_V2_LINE_FLAG_OUTPUT)) 1158 return -EINVAL; 1159 1160 /* Only allow one event clock source */ 1161 if (IS_ENABLED(CONFIG_HTE) && 1162 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) && 1163 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) 1164 return -EINVAL; 1165 1166 /* Edge detection requires explicit input. */ 1167 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) && 1168 !(flags & GPIO_V2_LINE_FLAG_INPUT)) 1169 return -EINVAL; 1170 1171 /* 1172 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single 1173 * request. If the hardware actually supports enabling both at the 1174 * same time the electrical result would be disastrous. 1175 */ 1176 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) && 1177 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE)) 1178 return -EINVAL; 1179 1180 /* Drive requires explicit output direction. */ 1181 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) && 1182 !(flags & GPIO_V2_LINE_FLAG_OUTPUT)) 1183 return -EINVAL; 1184 1185 /* Bias requires explicit direction. */ 1186 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) && 1187 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS)) 1188 return -EINVAL; 1189 1190 /* Only one bias flag can be set. */ 1191 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) && 1192 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | 1193 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) || 1194 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) && 1195 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) 1196 return -EINVAL; 1197 1198 return 0; 1199 } 1200 1201 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc, 1202 unsigned int num_lines) 1203 { 1204 unsigned int i; 1205 u64 flags; 1206 int ret; 1207 1208 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX) 1209 return -EINVAL; 1210 1211 if (memchr_inv(lc->padding, 0, sizeof(lc->padding))) 1212 return -EINVAL; 1213 1214 for (i = 0; i < num_lines; i++) { 1215 flags = gpio_v2_line_config_flags(lc, i); 1216 ret = gpio_v2_line_flags_validate(flags); 1217 if (ret) 1218 return ret; 1219 1220 /* debounce requires explicit input */ 1221 if (gpio_v2_line_config_debounced(lc, i) && 1222 !(flags & GPIO_V2_LINE_FLAG_INPUT)) 1223 return -EINVAL; 1224 } 1225 return 0; 1226 } 1227 1228 static void gpio_v2_line_config_flags_to_desc_flags(u64 flags, 1229 unsigned long *flagsp) 1230 { 1231 assign_bit(FLAG_ACTIVE_LOW, flagsp, 1232 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW); 1233 1234 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) 1235 set_bit(FLAG_IS_OUT, flagsp); 1236 else if (flags & GPIO_V2_LINE_FLAG_INPUT) 1237 clear_bit(FLAG_IS_OUT, flagsp); 1238 1239 assign_bit(FLAG_EDGE_RISING, flagsp, 1240 flags & GPIO_V2_LINE_FLAG_EDGE_RISING); 1241 assign_bit(FLAG_EDGE_FALLING, flagsp, 1242 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING); 1243 1244 assign_bit(FLAG_OPEN_DRAIN, flagsp, 1245 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN); 1246 assign_bit(FLAG_OPEN_SOURCE, flagsp, 1247 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE); 1248 1249 assign_bit(FLAG_PULL_UP, flagsp, 1250 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP); 1251 assign_bit(FLAG_PULL_DOWN, flagsp, 1252 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN); 1253 assign_bit(FLAG_BIAS_DISABLE, flagsp, 1254 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED); 1255 1256 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp, 1257 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME); 1258 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp, 1259 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE); 1260 } 1261 1262 static long linereq_get_values(struct linereq *lr, void __user *ip) 1263 { 1264 struct gpio_v2_line_values lv; 1265 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX); 1266 struct gpio_desc **descs; 1267 unsigned int i, didx, num_get; 1268 bool val; 1269 int ret; 1270 1271 /* NOTE: It's ok to read values of output lines. */ 1272 if (copy_from_user(&lv, ip, sizeof(lv))) 1273 return -EFAULT; 1274 1275 for (num_get = 0, i = 0; i < lr->num_lines; i++) { 1276 if (lv.mask & BIT_ULL(i)) { 1277 num_get++; 1278 descs = &lr->lines[i].desc; 1279 } 1280 } 1281 1282 if (num_get == 0) 1283 return -EINVAL; 1284 1285 if (num_get != 1) { 1286 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL); 1287 if (!descs) 1288 return -ENOMEM; 1289 for (didx = 0, i = 0; i < lr->num_lines; i++) { 1290 if (lv.mask & BIT_ULL(i)) { 1291 descs[didx] = lr->lines[i].desc; 1292 didx++; 1293 } 1294 } 1295 } 1296 ret = gpiod_get_array_value_complex(false, true, num_get, 1297 descs, NULL, vals); 1298 1299 if (num_get != 1) 1300 kfree(descs); 1301 if (ret) 1302 return ret; 1303 1304 lv.bits = 0; 1305 for (didx = 0, i = 0; i < lr->num_lines; i++) { 1306 if (lv.mask & BIT_ULL(i)) { 1307 if (lr->lines[i].sw_debounced) 1308 val = debounced_value(&lr->lines[i]); 1309 else 1310 val = test_bit(didx, vals); 1311 if (val) 1312 lv.bits |= BIT_ULL(i); 1313 didx++; 1314 } 1315 } 1316 1317 if (copy_to_user(ip, &lv, sizeof(lv))) 1318 return -EFAULT; 1319 1320 return 0; 1321 } 1322 1323 static long linereq_set_values_unlocked(struct linereq *lr, 1324 struct gpio_v2_line_values *lv) 1325 { 1326 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX); 1327 struct gpio_desc **descs; 1328 unsigned int i, didx, num_set; 1329 int ret; 1330 1331 bitmap_zero(vals, GPIO_V2_LINES_MAX); 1332 for (num_set = 0, i = 0; i < lr->num_lines; i++) { 1333 if (lv->mask & BIT_ULL(i)) { 1334 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags)) 1335 return -EPERM; 1336 if (lv->bits & BIT_ULL(i)) 1337 __set_bit(num_set, vals); 1338 num_set++; 1339 descs = &lr->lines[i].desc; 1340 } 1341 } 1342 if (num_set == 0) 1343 return -EINVAL; 1344 1345 if (num_set != 1) { 1346 /* build compacted desc array and values */ 1347 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL); 1348 if (!descs) 1349 return -ENOMEM; 1350 for (didx = 0, i = 0; i < lr->num_lines; i++) { 1351 if (lv->mask & BIT_ULL(i)) { 1352 descs[didx] = lr->lines[i].desc; 1353 didx++; 1354 } 1355 } 1356 } 1357 ret = gpiod_set_array_value_complex(false, true, num_set, 1358 descs, NULL, vals); 1359 1360 if (num_set != 1) 1361 kfree(descs); 1362 return ret; 1363 } 1364 1365 static long linereq_set_values(struct linereq *lr, void __user *ip) 1366 { 1367 struct gpio_v2_line_values lv; 1368 int ret; 1369 1370 if (copy_from_user(&lv, ip, sizeof(lv))) 1371 return -EFAULT; 1372 1373 mutex_lock(&lr->config_mutex); 1374 1375 ret = linereq_set_values_unlocked(lr, &lv); 1376 1377 mutex_unlock(&lr->config_mutex); 1378 1379 return ret; 1380 } 1381 1382 static long linereq_set_config_unlocked(struct linereq *lr, 1383 struct gpio_v2_line_config *lc) 1384 { 1385 struct gpio_desc *desc; 1386 struct line *line; 1387 unsigned int i; 1388 u64 flags, edflags; 1389 int ret; 1390 1391 for (i = 0; i < lr->num_lines; i++) { 1392 line = &lr->lines[i]; 1393 desc = lr->lines[i].desc; 1394 flags = gpio_v2_line_config_flags(lc, i); 1395 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); 1396 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; 1397 /* 1398 * Lines have to be requested explicitly for input 1399 * or output, else the line will be treated "as is". 1400 */ 1401 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { 1402 int val = gpio_v2_line_config_output_value(lc, i); 1403 1404 edge_detector_stop(line); 1405 ret = gpiod_direction_output(desc, val); 1406 if (ret) 1407 return ret; 1408 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) { 1409 ret = gpiod_direction_input(desc); 1410 if (ret) 1411 return ret; 1412 1413 ret = edge_detector_update(line, lc, i, edflags); 1414 if (ret) 1415 return ret; 1416 } 1417 1418 WRITE_ONCE(line->edflags, edflags); 1419 1420 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG); 1421 } 1422 return 0; 1423 } 1424 1425 static long linereq_set_config(struct linereq *lr, void __user *ip) 1426 { 1427 struct gpio_v2_line_config lc; 1428 int ret; 1429 1430 if (copy_from_user(&lc, ip, sizeof(lc))) 1431 return -EFAULT; 1432 1433 ret = gpio_v2_line_config_validate(&lc, lr->num_lines); 1434 if (ret) 1435 return ret; 1436 1437 mutex_lock(&lr->config_mutex); 1438 1439 ret = linereq_set_config_unlocked(lr, &lc); 1440 1441 mutex_unlock(&lr->config_mutex); 1442 1443 return ret; 1444 } 1445 1446 static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd, 1447 unsigned long arg) 1448 { 1449 struct linereq *lr = file->private_data; 1450 void __user *ip = (void __user *)arg; 1451 1452 if (!lr->gdev->chip) 1453 return -ENODEV; 1454 1455 switch (cmd) { 1456 case GPIO_V2_LINE_GET_VALUES_IOCTL: 1457 return linereq_get_values(lr, ip); 1458 case GPIO_V2_LINE_SET_VALUES_IOCTL: 1459 return linereq_set_values(lr, ip); 1460 case GPIO_V2_LINE_SET_CONFIG_IOCTL: 1461 return linereq_set_config(lr, ip); 1462 default: 1463 return -EINVAL; 1464 } 1465 } 1466 1467 static long linereq_ioctl(struct file *file, unsigned int cmd, 1468 unsigned long arg) 1469 { 1470 struct linereq *lr = file->private_data; 1471 1472 return call_ioctl_locked(file, cmd, arg, lr->gdev, 1473 linereq_ioctl_unlocked); 1474 } 1475 1476 #ifdef CONFIG_COMPAT 1477 static long linereq_ioctl_compat(struct file *file, unsigned int cmd, 1478 unsigned long arg) 1479 { 1480 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 1481 } 1482 #endif 1483 1484 static __poll_t linereq_poll_unlocked(struct file *file, 1485 struct poll_table_struct *wait) 1486 { 1487 struct linereq *lr = file->private_data; 1488 __poll_t events = 0; 1489 1490 if (!lr->gdev->chip) 1491 return EPOLLHUP | EPOLLERR; 1492 1493 poll_wait(file, &lr->wait, wait); 1494 1495 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, 1496 &lr->wait.lock)) 1497 events = EPOLLIN | EPOLLRDNORM; 1498 1499 return events; 1500 } 1501 1502 static __poll_t linereq_poll(struct file *file, 1503 struct poll_table_struct *wait) 1504 { 1505 struct linereq *lr = file->private_data; 1506 1507 return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked); 1508 } 1509 1510 static ssize_t linereq_read_unlocked(struct file *file, char __user *buf, 1511 size_t count, loff_t *f_ps) 1512 { 1513 struct linereq *lr = file->private_data; 1514 struct gpio_v2_line_event le; 1515 ssize_t bytes_read = 0; 1516 int ret; 1517 1518 if (!lr->gdev->chip) 1519 return -ENODEV; 1520 1521 if (count < sizeof(le)) 1522 return -EINVAL; 1523 1524 do { 1525 spin_lock(&lr->wait.lock); 1526 if (kfifo_is_empty(&lr->events)) { 1527 if (bytes_read) { 1528 spin_unlock(&lr->wait.lock); 1529 return bytes_read; 1530 } 1531 1532 if (file->f_flags & O_NONBLOCK) { 1533 spin_unlock(&lr->wait.lock); 1534 return -EAGAIN; 1535 } 1536 1537 ret = wait_event_interruptible_locked(lr->wait, 1538 !kfifo_is_empty(&lr->events)); 1539 if (ret) { 1540 spin_unlock(&lr->wait.lock); 1541 return ret; 1542 } 1543 } 1544 1545 ret = kfifo_out(&lr->events, &le, 1); 1546 spin_unlock(&lr->wait.lock); 1547 if (ret != 1) { 1548 /* 1549 * This should never happen - we were holding the 1550 * lock from the moment we learned the fifo is no 1551 * longer empty until now. 1552 */ 1553 ret = -EIO; 1554 break; 1555 } 1556 1557 if (copy_to_user(buf + bytes_read, &le, sizeof(le))) 1558 return -EFAULT; 1559 bytes_read += sizeof(le); 1560 } while (count >= bytes_read + sizeof(le)); 1561 1562 return bytes_read; 1563 } 1564 1565 static ssize_t linereq_read(struct file *file, char __user *buf, 1566 size_t count, loff_t *f_ps) 1567 { 1568 struct linereq *lr = file->private_data; 1569 1570 return call_read_locked(file, buf, count, f_ps, lr->gdev, 1571 linereq_read_unlocked); 1572 } 1573 1574 static void linereq_free(struct linereq *lr) 1575 { 1576 unsigned int i; 1577 1578 if (lr->device_unregistered_nb.notifier_call) 1579 blocking_notifier_chain_unregister(&lr->gdev->device_notifier, 1580 &lr->device_unregistered_nb); 1581 1582 for (i = 0; i < lr->num_lines; i++) { 1583 if (lr->lines[i].desc) { 1584 edge_detector_stop(&lr->lines[i]); 1585 gpiod_free(lr->lines[i].desc); 1586 } 1587 } 1588 kfifo_free(&lr->events); 1589 kfree(lr->label); 1590 gpio_device_put(lr->gdev); 1591 kfree(lr); 1592 } 1593 1594 static int linereq_release(struct inode *inode, struct file *file) 1595 { 1596 struct linereq *lr = file->private_data; 1597 1598 linereq_free(lr); 1599 return 0; 1600 } 1601 1602 #ifdef CONFIG_PROC_FS 1603 static void linereq_show_fdinfo(struct seq_file *out, struct file *file) 1604 { 1605 struct linereq *lr = file->private_data; 1606 struct device *dev = &lr->gdev->dev; 1607 u16 i; 1608 1609 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev)); 1610 1611 for (i = 0; i < lr->num_lines; i++) 1612 seq_printf(out, "gpio-line:\t%d\n", 1613 gpio_chip_hwgpio(lr->lines[i].desc)); 1614 } 1615 #endif 1616 1617 static const struct file_operations line_fileops = { 1618 .release = linereq_release, 1619 .read = linereq_read, 1620 .poll = linereq_poll, 1621 .owner = THIS_MODULE, 1622 .llseek = noop_llseek, 1623 .unlocked_ioctl = linereq_ioctl, 1624 #ifdef CONFIG_COMPAT 1625 .compat_ioctl = linereq_ioctl_compat, 1626 #endif 1627 #ifdef CONFIG_PROC_FS 1628 .show_fdinfo = linereq_show_fdinfo, 1629 #endif 1630 }; 1631 1632 static int linereq_create(struct gpio_device *gdev, void __user *ip) 1633 { 1634 struct gpio_v2_line_request ulr; 1635 struct gpio_v2_line_config *lc; 1636 struct linereq *lr; 1637 struct file *file; 1638 u64 flags, edflags; 1639 unsigned int i; 1640 int fd, ret; 1641 1642 if (copy_from_user(&ulr, ip, sizeof(ulr))) 1643 return -EFAULT; 1644 1645 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX)) 1646 return -EINVAL; 1647 1648 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding))) 1649 return -EINVAL; 1650 1651 lc = &ulr.config; 1652 ret = gpio_v2_line_config_validate(lc, ulr.num_lines); 1653 if (ret) 1654 return ret; 1655 1656 lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL); 1657 if (!lr) 1658 return -ENOMEM; 1659 1660 lr->gdev = gpio_device_get(gdev); 1661 1662 for (i = 0; i < ulr.num_lines; i++) { 1663 lr->lines[i].req = lr; 1664 WRITE_ONCE(lr->lines[i].sw_debounced, 0); 1665 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func); 1666 } 1667 1668 if (ulr.consumer[0] != '\0') { 1669 /* label is only initialized if consumer is set */ 1670 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1, 1671 GFP_KERNEL); 1672 if (!lr->label) { 1673 ret = -ENOMEM; 1674 goto out_free_linereq; 1675 } 1676 } 1677 1678 mutex_init(&lr->config_mutex); 1679 init_waitqueue_head(&lr->wait); 1680 lr->event_buffer_size = ulr.event_buffer_size; 1681 if (lr->event_buffer_size == 0) 1682 lr->event_buffer_size = ulr.num_lines * 16; 1683 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16) 1684 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16; 1685 1686 atomic_set(&lr->seqno, 0); 1687 lr->num_lines = ulr.num_lines; 1688 1689 /* Request each GPIO */ 1690 for (i = 0; i < ulr.num_lines; i++) { 1691 u32 offset = ulr.offsets[i]; 1692 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset); 1693 1694 if (IS_ERR(desc)) { 1695 ret = PTR_ERR(desc); 1696 goto out_free_linereq; 1697 } 1698 1699 ret = gpiod_request_user(desc, lr->label); 1700 if (ret) 1701 goto out_free_linereq; 1702 1703 lr->lines[i].desc = desc; 1704 flags = gpio_v2_line_config_flags(lc, i); 1705 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); 1706 1707 ret = gpiod_set_transitory(desc, false); 1708 if (ret < 0) 1709 goto out_free_linereq; 1710 1711 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; 1712 /* 1713 * Lines have to be requested explicitly for input 1714 * or output, else the line will be treated "as is". 1715 */ 1716 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { 1717 int val = gpio_v2_line_config_output_value(lc, i); 1718 1719 ret = gpiod_direction_output(desc, val); 1720 if (ret) 1721 goto out_free_linereq; 1722 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) { 1723 ret = gpiod_direction_input(desc); 1724 if (ret) 1725 goto out_free_linereq; 1726 1727 ret = edge_detector_setup(&lr->lines[i], lc, i, 1728 edflags); 1729 if (ret) 1730 goto out_free_linereq; 1731 } 1732 1733 lr->lines[i].edflags = edflags; 1734 1735 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); 1736 1737 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", 1738 offset); 1739 } 1740 1741 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify; 1742 ret = blocking_notifier_chain_register(&gdev->device_notifier, 1743 &lr->device_unregistered_nb); 1744 if (ret) 1745 goto out_free_linereq; 1746 1747 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 1748 if (fd < 0) { 1749 ret = fd; 1750 goto out_free_linereq; 1751 } 1752 1753 file = anon_inode_getfile("gpio-line", &line_fileops, lr, 1754 O_RDONLY | O_CLOEXEC); 1755 if (IS_ERR(file)) { 1756 ret = PTR_ERR(file); 1757 goto out_put_unused_fd; 1758 } 1759 1760 ulr.fd = fd; 1761 if (copy_to_user(ip, &ulr, sizeof(ulr))) { 1762 /* 1763 * fput() will trigger the release() callback, so do not go onto 1764 * the regular error cleanup path here. 1765 */ 1766 fput(file); 1767 put_unused_fd(fd); 1768 return -EFAULT; 1769 } 1770 1771 fd_install(fd, file); 1772 1773 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 1774 lr->num_lines); 1775 1776 return 0; 1777 1778 out_put_unused_fd: 1779 put_unused_fd(fd); 1780 out_free_linereq: 1781 linereq_free(lr); 1782 return ret; 1783 } 1784 1785 #ifdef CONFIG_GPIO_CDEV_V1 1786 1787 /* 1788 * GPIO line event management 1789 */ 1790 1791 /** 1792 * struct lineevent_state - contains the state of a userspace event 1793 * @gdev: the GPIO device the event pertains to 1794 * @label: consumer label used to tag descriptors 1795 * @desc: the GPIO descriptor held by this event 1796 * @eflags: the event flags this line was requested with 1797 * @irq: the interrupt that trigger in response to events on this GPIO 1798 * @wait: wait queue that handles blocking reads of events 1799 * @device_unregistered_nb: notifier block for receiving gdev unregister events 1800 * @events: KFIFO for the GPIO events 1801 * @timestamp: cache for the timestamp storing it between hardirq 1802 * and IRQ thread, used to bring the timestamp close to the actual 1803 * event 1804 */ 1805 struct lineevent_state { 1806 struct gpio_device *gdev; 1807 const char *label; 1808 struct gpio_desc *desc; 1809 u32 eflags; 1810 int irq; 1811 wait_queue_head_t wait; 1812 struct notifier_block device_unregistered_nb; 1813 DECLARE_KFIFO(events, struct gpioevent_data, 16); 1814 u64 timestamp; 1815 }; 1816 1817 #define GPIOEVENT_REQUEST_VALID_FLAGS \ 1818 (GPIOEVENT_REQUEST_RISING_EDGE | \ 1819 GPIOEVENT_REQUEST_FALLING_EDGE) 1820 1821 static __poll_t lineevent_poll_unlocked(struct file *file, 1822 struct poll_table_struct *wait) 1823 { 1824 struct lineevent_state *le = file->private_data; 1825 __poll_t events = 0; 1826 1827 if (!le->gdev->chip) 1828 return EPOLLHUP | EPOLLERR; 1829 1830 poll_wait(file, &le->wait, wait); 1831 1832 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) 1833 events = EPOLLIN | EPOLLRDNORM; 1834 1835 return events; 1836 } 1837 1838 static __poll_t lineevent_poll(struct file *file, 1839 struct poll_table_struct *wait) 1840 { 1841 struct lineevent_state *le = file->private_data; 1842 1843 return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked); 1844 } 1845 1846 static int lineevent_unregistered_notify(struct notifier_block *nb, 1847 unsigned long action, void *data) 1848 { 1849 struct lineevent_state *le = container_of(nb, struct lineevent_state, 1850 device_unregistered_nb); 1851 1852 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR); 1853 1854 return NOTIFY_OK; 1855 } 1856 1857 struct compat_gpioeevent_data { 1858 compat_u64 timestamp; 1859 u32 id; 1860 }; 1861 1862 static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf, 1863 size_t count, loff_t *f_ps) 1864 { 1865 struct lineevent_state *le = file->private_data; 1866 struct gpioevent_data ge; 1867 ssize_t bytes_read = 0; 1868 ssize_t ge_size; 1869 int ret; 1870 1871 if (!le->gdev->chip) 1872 return -ENODEV; 1873 1874 /* 1875 * When compatible system call is being used the struct gpioevent_data, 1876 * in case of at least ia32, has different size due to the alignment 1877 * differences. Because we have first member 64 bits followed by one of 1878 * 32 bits there is no gap between them. The only difference is the 1879 * padding at the end of the data structure. Hence, we calculate the 1880 * actual sizeof() and pass this as an argument to copy_to_user() to 1881 * drop unneeded bytes from the output. 1882 */ 1883 if (compat_need_64bit_alignment_fixup()) 1884 ge_size = sizeof(struct compat_gpioeevent_data); 1885 else 1886 ge_size = sizeof(struct gpioevent_data); 1887 if (count < ge_size) 1888 return -EINVAL; 1889 1890 do { 1891 spin_lock(&le->wait.lock); 1892 if (kfifo_is_empty(&le->events)) { 1893 if (bytes_read) { 1894 spin_unlock(&le->wait.lock); 1895 return bytes_read; 1896 } 1897 1898 if (file->f_flags & O_NONBLOCK) { 1899 spin_unlock(&le->wait.lock); 1900 return -EAGAIN; 1901 } 1902 1903 ret = wait_event_interruptible_locked(le->wait, 1904 !kfifo_is_empty(&le->events)); 1905 if (ret) { 1906 spin_unlock(&le->wait.lock); 1907 return ret; 1908 } 1909 } 1910 1911 ret = kfifo_out(&le->events, &ge, 1); 1912 spin_unlock(&le->wait.lock); 1913 if (ret != 1) { 1914 /* 1915 * This should never happen - we were holding the lock 1916 * from the moment we learned the fifo is no longer 1917 * empty until now. 1918 */ 1919 ret = -EIO; 1920 break; 1921 } 1922 1923 if (copy_to_user(buf + bytes_read, &ge, ge_size)) 1924 return -EFAULT; 1925 bytes_read += ge_size; 1926 } while (count >= bytes_read + ge_size); 1927 1928 return bytes_read; 1929 } 1930 1931 static ssize_t lineevent_read(struct file *file, char __user *buf, 1932 size_t count, loff_t *f_ps) 1933 { 1934 struct lineevent_state *le = file->private_data; 1935 1936 return call_read_locked(file, buf, count, f_ps, le->gdev, 1937 lineevent_read_unlocked); 1938 } 1939 1940 static void lineevent_free(struct lineevent_state *le) 1941 { 1942 if (le->device_unregistered_nb.notifier_call) 1943 blocking_notifier_chain_unregister(&le->gdev->device_notifier, 1944 &le->device_unregistered_nb); 1945 if (le->irq) 1946 free_irq(le->irq, le); 1947 if (le->desc) 1948 gpiod_free(le->desc); 1949 kfree(le->label); 1950 gpio_device_put(le->gdev); 1951 kfree(le); 1952 } 1953 1954 static int lineevent_release(struct inode *inode, struct file *file) 1955 { 1956 lineevent_free(file->private_data); 1957 return 0; 1958 } 1959 1960 static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd, 1961 unsigned long arg) 1962 { 1963 struct lineevent_state *le = file->private_data; 1964 void __user *ip = (void __user *)arg; 1965 struct gpiohandle_data ghd; 1966 1967 if (!le->gdev->chip) 1968 return -ENODEV; 1969 1970 /* 1971 * We can get the value for an event line but not set it, 1972 * because it is input by definition. 1973 */ 1974 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { 1975 int val; 1976 1977 memset(&ghd, 0, sizeof(ghd)); 1978 1979 val = gpiod_get_value_cansleep(le->desc); 1980 if (val < 0) 1981 return val; 1982 ghd.values[0] = val; 1983 1984 if (copy_to_user(ip, &ghd, sizeof(ghd))) 1985 return -EFAULT; 1986 1987 return 0; 1988 } 1989 return -EINVAL; 1990 } 1991 1992 static long lineevent_ioctl(struct file *file, unsigned int cmd, 1993 unsigned long arg) 1994 { 1995 struct lineevent_state *le = file->private_data; 1996 1997 return call_ioctl_locked(file, cmd, arg, le->gdev, 1998 lineevent_ioctl_unlocked); 1999 } 2000 2001 #ifdef CONFIG_COMPAT 2002 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, 2003 unsigned long arg) 2004 { 2005 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 2006 } 2007 #endif 2008 2009 static const struct file_operations lineevent_fileops = { 2010 .release = lineevent_release, 2011 .read = lineevent_read, 2012 .poll = lineevent_poll, 2013 .owner = THIS_MODULE, 2014 .llseek = noop_llseek, 2015 .unlocked_ioctl = lineevent_ioctl, 2016 #ifdef CONFIG_COMPAT 2017 .compat_ioctl = lineevent_ioctl_compat, 2018 #endif 2019 }; 2020 2021 static irqreturn_t lineevent_irq_thread(int irq, void *p) 2022 { 2023 struct lineevent_state *le = p; 2024 struct gpioevent_data ge; 2025 int ret; 2026 2027 /* Do not leak kernel stack to userspace */ 2028 memset(&ge, 0, sizeof(ge)); 2029 2030 /* 2031 * We may be running from a nested threaded interrupt in which case 2032 * we didn't get the timestamp from lineevent_irq_handler(). 2033 */ 2034 if (!le->timestamp) 2035 ge.timestamp = ktime_get_ns(); 2036 else 2037 ge.timestamp = le->timestamp; 2038 2039 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 2040 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 2041 int level = gpiod_get_value_cansleep(le->desc); 2042 2043 if (level) 2044 /* Emit low-to-high event */ 2045 ge.id = GPIOEVENT_EVENT_RISING_EDGE; 2046 else 2047 /* Emit high-to-low event */ 2048 ge.id = GPIOEVENT_EVENT_FALLING_EDGE; 2049 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) { 2050 /* Emit low-to-high event */ 2051 ge.id = GPIOEVENT_EVENT_RISING_EDGE; 2052 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 2053 /* Emit high-to-low event */ 2054 ge.id = GPIOEVENT_EVENT_FALLING_EDGE; 2055 } else { 2056 return IRQ_NONE; 2057 } 2058 2059 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge, 2060 1, &le->wait.lock); 2061 if (ret) 2062 wake_up_poll(&le->wait, EPOLLIN); 2063 else 2064 pr_debug_ratelimited("event FIFO is full - event dropped\n"); 2065 2066 return IRQ_HANDLED; 2067 } 2068 2069 static irqreturn_t lineevent_irq_handler(int irq, void *p) 2070 { 2071 struct lineevent_state *le = p; 2072 2073 /* 2074 * Just store the timestamp in hardirq context so we get it as 2075 * close in time as possible to the actual event. 2076 */ 2077 le->timestamp = ktime_get_ns(); 2078 2079 return IRQ_WAKE_THREAD; 2080 } 2081 2082 static int lineevent_create(struct gpio_device *gdev, void __user *ip) 2083 { 2084 struct gpioevent_request eventreq; 2085 struct lineevent_state *le; 2086 struct gpio_desc *desc; 2087 struct file *file; 2088 u32 offset; 2089 u32 lflags; 2090 u32 eflags; 2091 int fd; 2092 int ret; 2093 int irq, irqflags = 0; 2094 2095 if (copy_from_user(&eventreq, ip, sizeof(eventreq))) 2096 return -EFAULT; 2097 2098 offset = eventreq.lineoffset; 2099 lflags = eventreq.handleflags; 2100 eflags = eventreq.eventflags; 2101 2102 desc = gpiochip_get_desc(gdev->chip, offset); 2103 if (IS_ERR(desc)) 2104 return PTR_ERR(desc); 2105 2106 /* Return an error if a unknown flag is set */ 2107 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) || 2108 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) 2109 return -EINVAL; 2110 2111 /* This is just wrong: we don't look for events on output lines */ 2112 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) || 2113 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || 2114 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) 2115 return -EINVAL; 2116 2117 /* Only one bias flag can be set. */ 2118 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) && 2119 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | 2120 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) || 2121 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) && 2122 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP))) 2123 return -EINVAL; 2124 2125 le = kzalloc(sizeof(*le), GFP_KERNEL); 2126 if (!le) 2127 return -ENOMEM; 2128 le->gdev = gpio_device_get(gdev); 2129 2130 if (eventreq.consumer_label[0] != '\0') { 2131 /* label is only initialized if consumer_label is set */ 2132 le->label = kstrndup(eventreq.consumer_label, 2133 sizeof(eventreq.consumer_label) - 1, 2134 GFP_KERNEL); 2135 if (!le->label) { 2136 ret = -ENOMEM; 2137 goto out_free_le; 2138 } 2139 } 2140 2141 ret = gpiod_request_user(desc, le->label); 2142 if (ret) 2143 goto out_free_le; 2144 le->desc = desc; 2145 le->eflags = eflags; 2146 2147 linehandle_flags_to_desc_flags(lflags, &desc->flags); 2148 2149 ret = gpiod_direction_input(desc); 2150 if (ret) 2151 goto out_free_le; 2152 2153 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); 2154 2155 irq = gpiod_to_irq(desc); 2156 if (irq <= 0) { 2157 ret = -ENODEV; 2158 goto out_free_le; 2159 } 2160 2161 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) 2162 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 2163 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; 2164 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) 2165 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 2166 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; 2167 irqflags |= IRQF_ONESHOT; 2168 2169 INIT_KFIFO(le->events); 2170 init_waitqueue_head(&le->wait); 2171 2172 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify; 2173 ret = blocking_notifier_chain_register(&gdev->device_notifier, 2174 &le->device_unregistered_nb); 2175 if (ret) 2176 goto out_free_le; 2177 2178 /* Request a thread to read the events */ 2179 ret = request_threaded_irq(irq, 2180 lineevent_irq_handler, 2181 lineevent_irq_thread, 2182 irqflags, 2183 le->label, 2184 le); 2185 if (ret) 2186 goto out_free_le; 2187 2188 le->irq = irq; 2189 2190 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 2191 if (fd < 0) { 2192 ret = fd; 2193 goto out_free_le; 2194 } 2195 2196 file = anon_inode_getfile("gpio-event", 2197 &lineevent_fileops, 2198 le, 2199 O_RDONLY | O_CLOEXEC); 2200 if (IS_ERR(file)) { 2201 ret = PTR_ERR(file); 2202 goto out_put_unused_fd; 2203 } 2204 2205 eventreq.fd = fd; 2206 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) { 2207 /* 2208 * fput() will trigger the release() callback, so do not go onto 2209 * the regular error cleanup path here. 2210 */ 2211 fput(file); 2212 put_unused_fd(fd); 2213 return -EFAULT; 2214 } 2215 2216 fd_install(fd, file); 2217 2218 return 0; 2219 2220 out_put_unused_fd: 2221 put_unused_fd(fd); 2222 out_free_le: 2223 lineevent_free(le); 2224 return ret; 2225 } 2226 2227 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2, 2228 struct gpioline_info *info_v1) 2229 { 2230 u64 flagsv2 = info_v2->flags; 2231 2232 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name)); 2233 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer)); 2234 info_v1->line_offset = info_v2->offset; 2235 info_v1->flags = 0; 2236 2237 if (flagsv2 & GPIO_V2_LINE_FLAG_USED) 2238 info_v1->flags |= GPIOLINE_FLAG_KERNEL; 2239 2240 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT) 2241 info_v1->flags |= GPIOLINE_FLAG_IS_OUT; 2242 2243 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW) 2244 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW; 2245 2246 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN) 2247 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN; 2248 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE) 2249 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE; 2250 2251 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP) 2252 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP; 2253 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) 2254 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN; 2255 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED) 2256 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE; 2257 } 2258 2259 static void gpio_v2_line_info_changed_to_v1( 2260 struct gpio_v2_line_info_changed *lic_v2, 2261 struct gpioline_info_changed *lic_v1) 2262 { 2263 memset(lic_v1, 0, sizeof(*lic_v1)); 2264 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info); 2265 lic_v1->timestamp = lic_v2->timestamp_ns; 2266 lic_v1->event_type = lic_v2->event_type; 2267 } 2268 2269 #endif /* CONFIG_GPIO_CDEV_V1 */ 2270 2271 static void gpio_desc_to_lineinfo(struct gpio_desc *desc, 2272 struct gpio_v2_line_info *info) 2273 { 2274 struct gpio_chip *gc = desc->gdev->chip; 2275 bool ok_for_pinctrl; 2276 unsigned long flags; 2277 u32 debounce_period_us; 2278 unsigned int num_attrs = 0; 2279 2280 memset(info, 0, sizeof(*info)); 2281 info->offset = gpio_chip_hwgpio(desc); 2282 2283 /* 2284 * This function takes a mutex so we must check this before taking 2285 * the spinlock. 2286 * 2287 * FIXME: find a non-racy way to retrieve this information. Maybe a 2288 * lock common to both frameworks? 2289 */ 2290 ok_for_pinctrl = 2291 pinctrl_gpio_can_use_line(gc->base + info->offset); 2292 2293 spin_lock_irqsave(&gpio_lock, flags); 2294 2295 if (desc->name) 2296 strscpy(info->name, desc->name, sizeof(info->name)); 2297 2298 if (desc->label) 2299 strscpy(info->consumer, desc->label, sizeof(info->consumer)); 2300 2301 /* 2302 * Userspace only need to know that the kernel is using this GPIO so 2303 * it can't use it. 2304 */ 2305 info->flags = 0; 2306 if (test_bit(FLAG_REQUESTED, &desc->flags) || 2307 test_bit(FLAG_IS_HOGGED, &desc->flags) || 2308 test_bit(FLAG_USED_AS_IRQ, &desc->flags) || 2309 test_bit(FLAG_EXPORT, &desc->flags) || 2310 test_bit(FLAG_SYSFS, &desc->flags) || 2311 !gpiochip_line_is_valid(gc, info->offset) || 2312 !ok_for_pinctrl) 2313 info->flags |= GPIO_V2_LINE_FLAG_USED; 2314 2315 if (test_bit(FLAG_IS_OUT, &desc->flags)) 2316 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT; 2317 else 2318 info->flags |= GPIO_V2_LINE_FLAG_INPUT; 2319 2320 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 2321 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW; 2322 2323 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 2324 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN; 2325 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) 2326 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE; 2327 2328 if (test_bit(FLAG_BIAS_DISABLE, &desc->flags)) 2329 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED; 2330 if (test_bit(FLAG_PULL_DOWN, &desc->flags)) 2331 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN; 2332 if (test_bit(FLAG_PULL_UP, &desc->flags)) 2333 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP; 2334 2335 if (test_bit(FLAG_EDGE_RISING, &desc->flags)) 2336 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING; 2337 if (test_bit(FLAG_EDGE_FALLING, &desc->flags)) 2338 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING; 2339 2340 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags)) 2341 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME; 2342 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags)) 2343 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE; 2344 2345 debounce_period_us = READ_ONCE(desc->debounce_period_us); 2346 if (debounce_period_us) { 2347 info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE; 2348 info->attrs[num_attrs].debounce_period_us = debounce_period_us; 2349 num_attrs++; 2350 } 2351 info->num_attrs = num_attrs; 2352 2353 spin_unlock_irqrestore(&gpio_lock, flags); 2354 } 2355 2356 struct gpio_chardev_data { 2357 struct gpio_device *gdev; 2358 wait_queue_head_t wait; 2359 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32); 2360 struct notifier_block lineinfo_changed_nb; 2361 struct notifier_block device_unregistered_nb; 2362 unsigned long *watched_lines; 2363 #ifdef CONFIG_GPIO_CDEV_V1 2364 atomic_t watch_abi_version; 2365 #endif 2366 }; 2367 2368 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip) 2369 { 2370 struct gpio_device *gdev = cdev->gdev; 2371 struct gpiochip_info chipinfo; 2372 2373 memset(&chipinfo, 0, sizeof(chipinfo)); 2374 2375 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name)); 2376 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label)); 2377 chipinfo.lines = gdev->ngpio; 2378 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo))) 2379 return -EFAULT; 2380 return 0; 2381 } 2382 2383 #ifdef CONFIG_GPIO_CDEV_V1 2384 /* 2385 * returns 0 if the versions match, else the previously selected ABI version 2386 */ 2387 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata, 2388 unsigned int version) 2389 { 2390 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version); 2391 2392 if (abiv == version) 2393 return 0; 2394 2395 return abiv; 2396 } 2397 2398 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip, 2399 bool watch) 2400 { 2401 struct gpio_desc *desc; 2402 struct gpioline_info lineinfo; 2403 struct gpio_v2_line_info lineinfo_v2; 2404 2405 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) 2406 return -EFAULT; 2407 2408 /* this doubles as a range check on line_offset */ 2409 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset); 2410 if (IS_ERR(desc)) 2411 return PTR_ERR(desc); 2412 2413 if (watch) { 2414 if (lineinfo_ensure_abi_version(cdev, 1)) 2415 return -EPERM; 2416 2417 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines)) 2418 return -EBUSY; 2419 } 2420 2421 gpio_desc_to_lineinfo(desc, &lineinfo_v2); 2422 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo); 2423 2424 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) { 2425 if (watch) 2426 clear_bit(lineinfo.line_offset, cdev->watched_lines); 2427 return -EFAULT; 2428 } 2429 2430 return 0; 2431 } 2432 #endif 2433 2434 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip, 2435 bool watch) 2436 { 2437 struct gpio_desc *desc; 2438 struct gpio_v2_line_info lineinfo; 2439 2440 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) 2441 return -EFAULT; 2442 2443 if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding))) 2444 return -EINVAL; 2445 2446 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset); 2447 if (IS_ERR(desc)) 2448 return PTR_ERR(desc); 2449 2450 if (watch) { 2451 #ifdef CONFIG_GPIO_CDEV_V1 2452 if (lineinfo_ensure_abi_version(cdev, 2)) 2453 return -EPERM; 2454 #endif 2455 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines)) 2456 return -EBUSY; 2457 } 2458 gpio_desc_to_lineinfo(desc, &lineinfo); 2459 2460 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) { 2461 if (watch) 2462 clear_bit(lineinfo.offset, cdev->watched_lines); 2463 return -EFAULT; 2464 } 2465 2466 return 0; 2467 } 2468 2469 static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip) 2470 { 2471 __u32 offset; 2472 2473 if (copy_from_user(&offset, ip, sizeof(offset))) 2474 return -EFAULT; 2475 2476 if (offset >= cdev->gdev->ngpio) 2477 return -EINVAL; 2478 2479 if (!test_and_clear_bit(offset, cdev->watched_lines)) 2480 return -EBUSY; 2481 2482 return 0; 2483 } 2484 2485 static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg) 2486 { 2487 struct gpio_chardev_data *cdev = file->private_data; 2488 struct gpio_device *gdev = cdev->gdev; 2489 void __user *ip = (void __user *)arg; 2490 2491 /* We fail any subsequent ioctl():s when the chip is gone */ 2492 if (!gdev->chip) 2493 return -ENODEV; 2494 2495 /* Fill in the struct and pass to userspace */ 2496 switch (cmd) { 2497 case GPIO_GET_CHIPINFO_IOCTL: 2498 return chipinfo_get(cdev, ip); 2499 #ifdef CONFIG_GPIO_CDEV_V1 2500 case GPIO_GET_LINEHANDLE_IOCTL: 2501 return linehandle_create(gdev, ip); 2502 case GPIO_GET_LINEEVENT_IOCTL: 2503 return lineevent_create(gdev, ip); 2504 case GPIO_GET_LINEINFO_IOCTL: 2505 return lineinfo_get_v1(cdev, ip, false); 2506 case GPIO_GET_LINEINFO_WATCH_IOCTL: 2507 return lineinfo_get_v1(cdev, ip, true); 2508 #endif /* CONFIG_GPIO_CDEV_V1 */ 2509 case GPIO_V2_GET_LINEINFO_IOCTL: 2510 return lineinfo_get(cdev, ip, false); 2511 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL: 2512 return lineinfo_get(cdev, ip, true); 2513 case GPIO_V2_GET_LINE_IOCTL: 2514 return linereq_create(gdev, ip); 2515 case GPIO_GET_LINEINFO_UNWATCH_IOCTL: 2516 return lineinfo_unwatch(cdev, ip); 2517 default: 2518 return -EINVAL; 2519 } 2520 } 2521 2522 /* 2523 * gpio_ioctl() - ioctl handler for the GPIO chardev 2524 */ 2525 static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2526 { 2527 struct gpio_chardev_data *cdev = file->private_data; 2528 2529 return call_ioctl_locked(file, cmd, arg, cdev->gdev, 2530 gpio_ioctl_unlocked); 2531 } 2532 2533 #ifdef CONFIG_COMPAT 2534 static long gpio_ioctl_compat(struct file *file, unsigned int cmd, 2535 unsigned long arg) 2536 { 2537 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 2538 } 2539 #endif 2540 2541 static int lineinfo_changed_notify(struct notifier_block *nb, 2542 unsigned long action, void *data) 2543 { 2544 struct gpio_chardev_data *cdev = 2545 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb); 2546 struct gpio_v2_line_info_changed chg; 2547 struct gpio_desc *desc = data; 2548 int ret; 2549 2550 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines)) 2551 return NOTIFY_DONE; 2552 2553 memset(&chg, 0, sizeof(chg)); 2554 chg.event_type = action; 2555 chg.timestamp_ns = ktime_get_ns(); 2556 gpio_desc_to_lineinfo(desc, &chg.info); 2557 2558 ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock); 2559 if (ret) 2560 wake_up_poll(&cdev->wait, EPOLLIN); 2561 else 2562 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n"); 2563 2564 return NOTIFY_OK; 2565 } 2566 2567 static int gpio_device_unregistered_notify(struct notifier_block *nb, 2568 unsigned long action, void *data) 2569 { 2570 struct gpio_chardev_data *cdev = container_of(nb, 2571 struct gpio_chardev_data, 2572 device_unregistered_nb); 2573 2574 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR); 2575 2576 return NOTIFY_OK; 2577 } 2578 2579 static __poll_t lineinfo_watch_poll_unlocked(struct file *file, 2580 struct poll_table_struct *pollt) 2581 { 2582 struct gpio_chardev_data *cdev = file->private_data; 2583 __poll_t events = 0; 2584 2585 if (!cdev->gdev->chip) 2586 return EPOLLHUP | EPOLLERR; 2587 2588 poll_wait(file, &cdev->wait, pollt); 2589 2590 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, 2591 &cdev->wait.lock)) 2592 events = EPOLLIN | EPOLLRDNORM; 2593 2594 return events; 2595 } 2596 2597 static __poll_t lineinfo_watch_poll(struct file *file, 2598 struct poll_table_struct *pollt) 2599 { 2600 struct gpio_chardev_data *cdev = file->private_data; 2601 2602 return call_poll_locked(file, pollt, cdev->gdev, 2603 lineinfo_watch_poll_unlocked); 2604 } 2605 2606 static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf, 2607 size_t count, loff_t *off) 2608 { 2609 struct gpio_chardev_data *cdev = file->private_data; 2610 struct gpio_v2_line_info_changed event; 2611 ssize_t bytes_read = 0; 2612 int ret; 2613 size_t event_size; 2614 2615 if (!cdev->gdev->chip) 2616 return -ENODEV; 2617 2618 #ifndef CONFIG_GPIO_CDEV_V1 2619 event_size = sizeof(struct gpio_v2_line_info_changed); 2620 if (count < event_size) 2621 return -EINVAL; 2622 #endif 2623 2624 do { 2625 spin_lock(&cdev->wait.lock); 2626 if (kfifo_is_empty(&cdev->events)) { 2627 if (bytes_read) { 2628 spin_unlock(&cdev->wait.lock); 2629 return bytes_read; 2630 } 2631 2632 if (file->f_flags & O_NONBLOCK) { 2633 spin_unlock(&cdev->wait.lock); 2634 return -EAGAIN; 2635 } 2636 2637 ret = wait_event_interruptible_locked(cdev->wait, 2638 !kfifo_is_empty(&cdev->events)); 2639 if (ret) { 2640 spin_unlock(&cdev->wait.lock); 2641 return ret; 2642 } 2643 } 2644 #ifdef CONFIG_GPIO_CDEV_V1 2645 /* must be after kfifo check so watch_abi_version is set */ 2646 if (atomic_read(&cdev->watch_abi_version) == 2) 2647 event_size = sizeof(struct gpio_v2_line_info_changed); 2648 else 2649 event_size = sizeof(struct gpioline_info_changed); 2650 if (count < event_size) { 2651 spin_unlock(&cdev->wait.lock); 2652 return -EINVAL; 2653 } 2654 #endif 2655 ret = kfifo_out(&cdev->events, &event, 1); 2656 spin_unlock(&cdev->wait.lock); 2657 if (ret != 1) { 2658 ret = -EIO; 2659 break; 2660 /* We should never get here. See lineevent_read(). */ 2661 } 2662 2663 #ifdef CONFIG_GPIO_CDEV_V1 2664 if (event_size == sizeof(struct gpio_v2_line_info_changed)) { 2665 if (copy_to_user(buf + bytes_read, &event, event_size)) 2666 return -EFAULT; 2667 } else { 2668 struct gpioline_info_changed event_v1; 2669 2670 gpio_v2_line_info_changed_to_v1(&event, &event_v1); 2671 if (copy_to_user(buf + bytes_read, &event_v1, 2672 event_size)) 2673 return -EFAULT; 2674 } 2675 #else 2676 if (copy_to_user(buf + bytes_read, &event, event_size)) 2677 return -EFAULT; 2678 #endif 2679 bytes_read += event_size; 2680 } while (count >= bytes_read + sizeof(event)); 2681 2682 return bytes_read; 2683 } 2684 2685 static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, 2686 size_t count, loff_t *off) 2687 { 2688 struct gpio_chardev_data *cdev = file->private_data; 2689 2690 return call_read_locked(file, buf, count, off, cdev->gdev, 2691 lineinfo_watch_read_unlocked); 2692 } 2693 2694 /** 2695 * gpio_chrdev_open() - open the chardev for ioctl operations 2696 * @inode: inode for this chardev 2697 * @file: file struct for storing private data 2698 * Returns 0 on success 2699 */ 2700 static int gpio_chrdev_open(struct inode *inode, struct file *file) 2701 { 2702 struct gpio_device *gdev = container_of(inode->i_cdev, 2703 struct gpio_device, chrdev); 2704 struct gpio_chardev_data *cdev; 2705 int ret = -ENOMEM; 2706 2707 down_read(&gdev->sem); 2708 2709 /* Fail on open if the backing gpiochip is gone */ 2710 if (!gdev->chip) { 2711 ret = -ENODEV; 2712 goto out_unlock; 2713 } 2714 2715 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 2716 if (!cdev) 2717 goto out_unlock; 2718 2719 cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); 2720 if (!cdev->watched_lines) 2721 goto out_free_cdev; 2722 2723 init_waitqueue_head(&cdev->wait); 2724 INIT_KFIFO(cdev->events); 2725 cdev->gdev = gpio_device_get(gdev); 2726 2727 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify; 2728 ret = blocking_notifier_chain_register(&gdev->line_state_notifier, 2729 &cdev->lineinfo_changed_nb); 2730 if (ret) 2731 goto out_free_bitmap; 2732 2733 cdev->device_unregistered_nb.notifier_call = 2734 gpio_device_unregistered_notify; 2735 ret = blocking_notifier_chain_register(&gdev->device_notifier, 2736 &cdev->device_unregistered_nb); 2737 if (ret) 2738 goto out_unregister_line_notifier; 2739 2740 file->private_data = cdev; 2741 2742 ret = nonseekable_open(inode, file); 2743 if (ret) 2744 goto out_unregister_device_notifier; 2745 2746 up_read(&gdev->sem); 2747 2748 return ret; 2749 2750 out_unregister_device_notifier: 2751 blocking_notifier_chain_unregister(&gdev->device_notifier, 2752 &cdev->device_unregistered_nb); 2753 out_unregister_line_notifier: 2754 blocking_notifier_chain_unregister(&gdev->line_state_notifier, 2755 &cdev->lineinfo_changed_nb); 2756 out_free_bitmap: 2757 gpio_device_put(gdev); 2758 bitmap_free(cdev->watched_lines); 2759 out_free_cdev: 2760 kfree(cdev); 2761 out_unlock: 2762 up_read(&gdev->sem); 2763 return ret; 2764 } 2765 2766 /** 2767 * gpio_chrdev_release() - close chardev after ioctl operations 2768 * @inode: inode for this chardev 2769 * @file: file struct for storing private data 2770 * Returns 0 on success 2771 */ 2772 static int gpio_chrdev_release(struct inode *inode, struct file *file) 2773 { 2774 struct gpio_chardev_data *cdev = file->private_data; 2775 struct gpio_device *gdev = cdev->gdev; 2776 2777 bitmap_free(cdev->watched_lines); 2778 blocking_notifier_chain_unregister(&gdev->device_notifier, 2779 &cdev->device_unregistered_nb); 2780 blocking_notifier_chain_unregister(&gdev->line_state_notifier, 2781 &cdev->lineinfo_changed_nb); 2782 gpio_device_put(gdev); 2783 kfree(cdev); 2784 2785 return 0; 2786 } 2787 2788 static const struct file_operations gpio_fileops = { 2789 .release = gpio_chrdev_release, 2790 .open = gpio_chrdev_open, 2791 .poll = lineinfo_watch_poll, 2792 .read = lineinfo_watch_read, 2793 .owner = THIS_MODULE, 2794 .llseek = no_llseek, 2795 .unlocked_ioctl = gpio_ioctl, 2796 #ifdef CONFIG_COMPAT 2797 .compat_ioctl = gpio_ioctl_compat, 2798 #endif 2799 }; 2800 2801 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt) 2802 { 2803 int ret; 2804 2805 cdev_init(&gdev->chrdev, &gpio_fileops); 2806 gdev->chrdev.owner = THIS_MODULE; 2807 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id); 2808 2809 ret = cdev_device_add(&gdev->chrdev, &gdev->dev); 2810 if (ret) 2811 return ret; 2812 2813 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n", 2814 MAJOR(devt), gdev->id); 2815 2816 return 0; 2817 } 2818 2819 void gpiolib_cdev_unregister(struct gpio_device *gdev) 2820 { 2821 cdev_device_del(&gdev->chrdev, &gdev->dev); 2822 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL); 2823 } 2824