// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"

/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));

/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */

typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
typedef ssize_t (*read_fn)(struct file *, char __user *,
			   size_t count, loff_t *);

static __poll_t call_poll_locked(struct file *file,
				 struct poll_table_struct *wait,
				 struct gpio_device *gdev, poll_fn func)
{
	__poll_t ret;

	down_read(&gdev->sem);
	ret = func(file, wait);
	up_read(&gdev->sem);

	return ret;
}

static long call_ioctl_locked(struct file *file, unsigned int cmd,
			      unsigned long arg, struct gpio_device *gdev,
			      ioctl_fn func)
{
	long ret;

	down_read(&gdev->sem);
	ret = func(file, cmd, arg);
	up_read(&gdev->sem);

	return ret;
}

static ssize_t call_read_locked(struct file *file, char __user *buf,
				size_t count, loff_t *f_ps,
				struct gpio_device *gdev, read_fn func)
{
	ssize_t ret;

	down_read(&gdev->sem);
	ret = func(file, buf, count, f_ps);
	up_read(&gdev->sem);

	return ret;
}

/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1
/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	GPIOHANDLE_REQUEST_OUTPUT | \
	GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	GPIOHANDLE_REQUEST_OPEN_SOURCE)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_CONFIG,
					     desc);
	}
	return 0;
}

static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
				      unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	unsigned int i;
	int ret;

	if (!lh->gdev->chip)
		return -ENODEV;

	switch (cmd) {
	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
		/* NOTE: It's okay to read values of output lines */
		ret = gpiod_get_array_value_complex(false, true,
						    lh->num_descs, lh->descs,
						    NULL, vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	case GPIOHANDLE_SET_CONFIG_IOCTL:
		return linehandle_set_config(lh, ip);
	default:
		return -EINVAL;
	}
}

static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;

	return call_ioctl_locked(file, cmd, arg, lh->gdev,
				 linehandle_ioctl_unlocked);
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	put_device(&lh->gdev->dev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};

static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gdev;
	get_device(&gdev->dev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request_user(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

/**
 * struct line - contains the state of a requested line
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 *	GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 *	IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 *	events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 *	events for this line.
 * @work: the worker that implements software debouncing
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
 * @raw_level: the line level at the time of event
 * @total_discard_seq: the running counter of the discarded events
 * @last_seqno: the last sequence number before debounce period expires
 */
struct line {
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * The flags for the active edge detector configuration.
	 *
	 * edflags is set by linereq_create(), linereq_free(), and
	 * linereq_set_config_unlocked(), which are themselves mutually
	 * exclusive, and is accessed by edge_irq_thread(),
	 * process_hw_ts_thread() and debounce_work_func(),
	 * which can all live with a slightly stale value.
	 */
	u64 edflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * sw_debounced is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
#ifdef CONFIG_HTE
	struct hte_ts_desc hdesc;
	/*
	 * HTE provider sets line level at the time of event. The valid
	 * value is 0 or 1 and negative value for an error.
	 */
	int raw_level;
	/*
	 * when sw_debounce is set on HTE enabled line, this is running
	 * counter of the discarded events.
	 */
	u32 total_discard_seq;
	/*
	 * when sw_debounce is set on HTE enabled line, this variable records
	 * last sequence number before debounce period expires.
	 */
	u32 last_seqno;
#endif /* CONFIG_HTE */
};

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 *	this line request. Note that this is not used when @num_lines is 1, as
 *	the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 *	of configuration, particularly multi-step accesses to desc flags.
 * @lines: the lines held by this line request, with @num_lines elements.
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[];
};

#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_BIAS_FLAGS)

/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_EDGE_FLAGS)

static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	spin_lock(&lr->wait.lock);
	if (kfifo_is_full(&lr->events)) {
		overflow = true;
		kfifo_skip(&lr->events);
	}
	kfifo_in(&lr->events, le, 1);
	spin_unlock(&lr->wait.lock);
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();
	else if (IS_ENABLED(CONFIG_HTE) &&
		 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
		return line->timestamp_ns;

	return ktime_get_ns();
}

static u32 line_event_id(int level)
{
	return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
		       GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

#ifdef CONFIG_HTE

static enum hte_return process_hw_ts_thread(void *p)
{
	struct line *line;
	struct linereq *lr;
	struct gpio_v2_line_event le;
	u64 edflags;
	int level;

	if (!p)
		return HTE_CB_HANDLED;

	line = p;
	lr = line->req;

	memset(&le, 0, sizeof(le));

	le.timestamp_ns = line->timestamp_ns;
	edflags = READ_ONCE(line->edflags);

	switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		level = (line->raw_level >= 0) ?
				line->raw_level :
				gpiod_get_raw_value_cansleep(line->desc);

		if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
			level = !level;

		le.id = line_event_id(level);
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return HTE_CB_HANDLED;
	}
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ?
		le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return HTE_CB_HANDLED;
}

static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
	struct line *line;
	struct linereq *lr;
	int diff_seqno = 0;

	if (!ts || !p)
		return HTE_CB_HANDLED;

	line = p;
	line->timestamp_ns = ts->tsc;
	line->raw_level = ts->raw_level;
	lr = line->req;

	if (READ_ONCE(line->sw_debounced)) {
		line->total_discard_seq++;
		line->last_seqno = ts->seq;
		mod_delayed_work(system_wq, &line->work,
		  usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
	} else {
		if (unlikely(ts->seq < line->line_seqno))
			return HTE_CB_HANDLED;

		diff_seqno = ts->seq - line->line_seqno;
		line->line_seqno = ts->seq;
		if (lr->num_lines != 1)
			line->req_seqno = atomic_add_return(diff_seqno,
							    &lr->seqno);

		return HTE_RUN_SECOND_CB;
	}

	return HTE_CB_HANDLED;
}

static int hte_edge_setup(struct line *line, u64 eflags)
{
	int ret;
	unsigned long flags = 0;
	struct hte_ts_desc *hdesc = &line->hdesc;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				 HTE_FALLING_EDGE_TS :
				 HTE_RISING_EDGE_TS;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				 HTE_RISING_EDGE_TS :
				 HTE_FALLING_EDGE_TS;

	line->total_discard_seq = 0;

	hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
			   line->desc);

	ret = hte_ts_get(NULL, hdesc, 0);
	if (ret)
		return ret;

	return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
				 line);
}

#else

static int hte_edge_setup(struct line *line, u64 eflags)
{
	return 0;
}
#endif /* CONFIG_HTE */

static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}

/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	u64 eflags, edflags = READ_ONCE(line->edflags);
	int level = -1;
#ifdef CONFIG_HTE
	int diff_seqno;

	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		level = line->raw_level;
#endif
	if (level < 0)
		level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
		/* discard events except the last one */
		line->total_discard_seq -= 1;
		diff_seqno = line->last_seqno - line->total_discard_seq -
				line->line_seqno;
		line->line_seqno = line->last_seqno - line->total_discard_seq;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
	} else
#endif /* CONFIG_HTE */
	{
		line->line_seqno++;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_inc_return(&lr->seqno);
	}

	le.id = line_event_id(level);

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		if (!(IS_ENABLED(CONFIG_HTE) &&
		      test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
			irq = gpiod_to_irq(line->desc);
			if (irq < 0)
				return -ENXIO;

			irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
			ret = request_irq(irq, debounce_irq_handler, irqflags,
					  line->req->label, line);
			if (ret)
				return ret;
			line->irq = irq;
		} else {
			ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->level, level);
		WRITE_ONCE(line->sw_debounced, 1);
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}

static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq(line->irq, line);
		line->irq = 0;
	}

#ifdef CONFIG_HTE
	if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		hte_ts_put(&line->hdesc);
#endif

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->edflags, 0);
	if (line->desc)
		WRITE_ONCE(line->desc->debounce_period_us, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx, u64 edflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	u64 eflags;
	int irq, ret;

	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (eflags && !kfifo_initialized(&line->req->events)) {
		ret = kfifo_alloc(&line->req->events,
				  line->req->event_buffer_size, GFP_KERNEL);
		if (ret)
			return ret;
	}
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	if (IS_ENABLED(CONFIG_HTE) &&
	    (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return hte_edge_setup(line, edflags);

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, line->req->label, line);
	if (ret)
		return ret;

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx, u64 edflags)
{
	u64 active_edflags = READ_ONCE(line->edflags);
	unsigned int debounce_period_us =
			gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((active_edflags == edflags) &&
	    (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be... */
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, edflags);
}

static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EOPNOTSUPP;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Only allow one event clock source */
	if (IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
					unsigned int num_lines)
{
	unsigned int i;
	u64 flags;
	int ret;

	if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
		return -EINVAL;

	if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
		return -EINVAL;

	for (i = 0; i < num_lines; i++) {
		flags = gpio_v2_line_config_flags(lc, i);
		ret = gpio_v2_line_flags_validate(flags);
		if (ret)
			return ret;

		/* debounce requires explicit input */
		if (gpio_v2_line_config_debounced(lc, i) &&
		    !(flags & GPIO_V2_LINE_FLAG_INPUT))
			return -EINVAL;
	}
	return 0;
}

static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
						    unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);

	if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
		set_bit(FLAG_IS_OUT, flagsp);
	else if (flags & GPIO_V2_LINE_FLAG_INPUT)
		clear_bit(FLAG_IS_OUT, flagsp);

	assign_bit(FLAG_EDGE_RISING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
	assign_bit(FLAG_EDGE_FALLING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);

	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);

	assign_bit(FLAG_PULL_UP, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);

	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
	assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}

static long linereq_get_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_get;
	bool val;
	int ret;

	/* NOTE: It's ok to read values of output lines. */
	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	for (num_get = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			num_get++;
			descs = &lr->lines[i].desc;
		}
	}

	if (num_get == 0)
		return -EINVAL;

	if (num_get != 1) {
		descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_get_array_value_complex(false, true, num_get,
					    descs, NULL, vals);

	if (num_get != 1)
		kfree(descs);
	if (ret)
		return ret;

	lv.bits = 0;
	for (didx = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			if (lr->lines[i].sw_debounced)
				val = debounced_value(&lr->lines[i]);
			else
				val = test_bit(didx, vals);
			if (val)
				lv.bits |= BIT_ULL(i);
			didx++;
		}
	}

	if (copy_to_user(ip, &lv, sizeof(lv)))
		return -EFAULT;

	return 0;
}

static long linereq_set_values_unlocked(struct linereq *lr,
					struct gpio_v2_line_values *lv)
{
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_set;
	int ret;

	bitmap_zero(vals, GPIO_V2_LINES_MAX);
	for (num_set = 0, i = 0; i < lr->num_lines; i++) {
		if (lv->mask & BIT_ULL(i)) {
			if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
				return -EPERM;
			if (lv->bits & BIT_ULL(i))
				__set_bit(num_set, vals);
			num_set++;
			descs = &lr->lines[i].desc;
		}
	}
	if (num_set == 0)
		return -EINVAL;

	if (num_set != 1) {
		/* build compacted desc array and values */
		descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv->mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_set_array_value_complex(false, true, num_set,
					    descs, NULL, vals);

	if (num_set != 1)
		kfree(descs);
	return ret;
}

static long linereq_set_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	int ret;

	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	mutex_lock(&lr->config_mutex);

	ret = linereq_set_values_unlocked(lr, &lv);

	mutex_unlock(&lr->config_mutex);

	return ret;
}

static long linereq_set_config_unlocked(struct linereq *lr,
					struct gpio_v2_line_config *lc)
{
	struct gpio_desc *desc;
	struct line *line;
	unsigned int i;
	u64 flags, edflags;
	int ret;

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		desc = lr->lines[i].desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			edge_detector_stop(line);
			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;

			ret = edge_detector_update(line, lc, i, edflags);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->edflags, edflags);

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_CONFIG,
					     desc);
	}
	return 0;
}

static long linereq_set_config(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_config lc;
	int ret;

	if (copy_from_user(&lc, ip, sizeof(lc)))
		return -EFAULT;

	ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
	if (ret)
		return ret;

	mutex_lock(&lr->config_mutex);

	ret = linereq_set_config_unlocked(lr, &lc);

	mutex_unlock(&lr->config_mutex);

	return ret;
}

static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct linereq *lr = file->private_data;
	void __user *ip = (void __user *)arg;

	if (!lr->gdev->chip)
		return -ENODEV;

	switch (cmd) {
	case GPIO_V2_LINE_GET_VALUES_IOCTL:
		return linereq_get_values(lr, ip);
	case GPIO_V2_LINE_SET_VALUES_IOCTL:
		return linereq_set_values(lr, ip);
	case GPIO_V2_LINE_SET_CONFIG_IOCTL:
		return linereq_set_config(lr, ip);
	default:
		return -EINVAL;
	}
}

static long linereq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct linereq *lr = file->private_data;

	return call_ioctl_locked(file, cmd, arg, lr->gdev,
				 linereq_ioctl_unlocked);
}

#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t linereq_poll_unlocked(struct file *file,
				      struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;
	__poll_t events = 0;

	if (!lr->gdev->chip)
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &lr->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
						 &lr->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static __poll_t linereq_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;

	return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
}

static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
				     size_t count, loff_t *f_ps)
{
	struct linereq *lr = file->private_data;
	struct gpio_v2_line_event le;
	ssize_t bytes_read = 0;
	int ret;

	if (!lr->gdev->chip)
		return -ENODEV;

	if (count < sizeof(le))
		return -EINVAL;

	do {
		spin_lock(&lr->wait.lock);
		if (kfifo_is_empty(&lr->events)) {
			if (bytes_read) {
				spin_unlock(&lr->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&lr->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(lr->wait,
					!kfifo_is_empty(&lr->events));
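			/*
			 * Descriptive note (not in the original source):
			 * wait_event_interruptible_locked() drops the wait
			 * lock while sleeping and re-acquires it before
			 * returning, even when interrupted by a signal, so
			 * the lock must still be released on the error path
			 * below.
			 */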
			if (ret) {
				spin_unlock(&lr->wait.lock);
				return ret;
			}
		}

		ret = kfifo_out(&lr->events, &le, 1);
		spin_unlock(&lr->wait.lock);
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the
			 * lock from the moment we learned the fifo is no
			 * longer empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
			return -EFAULT;
		bytes_read += sizeof(le);
	} while (count >= bytes_read + sizeof(le));

	return bytes_read;
}

static ssize_t linereq_read(struct file *file, char __user *buf,
			    size_t count, loff_t *f_ps)
{
	struct linereq *lr = file->private_data;

	return call_read_locked(file, buf, count, f_ps, lr->gdev,
				linereq_read_unlocked);
}

static void linereq_free(struct linereq *lr)
{
	unsigned int i;

	for (i = 0; i < lr->num_lines; i++) {
		if (lr->lines[i].desc) {
			edge_detector_stop(&lr->lines[i]);
			gpiod_free(lr->lines[i].desc);
		}
	}
	kfifo_free(&lr->events);
	kfree(lr->label);
	put_device(&lr->gdev->dev);
	kfree(lr);
}

static int linereq_release(struct inode *inode, struct file *file)
{
	struct linereq *lr = file->private_data;

	linereq_free(lr);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
{
	struct linereq *lr = file->private_data;
	struct device *dev = &lr->gdev->dev;
	u16 i;

	seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));

	for (i = 0; i < lr->num_lines; i++)
		seq_printf(out, "gpio-line:\t%d\n",
			   gpio_chip_hwgpio(lr->lines[i].desc));
}
#endif

static const struct file_operations line_fileops = {
	.release = linereq_release,
	.read = linereq_read,
	.poll = linereq_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linereq_ioctl_compat,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = linereq_show_fdinfo,
#endif
};

static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpio_v2_line_request ulr;
	struct gpio_v2_line_config *lc;
	struct linereq *lr;
	struct file *file;
	u64 flags, edflags;
	unsigned int i;
	int fd, ret;

	if (copy_from_user(&ulr, ip, sizeof(ulr)))
		return -EFAULT;

	if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
		return -EINVAL;

	if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
		return -EINVAL;

	lc = &ulr.config;
	ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
	if (ret)
		return ret;

	lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
	if (!lr)
		return -ENOMEM;

	lr->gdev = gdev;
	get_device(&gdev->dev);

	for (i = 0; i < ulr.num_lines; i++) {
		lr->lines[i].req = lr;
		WRITE_ONCE(lr->lines[i].sw_debounced, 0);
		INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
	}

	if (ulr.consumer[0] != '\0') {
		/* label is only initialized if consumer is set */
		lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
				     GFP_KERNEL);
		if (!lr->label) {
			ret = -ENOMEM;
			goto out_free_linereq;
		}
	}

	mutex_init(&lr->config_mutex);
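	/*
	 * Descriptive note (not in the original source): the wait queue
	 * head's internal spinlock (lr->wait.lock) also serves as the lock
	 * protecting the event kfifo - see linereq_put_event() and
	 * linereq_read_unlocked().
	 */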
	init_waitqueue_head(&lr->wait);
	lr->event_buffer_size = ulr.event_buffer_size;
	if (lr->event_buffer_size == 0)
		lr->event_buffer_size = ulr.num_lines * 16;
	else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
		lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;

	atomic_set(&lr->seqno, 0);
	lr->num_lines = ulr.num_lines;

	/* Request each GPIO */
	for (i = 0; i < ulr.num_lines; i++) {
		u32 offset = ulr.offsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_linereq;
		}

		ret = gpiod_request_user(desc, lr->label);
		if (ret)
			goto out_free_linereq;

		lr->lines[i].desc = desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_linereq;

		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_linereq;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_linereq;

			ret = edge_detector_setup(&lr->lines[i], lc, i,
						  edflags);
			if (ret)
				goto out_free_linereq;
		}

		lr->lines[i].edflags = edflags;

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_linereq;
	}

	file = anon_inode_getfile("gpio-line", &line_fileops, lr,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	ulr.fd = fd;
	if (copy_to_user(ip, &ulr, sizeof(ulr))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lr->num_lines);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_linereq:
	linereq_free(lr);
	return ret;
}

#ifdef CONFIG_GPIO_CDEV_V1

/*
 * GPIO line event management
 */

/**
 * struct lineevent_state - contains the state of a userspace event
 * @gdev: the GPIO device the event pertains to
 * @label: consumer label used to tag descriptors
 * @desc: the GPIO descriptor held by this event
 * @eflags: the event flags this line was requested with
 * @irq: the interrupt that triggers in response to events on this GPIO
 * @wait: wait queue that handles blocking reads of events
 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp storing it between hardirq
 *	and IRQ thread, used to bring the timestamp close to the actual
 *	event
 */
struct lineevent_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *desc;
	u32 eflags;
	int irq;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpioevent_data, 16);
	u64 timestamp;
};

#define GPIOEVENT_REQUEST_VALID_FLAGS \
	(GPIOEVENT_REQUEST_RISING_EDGE | \
	GPIOEVENT_REQUEST_FALLING_EDGE)

static __poll_t lineevent_poll_unlocked(struct file *file,
					struct poll_table_struct *wait)
{
	struct lineevent_state *le = file->private_data;
	__poll_t events = 0;

	if (!le->gdev->chip)
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &le->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static __poll_t lineevent_poll(struct file *file,
			       struct poll_table_struct *wait)
{
	struct lineevent_state *le = file->private_data;

	return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
}

struct compat_gpioeevent_data {
	compat_u64 timestamp;
	u32 id;
};

static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
				       size_t count, loff_t *f_ps)
{
	struct lineevent_state *le = file->private_data;
	struct gpioevent_data ge;
	ssize_t bytes_read = 0;
	ssize_t ge_size;
	int ret;

	if (!le->gdev->chip)
		return -ENODEV;

	/*
	 * When compatible system call is being used the struct gpioevent_data,
	 * in case of at least ia32, has different size due to the alignment
	 * differences. Because we have first member 64 bits followed by one of
	 * 32 bits there is no gap between them. The only difference is the
	 * padding at the end of the data structure. Hence, we calculate the
	 * actual sizeof() and pass this as an argument to copy_to_user() to
	 * drop unneeded bytes from the output.
	 */
	if (compat_need_64bit_alignment_fixup())
		ge_size = sizeof(struct compat_gpioeevent_data);
	else
		ge_size = sizeof(struct gpioevent_data);
	if (count < ge_size)
		return -EINVAL;

	do {
		spin_lock(&le->wait.lock);
		if (kfifo_is_empty(&le->events)) {
			if (bytes_read) {
				spin_unlock(&le->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&le->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(le->wait,
					!kfifo_is_empty(&le->events));
			if (ret) {
				spin_unlock(&le->wait.lock);
				return ret;
			}
		}

		ret = kfifo_out(&le->events, &ge, 1);
		spin_unlock(&le->wait.lock);
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the lock
			 * from the moment we learned the fifo is no longer
			 * empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &ge, ge_size))
			return -EFAULT;
		bytes_read += ge_size;
	} while (count >= bytes_read + ge_size);

	return bytes_read;
}

static ssize_t lineevent_read(struct file *file, char __user *buf,
			      size_t count, loff_t *f_ps)
{
	struct lineevent_state *le = file->private_data;

	return call_read_locked(file, buf, count, f_ps, le->gdev,
				lineevent_read_unlocked);
}

static void lineevent_free(struct lineevent_state *le)
{
	if (le->irq)
		free_irq(le->irq, le);
	if (le->desc)
		gpiod_free(le->desc);
	kfree(le->label);
	put_device(&le->gdev->dev);
	kfree(le);
}

static int lineevent_release(struct inode *inode, struct file *file)
{
	lineevent_free(file->private_data);
	return 0;
}

static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	struct lineevent_state *le = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;

	if (!le->gdev->chip)
		return -ENODEV;

	/*
	 * We can get the value for an event line but not set it,
	 * because it is input by definition.
	 */
	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		int val;

		memset(&ghd, 0, sizeof(ghd));

		val = gpiod_get_value_cansleep(le->desc);
		if (val < 0)
			return val;
		ghd.values[0] = val;

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	}
	return -EINVAL;
}

static long lineevent_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct lineevent_state *le = file->private_data;

	return call_ioctl_locked(file, cmd, arg, le->gdev,
				 lineevent_ioctl_unlocked);
}

#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations lineevent_fileops = {
	.release = lineevent_release,
	.read = lineevent_read,
	.poll = lineevent_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lineevent_ioctl_compat,
#endif
};

static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpiochip_get_desc(gdev->chip, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gdev;
	get_device(&gdev->dev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request_user(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	blocking_notifier_call_chain(&desc->gdev->notifier,
				     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}

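	/*
	 * The requested edges are logical, so for an active-low line they
	 * map to the opposite physical edges and the IRQ trigger flags are
	 * swapped accordingly.
	 */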
	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   le->label,
				   le);
	if (ret)
		goto out_free_le;

	le->irq = irq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}

static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
	struct gpio_v2_line_info_changed *lic_v2,
	struct gpioline_info_changed *lic_v1)
{
	memset(lic_v1, 0, sizeof(*lic_v1));
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */

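/*
 * gpio_desc_to_lineinfo() - snapshot the state of a descriptor into the
 * v2 struct gpio_v2_line_info, holding gpio_lock while the descriptor
 * flags are read so the reported state is self-consistent.
 */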
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	struct gpio_chip *gc = desc->gdev->chip;
	bool ok_for_pinctrl;
	unsigned long flags;
	u32 debounce_period_us;
	unsigned int num_attrs = 0;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	/*
	 * This function takes a mutex so we must check this before taking
	 * the spinlock.
	 *
	 * FIXME: find a non-racy way to retrieve this information. Maybe a
	 * lock common to both frameworks?
	 */
	ok_for_pinctrl =
		pinctrl_gpio_can_use_line(gc->base + info->offset);

	spin_lock_irqsave(&gpio_lock, flags);

	if (desc->name)
		strscpy(info->name, desc->name, sizeof(info->name));

	if (desc->label)
		strscpy(info->consumer, desc->label, sizeof(info->consumer));

	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 */
	info->flags = 0;
	if (test_bit(FLAG_REQUESTED, &desc->flags) ||
	    test_bit(FLAG_IS_HOGGED, &desc->flags) ||
	    test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
	    test_bit(FLAG_EXPORT, &desc->flags) ||
	    test_bit(FLAG_SYSFS, &desc->flags) ||
	    !gpiochip_line_is_valid(gc, info->offset) ||
	    !ok_for_pinctrl)
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;

	debounce_period_us = READ_ONCE(desc->debounce_period_us);
	if (debounce_period_us) {
		info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
		info->attrs[num_attrs].debounce_period_us = debounce_period_us;
		num_attrs++;
	}
	info->num_attrs = num_attrs;

	spin_unlock_irqrestore(&gpio_lock, flags);
}

struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;
	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;
	return 0;
}

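/*
 * Line info watches exist in both the v1 and v2 uAPI, but a given chardev
 * file handle may only use one of them.  The first watch request pins the
 * ABI version for that handle (watch_abi_version), and subsequent watch
 * requests using the other version are rejected, so lineinfo_watch_read()
 * knows which event format to copy to userspace.
 */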
#ifdef CONFIG_GPIO_CDEV_V1
/*
 * returns 0 if the versions match, else the previously selected ABI version
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}

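/*
 * lineinfo_unwatch() - implementation of GPIO_GET_LINEINFO_UNWATCH_IOCTL;
 * clears the watch on the given line offset, or returns -EBUSY if the line
 * was not being watched.
 */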
static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	/* We fail any subsequent ioctl()s when the chip is gone */
	if (!gdev->chip)
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static struct gpio_chardev_data *
to_gpio_chardev_data(struct notifier_block *nb)
{
	return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
}

static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = to_gpio_chardev_data(nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
					     struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	if (!cdev->gdev->chip)
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;

	return call_poll_locked(file, pollt, cdev->gdev,
				lineinfo_watch_poll_unlocked);
}

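/*
 * lineinfo_watch_read_unlocked() - blocking read of line info change events.
 * Events are returned in the v1 or v2 format depending on the ABI version
 * pinned by the first watch request, and more than one event may be
 * returned per read if the buffer is large enough.
 */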
static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
					    size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

	if (!cdev->gdev->chip)
		return -ENODEV;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		spin_lock(&cdev->wait.lock);
		if (kfifo_is_empty(&cdev->events)) {
			if (bytes_read) {
				spin_unlock(&cdev->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&cdev->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(cdev->wait,
					!kfifo_is_empty(&cdev->events));
			if (ret) {
				spin_unlock(&cdev->wait.lock);
				return ret;
			}
		}
#ifdef CONFIG_GPIO_CDEV_V1
		/* must be after kfifo check so watch_abi_version is set */
		if (atomic_read(&cdev->watch_abi_version) == 2)
			event_size = sizeof(struct gpio_v2_line_info_changed);
		else
			event_size = sizeof(struct gpioline_info_changed);
		if (count < event_size) {
			spin_unlock(&cdev->wait.lock);
			return -EINVAL;
		}
#endif
		ret = kfifo_out(&cdev->events, &event, 1);
		spin_unlock(&cdev->wait.lock);
		if (ret != 1) {
			/* We should never get here. See lineevent_read(). */
			ret = -EIO;
			break;
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}

static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;

	return call_read_locked(file, buf, count, off, cdev->gdev,
				lineinfo_watch_read_unlocked);
}

/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	down_read(&gdev->sem);

	/* Fail on open if the backing gpiochip is gone */
	if (!gdev->chip) {
		ret = -ENODEV;
		goto out_unlock;
	}

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto out_unlock;

	cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gdev;

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	get_device(&gdev->dev);
	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_notifier;

	up_read(&gdev->sem);

	return ret;

out_unregister_notifier:
	blocking_notifier_chain_unregister(&gdev->notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
out_unlock:
	up_read(&gdev->sem);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	bitmap_free(cdev->watched_lines);
	blocking_notifier_chain_unregister(&gdev->notifier,
					   &cdev->lineinfo_changed_nb);
	put_device(&gdev->dev);
	kfree(cdev);

	return 0;
}

static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};

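/*
 * gpiolib_cdev_register() - create and add the GPIO character device
 * (/dev/gpiochipN) for @gdev, using the file operations defined above.
 */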
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
		 MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
}