1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/anon_inodes.h>
4 #include <linux/atomic.h>
5 #include <linux/bitmap.h>
6 #include <linux/build_bug.h>
7 #include <linux/cdev.h>
8 #include <linux/cleanup.h>
9 #include <linux/compat.h>
10 #include <linux/compiler.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/file.h>
14 #include <linux/gpio.h>
15 #include <linux/gpio/driver.h>
16 #include <linux/hte.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqreturn.h>
19 #include <linux/kernel.h>
20 #include <linux/kfifo.h>
21 #include <linux/module.h>
22 #include <linux/mutex.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/poll.h>
25 #include <linux/rbtree.h>
26 #include <linux/seq_file.h>
27 #include <linux/spinlock.h>
28 #include <linux/timekeeping.h>
29 #include <linux/uaccess.h>
30 #include <linux/workqueue.h>
31
32 #include <uapi/linux/gpio.h>
33
34 #include "gpiolib.h"
35 #include "gpiolib-cdev.h"
36
37 /*
38 * Array sizes must ensure 64-bit alignment and not create holes in the
39 * struct packing.
40 */
41 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
43
44 /*
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
46 */
47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
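/*
 * With no implicit padding, these structs have the same layout for
 * 32-bit and 64-bit userspace, so the compat_ioctl() handlers below
 * can forward to the native ioctl() handlers via compat_ptr() without
 * any structure translation.
 */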
55
56 /* Character device interface to GPIO.
57 *
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
60 */
61
62 typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
63 typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
64 typedef ssize_t (*read_fn)(struct file *, char __user *,
65 size_t count, loff_t *);
66
67 static __poll_t call_poll_locked(struct file *file,
68 struct poll_table_struct *wait,
69 struct gpio_device *gdev, poll_fn func)
70 {
71 __poll_t ret;
72
73 down_read(&gdev->sem);
74 ret = func(file, wait);
75 up_read(&gdev->sem);
76
77 return ret;
78 }
79
80 static long call_ioctl_locked(struct file *file, unsigned int cmd,
81 unsigned long arg, struct gpio_device *gdev,
82 ioctl_fn func)
83 {
84 long ret;
85
86 down_read(&gdev->sem);
87 ret = func(file, cmd, arg);
88 up_read(&gdev->sem);
89
90 return ret;
91 }
92
93 static ssize_t call_read_locked(struct file *file, char __user *buf,
94 size_t count, loff_t *f_ps,
95 struct gpio_device *gdev, read_fn func)
96 {
97 ssize_t ret;
98
99 down_read(&gdev->sem);
100 ret = func(file, buf, count, f_ps);
101 up_read(&gdev->sem);
102
103 return ret;
104 }
105
106 /*
107 * GPIO line handle management
108 */
109
110 #ifdef CONFIG_GPIO_CDEV_V1
111 /**
112 * struct linehandle_state - contains the state of a userspace handle
113 * @gdev: the GPIO device the handle pertains to
114 * @label: consumer label used to tag descriptors
115 * @descs: the GPIO descriptors held by this handle
116 * @num_descs: the number of descriptors held in the descs array
117 */
118 struct linehandle_state {
119 struct gpio_device *gdev;
120 const char *label;
121 struct gpio_desc *descs[GPIOHANDLES_MAX];
122 u32 num_descs;
123 };
124
125 #define GPIOHANDLE_REQUEST_VALID_FLAGS \
126 (GPIOHANDLE_REQUEST_INPUT | \
127 GPIOHANDLE_REQUEST_OUTPUT | \
128 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
129 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
130 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
131 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
132 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
133 GPIOHANDLE_REQUEST_OPEN_SOURCE)
134
135 #define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
136 (GPIOHANDLE_REQUEST_INPUT | \
137 GPIOHANDLE_REQUEST_OUTPUT)
138
139 static int linehandle_validate_flags(u32 flags)
140 {
141 /* Return an error if an unknown flag is set */
142 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
143 return -EINVAL;
144
145 /*
146 * Do not allow both INPUT & OUTPUT flags to be set as they are
147 * contradictory.
148 */
149 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
150 (flags & GPIOHANDLE_REQUEST_OUTPUT))
151 return -EINVAL;
152
153 /*
154 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
155 * the hardware actually supports enabling both at the same time the
156 * electrical result would be disastrous.
157 */
158 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
159 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
160 return -EINVAL;
161
162 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
163 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
164 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
165 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
166 return -EINVAL;
167
168 /* Bias flags only allowed for input or output mode. */
169 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
170 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
171 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
172 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
173 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
174 return -EINVAL;
175
176 /* Only one bias flag can be set. */
177 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
178 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
179 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
180 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
181 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
182 return -EINVAL;
183
184 return 0;
185 }
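/*
 * For example, linehandle_validate_flags() rejects
 * GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_OPEN_DRAIN, as the
 * drive flags only make sense for outputs, while
 * GPIOHANDLE_REQUEST_OUTPUT | GPIOHANDLE_REQUEST_BIAS_PULL_UP is
 * accepted.
 */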
186
187 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
188 {
189 assign_bit(FLAG_ACTIVE_LOW, flagsp,
190 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
191 assign_bit(FLAG_OPEN_DRAIN, flagsp,
192 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
193 assign_bit(FLAG_OPEN_SOURCE, flagsp,
194 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
195 assign_bit(FLAG_PULL_UP, flagsp,
196 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
197 assign_bit(FLAG_PULL_DOWN, flagsp,
198 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
199 assign_bit(FLAG_BIAS_DISABLE, flagsp,
200 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
201 }
202
203 static long linehandle_set_config(struct linehandle_state *lh,
204 void __user *ip)
205 {
206 struct gpiohandle_config gcnf;
207 struct gpio_desc *desc;
208 int i, ret;
209 u32 lflags;
210
211 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
212 return -EFAULT;
213
214 lflags = gcnf.flags;
215 ret = linehandle_validate_flags(lflags);
216 if (ret)
217 return ret;
218
219 /* Lines must be reconfigured explicitly as input or output. */
220 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
221 return -EINVAL;
222
223 for (i = 0; i < lh->num_descs; i++) {
224 desc = lh->descs[i];
225 linehandle_flags_to_desc_flags(lflags, &desc->flags);
226
227 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
228 int val = !!gcnf.default_values[i];
229
230 ret = gpiod_direction_output(desc, val);
231 if (ret)
232 return ret;
233 } else {
234 ret = gpiod_direction_input(desc);
235 if (ret)
236 return ret;
237 }
238
239 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
240 }
241 return 0;
242 }
243
244 static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
245 unsigned long arg)
246 {
247 struct linehandle_state *lh = file->private_data;
248 void __user *ip = (void __user *)arg;
249 struct gpiohandle_data ghd;
250 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
251 unsigned int i;
252 int ret;
253
254 if (!lh->gdev->chip)
255 return -ENODEV;
256
257 switch (cmd) {
258 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
259 /* NOTE: It's okay to read values of output lines */
260 ret = gpiod_get_array_value_complex(false, true,
261 lh->num_descs, lh->descs,
262 NULL, vals);
263 if (ret)
264 return ret;
265
266 memset(&ghd, 0, sizeof(ghd));
267 for (i = 0; i < lh->num_descs; i++)
268 ghd.values[i] = test_bit(i, vals);
269
270 if (copy_to_user(ip, &ghd, sizeof(ghd)))
271 return -EFAULT;
272
273 return 0;
274 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
275 /*
276 * All line descriptors were created at once with the same
277 * flags so just check if the first one is really output.
278 */
279 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
280 return -EPERM;
281
282 if (copy_from_user(&ghd, ip, sizeof(ghd)))
283 return -EFAULT;
284
285 /* Clamp all values to [0,1] */
286 for (i = 0; i < lh->num_descs; i++)
287 __assign_bit(i, vals, ghd.values[i]);
288
289 /* Reuse the array setting function */
290 return gpiod_set_array_value_complex(false,
291 true,
292 lh->num_descs,
293 lh->descs,
294 NULL,
295 vals);
296 case GPIOHANDLE_SET_CONFIG_IOCTL:
297 return linehandle_set_config(lh, ip);
298 default:
299 return -EINVAL;
300 }
301 }
302
303 static long linehandle_ioctl(struct file *file, unsigned int cmd,
304 unsigned long arg)
305 {
306 struct linehandle_state *lh = file->private_data;
307
308 return call_ioctl_locked(file, cmd, arg, lh->gdev,
309 linehandle_ioctl_unlocked);
310 }
311
312 #ifdef CONFIG_COMPAT
313 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
314 unsigned long arg)
315 {
316 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
317 }
318 #endif
319
320 static void linehandle_free(struct linehandle_state *lh)
321 {
322 int i;
323
324 for (i = 0; i < lh->num_descs; i++)
325 if (lh->descs[i])
326 gpiod_free(lh->descs[i]);
327 kfree(lh->label);
328 gpio_device_put(lh->gdev);
329 kfree(lh);
330 }
331
332 static int linehandle_release(struct inode *inode, struct file *file)
333 {
334 linehandle_free(file->private_data);
335 return 0;
336 }
337
338 static const struct file_operations linehandle_fileops = {
339 .release = linehandle_release,
340 .owner = THIS_MODULE,
341 .llseek = noop_llseek,
342 .unlocked_ioctl = linehandle_ioctl,
343 #ifdef CONFIG_COMPAT
344 .compat_ioctl = linehandle_ioctl_compat,
345 #endif
346 };
347
348 static int linehandle_create(struct gpio_device *gdev, void __user *ip)
349 {
350 struct gpiohandle_request handlereq;
351 struct linehandle_state *lh;
352 struct file *file;
353 int fd, i, ret;
354 u32 lflags;
355
356 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
357 return -EFAULT;
358 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
359 return -EINVAL;
360
361 lflags = handlereq.flags;
362
363 ret = linehandle_validate_flags(lflags);
364 if (ret)
365 return ret;
366
367 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
368 if (!lh)
369 return -ENOMEM;
370 lh->gdev = gpio_device_get(gdev);
371
372 if (handlereq.consumer_label[0] != '\0') {
373 /* label is only initialized if consumer_label is set */
374 lh->label = kstrndup(handlereq.consumer_label,
375 sizeof(handlereq.consumer_label) - 1,
376 GFP_KERNEL);
377 if (!lh->label) {
378 ret = -ENOMEM;
379 goto out_free_lh;
380 }
381 }
382
383 lh->num_descs = handlereq.lines;
384
385 /* Request each GPIO */
386 for (i = 0; i < handlereq.lines; i++) {
387 u32 offset = handlereq.lineoffsets[i];
388 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
389
390 if (IS_ERR(desc)) {
391 ret = PTR_ERR(desc);
392 goto out_free_lh;
393 }
394
395 ret = gpiod_request_user(desc, lh->label);
396 if (ret)
397 goto out_free_lh;
398 lh->descs[i] = desc;
399 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
400
401 ret = gpiod_set_transitory(desc, false);
402 if (ret < 0)
403 goto out_free_lh;
404
405 /*
406 * Lines have to be requested explicitly for input
407 * or output, else the line will be treated "as is".
408 */
409 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
410 int val = !!handlereq.default_values[i];
411
412 ret = gpiod_direction_output(desc, val);
413 if (ret)
414 goto out_free_lh;
415 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
416 ret = gpiod_direction_input(desc);
417 if (ret)
418 goto out_free_lh;
419 }
420
421 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
422
423 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
424 offset);
425 }
426
427 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
428 if (fd < 0) {
429 ret = fd;
430 goto out_free_lh;
431 }
432
433 file = anon_inode_getfile("gpio-linehandle",
434 &linehandle_fileops,
435 lh,
436 O_RDONLY | O_CLOEXEC);
437 if (IS_ERR(file)) {
438 ret = PTR_ERR(file);
439 goto out_put_unused_fd;
440 }
441
442 handlereq.fd = fd;
443 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
444 /*
445 * fput() will trigger the release() callback, so do not go onto
446 * the regular error cleanup path here.
447 */
448 fput(file);
449 put_unused_fd(fd);
450 return -EFAULT;
451 }
452
453 fd_install(fd, file);
454
455 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
456 lh->num_descs);
457
458 return 0;
459
460 out_put_unused_fd:
461 put_unused_fd(fd);
462 out_free_lh:
463 linehandle_free(lh);
464 return ret;
465 }
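/*
 * Illustrative userspace use of the v1 handle request (a sketch only,
 * not kernel code; error handling omitted; chip_fd is an open
 * /dev/gpiochipN):
 *
 *	struct gpiohandle_request req = { 0 };
 *
 *	req.lineoffsets[0] = 3;
 *	req.lines = 1;
 *	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
 *	req.default_values[0] = 1;
 *	ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
 *
 * On success req.fd refers to the anon inode created above and accepts
 * the GPIOHANDLE_*_IOCTL calls handled by linehandle_ioctl().
 */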
466 #endif /* CONFIG_GPIO_CDEV_V1 */
467
468 /**
469 * struct line - contains the state of a requested line
470 * @node: to store the object in supinfo_tree if supplemental
471 * @desc: the GPIO descriptor for this line.
472 * @req: the corresponding line request
473 * @irq: the interrupt triggered in response to events on this GPIO
474 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
475 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
476 * @timestamp_ns: cache for the timestamp storing it between hardirq and
477 * IRQ thread, used to bring the timestamp close to the actual event
478 * @req_seqno: the seqno for the current edge event in the sequence of
479 * events for the corresponding line request. This is drawn from the @req.
480 * @line_seqno: the seqno for the current edge event in the sequence of
481 * events for this line.
482 * @work: the worker that implements software debouncing
483 * @debounce_period_us: the debounce period in microseconds
484 * @sw_debounced: flag indicating if the software debouncer is active
485 * @level: the current debounced physical level of the line
486 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
487 * @raw_level: the line level at the time of event
488 * @total_discard_seq: the running counter of the discarded events
489 * @last_seqno: the last sequence number before debounce period expires
490 */
491 struct line {
492 struct rb_node node;
493 struct gpio_desc *desc;
494 /*
495 * -- edge detector specific fields --
496 */
497 struct linereq *req;
498 unsigned int irq;
499 /*
500 * The flags for the active edge detector configuration.
501 *
502 * edflags is set by linereq_create(), linereq_free(), and
503 * linereq_set_config_unlocked(), which are themselves mutually
504 * exclusive, and is accessed by edge_irq_thread(),
505 * process_hw_ts_thread() and debounce_work_func(),
506 * which can all live with a slightly stale value.
507 */
508 u64 edflags;
509 /*
510 * timestamp_ns and req_seqno are accessed only by
511 * edge_irq_handler() and edge_irq_thread(), which are themselves
512 * mutually exclusive, so no additional protection is necessary.
513 */
514 u64 timestamp_ns;
515 u32 req_seqno;
516 /*
517 * line_seqno is accessed by either edge_irq_thread() or
518 * debounce_work_func(), which are themselves mutually exclusive,
519 * so no additional protection is necessary.
520 */
521 u32 line_seqno;
522 /*
523 * -- debouncer specific fields --
524 */
525 struct delayed_work work;
526 /*
527 * debounce_period_us is accessed by debounce_irq_handler() and
528 * process_hw_ts() which are disabled when modified by
529 * debounce_setup(), edge_detector_setup() or edge_detector_stop()
530 * or can live with a stale version when updated by
531 * edge_detector_update().
532 * The modifying functions are themselves mutually exclusive.
533 */
534 unsigned int debounce_period_us;
535 /*
536 * sw_debounce is accessed by linereq_set_config(), which is the
537 * only setter, and linereq_get_values(), which can live with a
538 * slightly stale value.
539 */
540 unsigned int sw_debounced;
541 /*
542 * level is accessed by debounce_work_func(), which is the only
543 * setter, and linereq_get_values() which can live with a slightly
544 * stale value.
545 */
546 unsigned int level;
547 #ifdef CONFIG_HTE
548 struct hte_ts_desc hdesc;
549 /*
550 * HTE provider sets line level at the time of event. The valid
551 * value is 0 or 1 and negative value for an error.
552 */
553 int raw_level;
554 /*
555 * When sw_debounce is set on an HTE-enabled line, this is a running
556 * counter of the discarded events.
557 */
558 u32 total_discard_seq;
559 /*
560 * When sw_debounce is set on an HTE-enabled line, this records the
561 * last sequence number seen before the debounce period expires.
562 */
563 u32 last_seqno;
564 #endif /* CONFIG_HTE */
565 };
566
567 /*
568 * An rbtree of the struct lines containing supplemental info.
569 * Used to populate gpio_v2_line_info with cdev specific fields not contained
570 * in the struct gpio_desc.
571 * A line is determined to contain supplemental information by
572 * line_has_supinfo().
573 */
574 static struct rb_root supinfo_tree = RB_ROOT;
575 /* covers supinfo_tree */
576 static DEFINE_SPINLOCK(supinfo_lock);
577
578 /**
579 * struct linereq - contains the state of a userspace line request
580 * @gdev: the GPIO device the line request pertains to
581 * @label: consumer label used to tag GPIO descriptors
582 * @num_lines: the number of lines in the lines array
583 * @wait: wait queue that handles blocking reads of events
584 * @device_unregistered_nb: notifier block for receiving gdev unregister events
585 * @event_buffer_size: the number of elements allocated in @events
586 * @events: KFIFO for the GPIO events
587 * @seqno: the sequence number for edge events generated on all lines in
588 * this line request. Note that this is not used when @num_lines is 1, as
589 * the line_seqno is then the same and is cheaper to calculate.
590 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
591 * of configuration, particularly multi-step accesses to desc flags and
592 * changes to supinfo status.
593 * @lines: the lines held by this line request, with @num_lines elements.
594 */
595 struct linereq {
596 struct gpio_device *gdev;
597 const char *label;
598 u32 num_lines;
599 wait_queue_head_t wait;
600 struct notifier_block device_unregistered_nb;
601 u32 event_buffer_size;
602 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
603 atomic_t seqno;
604 struct mutex config_mutex;
605 struct line lines[];
606 };
607
608 static void supinfo_insert(struct line *line)
609 {
610 struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
611 struct line *entry;
612
613 guard(spinlock)(&supinfo_lock);
614
615 while (*new) {
616 entry = container_of(*new, struct line, node);
617
618 parent = *new;
619 if (line->desc < entry->desc) {
620 new = &((*new)->rb_left);
621 } else if (line->desc > entry->desc) {
622 new = &((*new)->rb_right);
623 } else {
624 /* this should never happen */
625 WARN(1, "duplicate line inserted");
626 return;
627 }
628 }
629
630 rb_link_node(&line->node, parent, new);
631 rb_insert_color(&line->node, &supinfo_tree);
632 }
633
634 static void supinfo_erase(struct line *line)
635 {
636 guard(spinlock)(&supinfo_lock);
637
638 rb_erase(&line->node, &supinfo_tree);
639 }
640
641 static struct line *supinfo_find(struct gpio_desc *desc)
642 {
643 struct rb_node *node = supinfo_tree.rb_node;
644 struct line *line;
645
646 while (node) {
647 line = container_of(node, struct line, node);
648 if (desc < line->desc)
649 node = node->rb_left;
650 else if (desc > line->desc)
651 node = node->rb_right;
652 else
653 return line;
654 }
655 return NULL;
656 }
657
658 static void supinfo_to_lineinfo(struct gpio_desc *desc,
659 struct gpio_v2_line_info *info)
660 {
661 struct gpio_v2_line_attribute *attr;
662 struct line *line;
663
664 guard(spinlock)(&supinfo_lock);
665
666 line = supinfo_find(desc);
667 if (!line)
668 return;
669
670 attr = &info->attrs[info->num_attrs];
671 attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
672 attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
673 info->num_attrs++;
674 }
675
676 static inline bool line_has_supinfo(struct line *line)
677 {
678 return READ_ONCE(line->debounce_period_us);
679 }
680
681 /*
682 * Checks line_has_supinfo() before and after the change to avoid unnecessary
683 * supinfo_tree access.
684 * Called indirectly by linereq_create() or linereq_set_config() so line
685 * is already protected from concurrent changes.
686 */
687 static void line_set_debounce_period(struct line *line,
688 unsigned int debounce_period_us)
689 {
690 bool was_suppl = line_has_supinfo(line);
691
692 WRITE_ONCE(line->debounce_period_us, debounce_period_us);
693
694 /* if supinfo status is unchanged then we're done */
695 if (line_has_supinfo(line) == was_suppl)
696 return;
697
698 /* supinfo status has changed, so update the tree */
699 if (was_suppl)
700 supinfo_erase(line);
701 else
702 supinfo_insert(line);
703 }
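/*
 * For example, setting a non-zero debounce period on a line that had
 * none inserts it into supinfo_tree, while clearing the period back to
 * zero removes it again; updates that do not cross zero leave the tree
 * untouched.
 */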
704
705 #define GPIO_V2_LINE_BIAS_FLAGS \
706 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
707 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
708 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
709
710 #define GPIO_V2_LINE_DIRECTION_FLAGS \
711 (GPIO_V2_LINE_FLAG_INPUT | \
712 GPIO_V2_LINE_FLAG_OUTPUT)
713
714 #define GPIO_V2_LINE_DRIVE_FLAGS \
715 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
716 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
717
718 #define GPIO_V2_LINE_EDGE_FLAGS \
719 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
720 GPIO_V2_LINE_FLAG_EDGE_FALLING)
721
722 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
723
724 #define GPIO_V2_LINE_VALID_FLAGS \
725 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
726 GPIO_V2_LINE_DIRECTION_FLAGS | \
727 GPIO_V2_LINE_DRIVE_FLAGS | \
728 GPIO_V2_LINE_EDGE_FLAGS | \
729 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
730 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
731 GPIO_V2_LINE_BIAS_FLAGS)
732
733 /* subset of flags relevant for edge detector configuration */
734 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
735 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
736 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
737 GPIO_V2_LINE_EDGE_FLAGS)
738
739 static int linereq_unregistered_notify(struct notifier_block *nb,
740 unsigned long action, void *data)
741 {
742 struct linereq *lr = container_of(nb, struct linereq,
743 device_unregistered_nb);
744
745 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
746
747 return NOTIFY_OK;
748 }
749
750 static void linereq_put_event(struct linereq *lr,
751 struct gpio_v2_line_event *le)
752 {
753 bool overflow = false;
754
755 spin_lock(&lr->wait.lock);
756 if (kfifo_is_full(&lr->events)) {
757 overflow = true;
758 kfifo_skip(&lr->events);
759 }
760 kfifo_in(&lr->events, le, 1);
761 spin_unlock(&lr->wait.lock);
762 if (!overflow)
763 wake_up_poll(&lr->wait, EPOLLIN);
764 else
765 pr_debug_ratelimited("event FIFO is full - event dropped\n");
766 }
767
768 static u64 line_event_timestamp(struct line *line)
769 {
770 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
771 return ktime_get_real_ns();
772 else if (IS_ENABLED(CONFIG_HTE) &&
773 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
774 return line->timestamp_ns;
775
776 return ktime_get_ns();
777 }
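/*
 * Event timestamps are thus taken from, in order of precedence:
 * CLOCK_REALTIME if requested, the HTE provider if hardware
 * timestamping is enabled, and CLOCK_MONOTONIC otherwise.
 */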
778
779 static u32 line_event_id(int level)
780 {
781 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
782 GPIO_V2_LINE_EVENT_FALLING_EDGE;
783 }
784
785 static inline char *make_irq_label(const char *orig)
786 {
787 char *new;
788
789 if (!orig)
790 return NULL;
791
792 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
793 if (!new)
794 return ERR_PTR(-ENOMEM);
795
796 return new;
797 }
798
799 static inline void free_irq_label(const char *label)
800 {
801 kfree(label);
802 }
803
804 #ifdef CONFIG_HTE
805
806 static enum hte_return process_hw_ts_thread(void *p)
807 {
808 struct line *line;
809 struct linereq *lr;
810 struct gpio_v2_line_event le;
811 u64 edflags;
812 int level;
813
814 if (!p)
815 return HTE_CB_HANDLED;
816
817 line = p;
818 lr = line->req;
819
820 memset(&le, 0, sizeof(le));
821
822 le.timestamp_ns = line->timestamp_ns;
823 edflags = READ_ONCE(line->edflags);
824
825 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
826 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
827 level = (line->raw_level >= 0) ?
828 line->raw_level :
829 gpiod_get_raw_value_cansleep(line->desc);
830
831 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
832 level = !level;
833
834 le.id = line_event_id(level);
835 break;
836 case GPIO_V2_LINE_FLAG_EDGE_RISING:
837 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
838 break;
839 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
840 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
841 break;
842 default:
843 return HTE_CB_HANDLED;
844 }
845 le.line_seqno = line->line_seqno;
846 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
847 le.offset = gpio_chip_hwgpio(line->desc);
848
849 linereq_put_event(lr, &le);
850
851 return HTE_CB_HANDLED;
852 }
853
854 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
855 {
856 struct line *line;
857 struct linereq *lr;
858 int diff_seqno = 0;
859
860 if (!ts || !p)
861 return HTE_CB_HANDLED;
862
863 line = p;
864 line->timestamp_ns = ts->tsc;
865 line->raw_level = ts->raw_level;
866 lr = line->req;
867
868 if (READ_ONCE(line->sw_debounced)) {
869 line->total_discard_seq++;
870 line->last_seqno = ts->seq;
871 mod_delayed_work(system_wq, &line->work,
872 usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
873 } else {
874 if (unlikely(ts->seq < line->line_seqno))
875 return HTE_CB_HANDLED;
876
877 diff_seqno = ts->seq - line->line_seqno;
878 line->line_seqno = ts->seq;
879 if (lr->num_lines != 1)
880 line->req_seqno = atomic_add_return(diff_seqno,
881 &lr->seqno);
882
883 return HTE_RUN_SECOND_CB;
884 }
885
886 return HTE_CB_HANDLED;
887 }
888
889 static int hte_edge_setup(struct line *line, u64 eflags)
890 {
891 int ret;
892 unsigned long flags = 0;
893 struct hte_ts_desc *hdesc = &line->hdesc;
894
895 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
896 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
897 HTE_FALLING_EDGE_TS :
898 HTE_RISING_EDGE_TS;
899 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
900 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
901 HTE_RISING_EDGE_TS :
902 HTE_FALLING_EDGE_TS;
903
904 line->total_discard_seq = 0;
905
906 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
907 line->desc);
908
909 ret = hte_ts_get(NULL, hdesc, 0);
910 if (ret)
911 return ret;
912
913 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
914 line);
915 }
916
917 #else
918
919 static int hte_edge_setup(struct line *line, u64 eflags)
920 {
921 return 0;
922 }
923 #endif /* CONFIG_HTE */
924
925 static irqreturn_t edge_irq_thread(int irq, void *p)
926 {
927 struct line *line = p;
928 struct linereq *lr = line->req;
929 struct gpio_v2_line_event le;
930
931 /* Do not leak kernel stack to userspace */
932 memset(&le, 0, sizeof(le));
933
934 if (line->timestamp_ns) {
935 le.timestamp_ns = line->timestamp_ns;
936 } else {
937 /*
938 * We may be running from a nested threaded interrupt in
939 * which case we didn't get the timestamp from
940 * edge_irq_handler().
941 */
942 le.timestamp_ns = line_event_timestamp(line);
943 if (lr->num_lines != 1)
944 line->req_seqno = atomic_inc_return(&lr->seqno);
945 }
946 line->timestamp_ns = 0;
947
948 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
949 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
950 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
951 break;
952 case GPIO_V2_LINE_FLAG_EDGE_RISING:
953 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
954 break;
955 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
956 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
957 break;
958 default:
959 return IRQ_NONE;
960 }
961 line->line_seqno++;
962 le.line_seqno = line->line_seqno;
963 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
964 le.offset = gpio_chip_hwgpio(line->desc);
965
966 linereq_put_event(lr, &le);
967
968 return IRQ_HANDLED;
969 }
970
971 static irqreturn_t edge_irq_handler(int irq, void *p)
972 {
973 struct line *line = p;
974 struct linereq *lr = line->req;
975
976 /*
977 * Just store the timestamp in hardirq context so we get it as
978 * close in time as possible to the actual event.
979 */
980 line->timestamp_ns = line_event_timestamp(line);
981
982 if (lr->num_lines != 1)
983 line->req_seqno = atomic_inc_return(&lr->seqno);
984
985 return IRQ_WAKE_THREAD;
986 }
987
988 /*
989 * returns the current debounced logical value.
990 */
991 static bool debounced_value(struct line *line)
992 {
993 bool value;
994
995 /*
996 * minor race - debouncer may be stopped here, so edge_detector_stop()
997 * must leave the value unchanged so the following will read the level
998 * from when the debouncer was last running.
999 */
1000 value = READ_ONCE(line->level);
1001
1002 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
1003 value = !value;
1004
1005 return value;
1006 }
1007
1008 static irqreturn_t debounce_irq_handler(int irq, void *p)
1009 {
1010 struct line *line = p;
1011
1012 mod_delayed_work(system_wq, &line->work,
1013 usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
1014
1015 return IRQ_HANDLED;
1016 }
1017
1018 static void debounce_work_func(struct work_struct *work)
1019 {
1020 struct gpio_v2_line_event le;
1021 struct line *line = container_of(work, struct line, work.work);
1022 struct linereq *lr;
1023 u64 eflags, edflags = READ_ONCE(line->edflags);
1024 int level = -1;
1025 #ifdef CONFIG_HTE
1026 int diff_seqno;
1027
1028 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1029 level = line->raw_level;
1030 #endif
1031 if (level < 0)
1032 level = gpiod_get_raw_value_cansleep(line->desc);
1033 if (level < 0) {
1034 pr_debug_ratelimited("debouncer failed to read line value\n");
1035 return;
1036 }
1037
1038 if (READ_ONCE(line->level) == level)
1039 return;
1040
1041 WRITE_ONCE(line->level, level);
1042
1043 /* -- edge detection -- */
1044 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1045 if (!eflags)
1046 return;
1047
1048 /* switch from physical level to logical - if they differ */
1049 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
1050 level = !level;
1051
1052 /* ignore edges that are not being monitored */
1053 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
1054 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
1055 return;
1056
1057 /* Do not leak kernel stack to userspace */
1058 memset(&le, 0, sizeof(le));
1059
1060 lr = line->req;
1061 le.timestamp_ns = line_event_timestamp(line);
1062 le.offset = gpio_chip_hwgpio(line->desc);
1063 #ifdef CONFIG_HTE
1064 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
1065 /* discard events except the last one */
1066 line->total_discard_seq -= 1;
1067 diff_seqno = line->last_seqno - line->total_discard_seq -
1068 line->line_seqno;
1069 line->line_seqno = line->last_seqno - line->total_discard_seq;
1070 le.line_seqno = line->line_seqno;
1071 le.seqno = (lr->num_lines == 1) ?
1072 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
1073 } else
1074 #endif /* CONFIG_HTE */
1075 {
1076 line->line_seqno++;
1077 le.line_seqno = line->line_seqno;
1078 le.seqno = (lr->num_lines == 1) ?
1079 le.line_seqno : atomic_inc_return(&lr->seqno);
1080 }
1081
1082 le.id = line_event_id(level);
1083
1084 linereq_put_event(lr, &le);
1085 }
1086
1087 static int debounce_setup(struct line *line, unsigned int debounce_period_us)
1088 {
1089 unsigned long irqflags;
1090 int ret, level, irq;
1091 char *label;
1092
1093 /* try hardware */
1094 ret = gpiod_set_debounce(line->desc, debounce_period_us);
1095 if (!ret) {
1096 line_set_debounce_period(line, debounce_period_us);
1097 return ret;
1098 }
1099 if (ret != -ENOTSUPP)
1100 return ret;
1101
1102 if (debounce_period_us) {
1103 /* setup software debounce */
1104 level = gpiod_get_raw_value_cansleep(line->desc);
1105 if (level < 0)
1106 return level;
1107
1108 if (!(IS_ENABLED(CONFIG_HTE) &&
1109 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
1110 irq = gpiod_to_irq(line->desc);
1111 if (irq < 0)
1112 return -ENXIO;
1113
1114 label = make_irq_label(line->req->label);
1115 if (IS_ERR(label))
1116 return -ENOMEM;
1117
1118 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
1119 ret = request_irq(irq, debounce_irq_handler, irqflags,
1120 label, line);
1121 if (ret) {
1122 free_irq_label(label);
1123 return ret;
1124 }
1125 line->irq = irq;
1126 } else {
1127 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
1128 if (ret)
1129 return ret;
1130 }
1131
1132 WRITE_ONCE(line->level, level);
1133 WRITE_ONCE(line->sw_debounced, 1);
1134 }
1135 return 0;
1136 }
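/*
 * To summarise the fallback above: hardware debounce via
 * gpiod_set_debounce() is preferred; only if the driver reports
 * -ENOTSUPP is the software debouncer armed, driven either by
 * debounce_irq_handler() or, for HTE timestamped lines, by
 * process_hw_ts(), both of which (re)schedule debounce_work_func().
 */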
1137
1138 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
1139 unsigned int line_idx)
1140 {
1141 unsigned int i;
1142 u64 mask = BIT_ULL(line_idx);
1143
1144 for (i = 0; i < lc->num_attrs; i++) {
1145 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
1146 (lc->attrs[i].mask & mask))
1147 return true;
1148 }
1149 return false;
1150 }
1151
1152 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
1153 unsigned int line_idx)
1154 {
1155 unsigned int i;
1156 u64 mask = BIT_ULL(line_idx);
1157
1158 for (i = 0; i < lc->num_attrs; i++) {
1159 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
1160 (lc->attrs[i].mask & mask))
1161 return lc->attrs[i].attr.debounce_period_us;
1162 }
1163 return 0;
1164 }
1165
1166 static void edge_detector_stop(struct line *line)
1167 {
1168 if (line->irq) {
1169 free_irq_label(free_irq(line->irq, line));
1170 line->irq = 0;
1171 }
1172
1173 #ifdef CONFIG_HTE
1174 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1175 hte_ts_put(&line->hdesc);
1176 #endif
1177
1178 cancel_delayed_work_sync(&line->work);
1179 WRITE_ONCE(line->sw_debounced, 0);
1180 WRITE_ONCE(line->edflags, 0);
1181 line_set_debounce_period(line, 0);
1182 /* do not change line->level - see comment in debounced_value() */
1183 }
1184
1185 static int edge_detector_setup(struct line *line,
1186 struct gpio_v2_line_config *lc,
1187 unsigned int line_idx, u64 edflags)
1188 {
1189 u32 debounce_period_us;
1190 unsigned long irqflags = 0;
1191 u64 eflags;
1192 int irq, ret;
1193 char *label;
1194
1195 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1196 if (eflags && !kfifo_initialized(&line->req->events)) {
1197 ret = kfifo_alloc(&line->req->events,
1198 line->req->event_buffer_size, GFP_KERNEL);
1199 if (ret)
1200 return ret;
1201 }
1202 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1203 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1204 ret = debounce_setup(line, debounce_period_us);
1205 if (ret)
1206 return ret;
1207 line_set_debounce_period(line, debounce_period_us);
1208 }
1209
1210 /* detection disabled or sw debouncer will provide edge detection */
1211 if (!eflags || READ_ONCE(line->sw_debounced))
1212 return 0;
1213
1214 if (IS_ENABLED(CONFIG_HTE) &&
1215 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1216 return hte_edge_setup(line, edflags);
1217
1218 irq = gpiod_to_irq(line->desc);
1219 if (irq < 0)
1220 return -ENXIO;
1221
1222 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1223 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1224 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1225 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1226 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1227 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1228 irqflags |= IRQF_ONESHOT;
1229
1230 label = make_irq_label(line->req->label);
1231 if (IS_ERR(label))
1232 return PTR_ERR(label);
1233
1234 /* Request a thread to read the events */
1235 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1236 irqflags, label, line);
1237 if (ret) {
1238 free_irq_label(label);
1239 return ret;
1240 }
1241
1242 line->irq = irq;
1243 return 0;
1244 }
1245
1246 static int edge_detector_update(struct line *line,
1247 struct gpio_v2_line_config *lc,
1248 unsigned int line_idx, u64 edflags)
1249 {
1250 u64 eflags;
1251 int ret;
1252 u64 active_edflags = READ_ONCE(line->edflags);
1253 unsigned int debounce_period_us =
1254 gpio_v2_line_config_debounce_period(lc, line_idx);
1255
1256 if ((active_edflags == edflags) &&
1257 (READ_ONCE(line->debounce_period_us) == debounce_period_us))
1258 return 0;
1259
1260 /* sw debounced and still will be...*/
1261 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1262 line_set_debounce_period(line, debounce_period_us);
1263 /*
1264 * ensure event fifo is initialised if edge detection
1265 * is now enabled.
1266 */
1267 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1268 if (eflags && !kfifo_initialized(&line->req->events)) {
1269 ret = kfifo_alloc(&line->req->events,
1270 line->req->event_buffer_size,
1271 GFP_KERNEL);
1272 if (ret)
1273 return ret;
1274 }
1275 return 0;
1276 }
1277
1278 /* reconfiguring edge detection or sw debounce being disabled */
1279 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1280 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1281 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1282 edge_detector_stop(line);
1283
1284 return edge_detector_setup(line, lc, line_idx, edflags);
1285 }
1286
1287 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1288 unsigned int line_idx)
1289 {
1290 unsigned int i;
1291 u64 mask = BIT_ULL(line_idx);
1292
1293 for (i = 0; i < lc->num_attrs; i++) {
1294 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1295 (lc->attrs[i].mask & mask))
1296 return lc->attrs[i].attr.flags;
1297 }
1298 return lc->flags;
1299 }
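/*
 * The first GPIO_V2_LINE_ATTR_ID_FLAGS attribute whose mask covers the
 * line wins; e.g. with lc->flags requesting input and a single flags
 * attribute with mask BIT(2) requesting output, line index 2 becomes
 * an output while the remaining lines stay inputs.
 */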
1300
1301 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1302 unsigned int line_idx)
1303 {
1304 unsigned int i;
1305 u64 mask = BIT_ULL(line_idx);
1306
1307 for (i = 0; i < lc->num_attrs; i++) {
1308 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1309 (lc->attrs[i].mask & mask))
1310 return !!(lc->attrs[i].attr.values & mask);
1311 }
1312 return 0;
1313 }
1314
1315 static int gpio_v2_line_flags_validate(u64 flags)
1316 {
1317 /* Return an error if an unknown flag is set */
1318 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1319 return -EINVAL;
1320
1321 if (!IS_ENABLED(CONFIG_HTE) &&
1322 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1323 return -EOPNOTSUPP;
1324
1325 /*
1326 * Do not allow both INPUT and OUTPUT flags to be set as they are
1327 * contradictory.
1328 */
1329 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1330 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1331 return -EINVAL;
1332
1333 /* Only allow one event clock source */
1334 if (IS_ENABLED(CONFIG_HTE) &&
1335 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1336 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1337 return -EINVAL;
1338
1339 /* Edge detection requires explicit input. */
1340 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1341 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1342 return -EINVAL;
1343
1344 /*
1345 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1346 * request. If the hardware actually supports enabling both at the
1347 * same time the electrical result would be disastrous.
1348 */
1349 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1350 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1351 return -EINVAL;
1352
1353 /* Drive requires explicit output direction. */
1354 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1355 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1356 return -EINVAL;
1357
1358 /* Bias requires explicit direction. */
1359 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1360 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1361 return -EINVAL;
1362
1363 /* Only one bias flag can be set. */
1364 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1365 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1366 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1367 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1368 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1369 return -EINVAL;
1370
1371 return 0;
1372 }
1373
1374 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1375 unsigned int num_lines)
1376 {
1377 unsigned int i;
1378 u64 flags;
1379 int ret;
1380
1381 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1382 return -EINVAL;
1383
1384 if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
1385 return -EINVAL;
1386
1387 for (i = 0; i < num_lines; i++) {
1388 flags = gpio_v2_line_config_flags(lc, i);
1389 ret = gpio_v2_line_flags_validate(flags);
1390 if (ret)
1391 return ret;
1392
1393 /* debounce requires explicit input */
1394 if (gpio_v2_line_config_debounced(lc, i) &&
1395 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1396 return -EINVAL;
1397 }
1398 return 0;
1399 }
1400
1401 static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
1402 unsigned long *flagsp)
1403 {
1404 assign_bit(FLAG_ACTIVE_LOW, flagsp,
1405 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1406
1407 if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
1408 set_bit(FLAG_IS_OUT, flagsp);
1409 else if (flags & GPIO_V2_LINE_FLAG_INPUT)
1410 clear_bit(FLAG_IS_OUT, flagsp);
1411
1412 assign_bit(FLAG_EDGE_RISING, flagsp,
1413 flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1414 assign_bit(FLAG_EDGE_FALLING, flagsp,
1415 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1416
1417 assign_bit(FLAG_OPEN_DRAIN, flagsp,
1418 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1419 assign_bit(FLAG_OPEN_SOURCE, flagsp,
1420 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1421
1422 assign_bit(FLAG_PULL_UP, flagsp,
1423 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1424 assign_bit(FLAG_PULL_DOWN, flagsp,
1425 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1426 assign_bit(FLAG_BIAS_DISABLE, flagsp,
1427 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1428
1429 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
1430 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1431 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
1432 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1433 }
1434
1435 static long linereq_get_values(struct linereq *lr, void __user *ip)
1436 {
1437 struct gpio_v2_line_values lv;
1438 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1439 struct gpio_desc **descs;
1440 unsigned int i, didx, num_get;
1441 bool val;
1442 int ret;
1443
1444 /* NOTE: It's ok to read values of output lines. */
1445 if (copy_from_user(&lv, ip, sizeof(lv)))
1446 return -EFAULT;
1447
1448 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1449 if (lv.mask & BIT_ULL(i)) {
1450 num_get++;
1451 descs = &lr->lines[i].desc;
1452 }
1453 }
1454
1455 if (num_get == 0)
1456 return -EINVAL;
1457
1458 if (num_get != 1) {
1459 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1460 if (!descs)
1461 return -ENOMEM;
1462 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1463 if (lv.mask & BIT_ULL(i)) {
1464 descs[didx] = lr->lines[i].desc;
1465 didx++;
1466 }
1467 }
1468 }
1469 ret = gpiod_get_array_value_complex(false, true, num_get,
1470 descs, NULL, vals);
1471
1472 if (num_get != 1)
1473 kfree(descs);
1474 if (ret)
1475 return ret;
1476
1477 lv.bits = 0;
1478 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1479 if (lv.mask & BIT_ULL(i)) {
1480 if (lr->lines[i].sw_debounced)
1481 val = debounced_value(&lr->lines[i]);
1482 else
1483 val = test_bit(didx, vals);
1484 if (val)
1485 lv.bits |= BIT_ULL(i);
1486 didx++;
1487 }
1488 }
1489
1490 if (copy_to_user(ip, &lv, sizeof(lv)))
1491 return -EFAULT;
1492
1493 return 0;
1494 }
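/*
 * Bits in both lv.mask and lv.bits are indexed by position within the
 * line request, not by hardware offset; e.g. a mask of 0x5 reads the
 * first and third requested lines and returns their logical values in
 * bits 0 and 2 of lv.bits.
 */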
1495
1496 static long linereq_set_values_unlocked(struct linereq *lr,
1497 struct gpio_v2_line_values *lv)
1498 {
1499 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1500 struct gpio_desc **descs;
1501 unsigned int i, didx, num_set;
1502 int ret;
1503
1504 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1505 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1506 if (lv->mask & BIT_ULL(i)) {
1507 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1508 return -EPERM;
1509 if (lv->bits & BIT_ULL(i))
1510 __set_bit(num_set, vals);
1511 num_set++;
1512 descs = &lr->lines[i].desc;
1513 }
1514 }
1515 if (num_set == 0)
1516 return -EINVAL;
1517
1518 if (num_set != 1) {
1519 /* build compacted desc array and values */
1520 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1521 if (!descs)
1522 return -ENOMEM;
1523 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1524 if (lv->mask & BIT_ULL(i)) {
1525 descs[didx] = lr->lines[i].desc;
1526 didx++;
1527 }
1528 }
1529 }
1530 ret = gpiod_set_array_value_complex(false, true, num_set,
1531 descs, NULL, vals);
1532
1533 if (num_set != 1)
1534 kfree(descs);
1535 return ret;
1536 }
1537
1538 static long linereq_set_values(struct linereq *lr, void __user *ip)
1539 {
1540 struct gpio_v2_line_values lv;
1541 int ret;
1542
1543 if (copy_from_user(&lv, ip, sizeof(lv)))
1544 return -EFAULT;
1545
1546 mutex_lock(&lr->config_mutex);
1547
1548 ret = linereq_set_values_unlocked(lr, &lv);
1549
1550 mutex_unlock(&lr->config_mutex);
1551
1552 return ret;
1553 }
1554
1555 static long linereq_set_config_unlocked(struct linereq *lr,
1556 struct gpio_v2_line_config *lc)
1557 {
1558 struct gpio_desc *desc;
1559 struct line *line;
1560 unsigned int i;
1561 u64 flags, edflags;
1562 int ret;
1563
1564 for (i = 0; i < lr->num_lines; i++) {
1565 line = &lr->lines[i];
1566 desc = lr->lines[i].desc;
1567 flags = gpio_v2_line_config_flags(lc, i);
1568 /*
1569 * Lines not explicitly reconfigured as input or output
1570 * are left unchanged.
1571 */
1572 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1573 continue;
1574 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1575 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1576 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1577 int val = gpio_v2_line_config_output_value(lc, i);
1578
1579 edge_detector_stop(line);
1580 ret = gpiod_direction_output(desc, val);
1581 if (ret)
1582 return ret;
1583 } else {
1584 ret = gpiod_direction_input(desc);
1585 if (ret)
1586 return ret;
1587
1588 ret = edge_detector_update(line, lc, i, edflags);
1589 if (ret)
1590 return ret;
1591 }
1592
1593 WRITE_ONCE(line->edflags, edflags);
1594
1595 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1596 }
1597 return 0;
1598 }
1599
1600 static long linereq_set_config(struct linereq *lr, void __user *ip)
1601 {
1602 struct gpio_v2_line_config lc;
1603 int ret;
1604
1605 if (copy_from_user(&lc, ip, sizeof(lc)))
1606 return -EFAULT;
1607
1608 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1609 if (ret)
1610 return ret;
1611
1612 mutex_lock(&lr->config_mutex);
1613
1614 ret = linereq_set_config_unlocked(lr, &lc);
1615
1616 mutex_unlock(&lr->config_mutex);
1617
1618 return ret;
1619 }
1620
1621 static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
1622 unsigned long arg)
1623 {
1624 struct linereq *lr = file->private_data;
1625 void __user *ip = (void __user *)arg;
1626
1627 if (!lr->gdev->chip)
1628 return -ENODEV;
1629
1630 switch (cmd) {
1631 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1632 return linereq_get_values(lr, ip);
1633 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1634 return linereq_set_values(lr, ip);
1635 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1636 return linereq_set_config(lr, ip);
1637 default:
1638 return -EINVAL;
1639 }
1640 }
1641
1642 static long linereq_ioctl(struct file *file, unsigned int cmd,
1643 unsigned long arg)
1644 {
1645 struct linereq *lr = file->private_data;
1646
1647 return call_ioctl_locked(file, cmd, arg, lr->gdev,
1648 linereq_ioctl_unlocked);
1649 }
1650
1651 #ifdef CONFIG_COMPAT
1652 static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1653 unsigned long arg)
1654 {
1655 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1656 }
1657 #endif
1658
1659 static __poll_t linereq_poll_unlocked(struct file *file,
1660 struct poll_table_struct *wait)
1661 {
1662 struct linereq *lr = file->private_data;
1663 __poll_t events = 0;
1664
1665 if (!lr->gdev->chip)
1666 return EPOLLHUP | EPOLLERR;
1667
1668 poll_wait(file, &lr->wait, wait);
1669
1670 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1671 &lr->wait.lock))
1672 events = EPOLLIN | EPOLLRDNORM;
1673
1674 return events;
1675 }
1676
1677 static __poll_t linereq_poll(struct file *file,
1678 struct poll_table_struct *wait)
1679 {
1680 struct linereq *lr = file->private_data;
1681
1682 return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
1683 }
1684
1685 static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
1686 size_t count, loff_t *f_ps)
1687 {
1688 struct linereq *lr = file->private_data;
1689 struct gpio_v2_line_event le;
1690 ssize_t bytes_read = 0;
1691 int ret;
1692
1693 if (!lr->gdev->chip)
1694 return -ENODEV;
1695
1696 if (count < sizeof(le))
1697 return -EINVAL;
1698
1699 do {
1700 spin_lock(&lr->wait.lock);
1701 if (kfifo_is_empty(&lr->events)) {
1702 if (bytes_read) {
1703 spin_unlock(&lr->wait.lock);
1704 return bytes_read;
1705 }
1706
1707 if (file->f_flags & O_NONBLOCK) {
1708 spin_unlock(&lr->wait.lock);
1709 return -EAGAIN;
1710 }
1711
1712 ret = wait_event_interruptible_locked(lr->wait,
1713 !kfifo_is_empty(&lr->events));
1714 if (ret) {
1715 spin_unlock(&lr->wait.lock);
1716 return ret;
1717 }
1718 }
1719
1720 ret = kfifo_out(&lr->events, &le, 1);
1721 spin_unlock(&lr->wait.lock);
1722 if (ret != 1) {
1723 /*
1724 * This should never happen - we were holding the
1725 * lock from the moment we learned the fifo is no
1726 * longer empty until now.
1727 */
1728 ret = -EIO;
1729 break;
1730 }
1731
1732 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1733 return -EFAULT;
1734 bytes_read += sizeof(le);
1735 } while (count >= bytes_read + sizeof(le));
1736
1737 return bytes_read;
1738 }
1739
1740 static ssize_t linereq_read(struct file *file, char __user *buf,
1741 size_t count, loff_t *f_ps)
1742 {
1743 struct linereq *lr = file->private_data;
1744
1745 return call_read_locked(file, buf, count, f_ps, lr->gdev,
1746 linereq_read_unlocked);
1747 }
1748
1749 static void linereq_free(struct linereq *lr)
1750 {
1751 struct line *line;
1752 unsigned int i;
1753
1754 if (lr->device_unregistered_nb.notifier_call)
1755 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1756 &lr->device_unregistered_nb);
1757
1758 for (i = 0; i < lr->num_lines; i++) {
1759 line = &lr->lines[i];
1760 if (!line->desc)
1761 continue;
1762
1763 edge_detector_stop(line);
1764 if (line_has_supinfo(line))
1765 supinfo_erase(line);
1766 gpiod_free(line->desc);
1767 }
1768 kfifo_free(&lr->events);
1769 kfree(lr->label);
1770 gpio_device_put(lr->gdev);
1771 kfree(lr);
1772 }
1773
1774 static int linereq_release(struct inode *inode, struct file *file)
1775 {
1776 struct linereq *lr = file->private_data;
1777
1778 linereq_free(lr);
1779 return 0;
1780 }
1781
1782 #ifdef CONFIG_PROC_FS
1783 static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1784 {
1785 struct linereq *lr = file->private_data;
1786 struct device *dev = &lr->gdev->dev;
1787 u16 i;
1788
1789 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1790
1791 for (i = 0; i < lr->num_lines; i++)
1792 seq_printf(out, "gpio-line:\t%d\n",
1793 gpio_chip_hwgpio(lr->lines[i].desc));
1794 }
1795 #endif
1796
1797 static const struct file_operations line_fileops = {
1798 .release = linereq_release,
1799 .read = linereq_read,
1800 .poll = linereq_poll,
1801 .owner = THIS_MODULE,
1802 .llseek = noop_llseek,
1803 .unlocked_ioctl = linereq_ioctl,
1804 #ifdef CONFIG_COMPAT
1805 .compat_ioctl = linereq_ioctl_compat,
1806 #endif
1807 #ifdef CONFIG_PROC_FS
1808 .show_fdinfo = linereq_show_fdinfo,
1809 #endif
1810 };
1811
1812 static int linereq_create(struct gpio_device *gdev, void __user *ip)
1813 {
1814 struct gpio_v2_line_request ulr;
1815 struct gpio_v2_line_config *lc;
1816 struct linereq *lr;
1817 struct file *file;
1818 u64 flags, edflags;
1819 unsigned int i;
1820 int fd, ret;
1821
1822 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1823 return -EFAULT;
1824
1825 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1826 return -EINVAL;
1827
1828 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
1829 return -EINVAL;
1830
1831 lc = &ulr.config;
1832 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1833 if (ret)
1834 return ret;
1835
1836 lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1837 if (!lr)
1838 return -ENOMEM;
1839
1840 lr->gdev = gpio_device_get(gdev);
1841
1842 for (i = 0; i < ulr.num_lines; i++) {
1843 lr->lines[i].req = lr;
1844 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1845 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1846 }
1847
1848 if (ulr.consumer[0] != '\0') {
1849 /* label is only initialized if consumer is set */
1850 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1851 GFP_KERNEL);
1852 if (!lr->label) {
1853 ret = -ENOMEM;
1854 goto out_free_linereq;
1855 }
1856 }
1857
1858 mutex_init(&lr->config_mutex);
1859 init_waitqueue_head(&lr->wait);
1860 lr->event_buffer_size = ulr.event_buffer_size;
1861 if (lr->event_buffer_size == 0)
1862 lr->event_buffer_size = ulr.num_lines * 16;
1863 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1864 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1865
1866 atomic_set(&lr->seqno, 0);
1867 lr->num_lines = ulr.num_lines;
1868
1869 /* Request each GPIO */
1870 for (i = 0; i < ulr.num_lines; i++) {
1871 u32 offset = ulr.offsets[i];
1872 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
1873
1874 if (IS_ERR(desc)) {
1875 ret = PTR_ERR(desc);
1876 goto out_free_linereq;
1877 }
1878
1879 ret = gpiod_request_user(desc, lr->label);
1880 if (ret)
1881 goto out_free_linereq;
1882
1883 lr->lines[i].desc = desc;
1884 flags = gpio_v2_line_config_flags(lc, i);
1885 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1886
1887 ret = gpiod_set_transitory(desc, false);
1888 if (ret < 0)
1889 goto out_free_linereq;
1890
1891 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1892 /*
1893 * A line has to be requested explicitly for input or output, else
1894 * it is treated "as is".
1895 */
1896 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1897 int val = gpio_v2_line_config_output_value(lc, i);
1898
1899 ret = gpiod_direction_output(desc, val);
1900 if (ret)
1901 goto out_free_linereq;
1902 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1903 ret = gpiod_direction_input(desc);
1904 if (ret)
1905 goto out_free_linereq;
1906
1907 ret = edge_detector_setup(&lr->lines[i], lc, i,
1908 edflags);
1909 if (ret)
1910 goto out_free_linereq;
1911 }
1912
1913 lr->lines[i].edflags = edflags;
1914
1915 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1916
1917 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1918 offset);
1919 }
1920
1921 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1922 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1923 &lr->device_unregistered_nb);
1924 if (ret)
1925 goto out_free_linereq;
1926
1927 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1928 if (fd < 0) {
1929 ret = fd;
1930 goto out_free_linereq;
1931 }
1932
1933 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1934 O_RDONLY | O_CLOEXEC);
1935 if (IS_ERR(file)) {
1936 ret = PTR_ERR(file);
1937 goto out_put_unused_fd;
1938 }
1939
1940 ulr.fd = fd;
1941 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1942 /*
1943 * fput() will trigger the release() callback, so do not go onto
1944 * the regular error cleanup path here.
1945 */
1946 fput(file);
1947 put_unused_fd(fd);
1948 return -EFAULT;
1949 }
1950
1951 fd_install(fd, file);
1952
1953 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1954 lr->num_lines);
1955
1956 return 0;
1957
1958 out_put_unused_fd:
1959 put_unused_fd(fd);
1960 out_free_linereq:
1961 linereq_free(lr);
1962 return ret;
1963 }
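/*
 * For illustration, a minimal userspace caller of this ioctl() could look
 * roughly like the sketch below (the chip path, offset and consumer name
 * are hypothetical, and error handling is omitted):
 *
 *	struct gpio_v2_line_request req = { 0 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	req.offsets[0] = 4;
 *	req.num_lines = 1;
 *	strcpy(req.consumer, "example-consumer");
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
 *			   GPIO_V2_LINE_FLAG_EDGE_RISING;
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) == 0)
 *		... req.fd is the new line request fd, ready for
 *		    read()/poll() of edge events and further ioctl()s ...
 */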
1964
1965 #ifdef CONFIG_GPIO_CDEV_V1
1966
1967 /*
1968 * GPIO line event management
1969 */
1970
1971 /**
1972 * struct lineevent_state - contains the state of a userspace event
1973 * @gdev: the GPIO device the event pertains to
1974 * @label: consumer label used to tag descriptors
1975 * @desc: the GPIO descriptor held by this event
1976 * @eflags: the event flags this line was requested with
1977 * @irq: the interrupt triggered in response to events on this GPIO
1978 * @wait: wait queue that handles blocking reads of events
1979 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1980 * @events: KFIFO for the GPIO events
1981 * @timestamp: cache for the timestamp, storing it between hardirq and
1982 * the IRQ thread, used to bring the timestamp close to the actual
1983 * event
1984 */
1985 struct lineevent_state {
1986 struct gpio_device *gdev;
1987 const char *label;
1988 struct gpio_desc *desc;
1989 u32 eflags;
1990 int irq;
1991 wait_queue_head_t wait;
1992 struct notifier_block device_unregistered_nb;
1993 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1994 u64 timestamp;
1995 };
1996
1997 #define GPIOEVENT_REQUEST_VALID_FLAGS \
1998 (GPIOEVENT_REQUEST_RISING_EDGE | \
1999 GPIOEVENT_REQUEST_FALLING_EDGE)
2000
2001 static __poll_t lineevent_poll_unlocked(struct file *file,
2002 struct poll_table_struct *wait)
2003 {
2004 struct lineevent_state *le = file->private_data;
2005 __poll_t events = 0;
2006
2007 if (!le->gdev->chip)
2008 return EPOLLHUP | EPOLLERR;
2009
2010 poll_wait(file, &le->wait, wait);
2011
2012 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
2013 events = EPOLLIN | EPOLLRDNORM;
2014
2015 return events;
2016 }
2017
2018 static __poll_t lineevent_poll(struct file *file,
2019 struct poll_table_struct *wait)
2020 {
2021 struct lineevent_state *le = file->private_data;
2022
2023 return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
2024 }
2025
2026 static int lineevent_unregistered_notify(struct notifier_block *nb,
2027 unsigned long action, void *data)
2028 {
2029 struct lineevent_state *le = container_of(nb, struct lineevent_state,
2030 device_unregistered_nb);
2031
2032 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
2033
2034 return NOTIFY_OK;
2035 }
2036
2037 struct compat_gpioeevent_data {
2038 compat_u64 timestamp;
2039 u32 id;
2040 };
2041
2042 static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
2043 size_t count, loff_t *f_ps)
2044 {
2045 struct lineevent_state *le = file->private_data;
2046 struct gpioevent_data ge;
2047 ssize_t bytes_read = 0;
2048 ssize_t ge_size;
2049 int ret;
2050
2051 if (!le->gdev->chip)
2052 return -ENODEV;
2053
2054 /*
2055 * When a compat system call is used, struct gpioevent_data has a
2056 * different size, at least on ia32, due to alignment differences.
2057 * Because the first member is 64 bits wide and is followed by a 32-bit
2058 * one, there is no gap between them - the only difference is the
2059 * padding at the end of the structure. Hence, we calculate the actual
2060 * sizeof() and pass that as an argument to copy_to_user() to drop the
2061 * unneeded bytes from the output.
2062 */
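/*
 * As a concrete (illustrative) example of the above: with
 *
 *	struct gpioevent_data {
 *		__u64 timestamp;
 *		__u32 id;
 *	};
 *
 * a 64-bit kernel pads the struct to 16 bytes, while an ia32 caller,
 * where 64-bit members are only 4-byte aligned, expects 12-byte
 * records - so ge_size below is the record size the caller expects.
 */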
2063 if (compat_need_64bit_alignment_fixup())
2064 ge_size = sizeof(struct compat_gpioeevent_data);
2065 else
2066 ge_size = sizeof(struct gpioevent_data);
2067 if (count < ge_size)
2068 return -EINVAL;
2069
2070 do {
2071 spin_lock(&le->wait.lock);
2072 if (kfifo_is_empty(&le->events)) {
2073 if (bytes_read) {
2074 spin_unlock(&le->wait.lock);
2075 return bytes_read;
2076 }
2077
2078 if (file->f_flags & O_NONBLOCK) {
2079 spin_unlock(&le->wait.lock);
2080 return -EAGAIN;
2081 }
2082
2083 ret = wait_event_interruptible_locked(le->wait,
2084 !kfifo_is_empty(&le->events));
2085 if (ret) {
2086 spin_unlock(&le->wait.lock);
2087 return ret;
2088 }
2089 }
2090
2091 ret = kfifo_out(&le->events, &ge, 1);
2092 spin_unlock(&le->wait.lock);
2093 if (ret != 1) {
2094 /*
2095 * This should never happen - we were holding the lock
2096 * from the moment we learned the fifo is no longer
2097 * empty until now.
2098 */
2099 ret = -EIO;
2100 break;
2101 }
2102
2103 if (copy_to_user(buf + bytes_read, &ge, ge_size))
2104 return -EFAULT;
2105 bytes_read += ge_size;
2106 } while (count >= bytes_read + ge_size);
2107
2108 return bytes_read;
2109 }
2110
2111 static ssize_t lineevent_read(struct file *file, char __user *buf,
2112 size_t count, loff_t *f_ps)
2113 {
2114 struct lineevent_state *le = file->private_data;
2115
2116 return call_read_locked(file, buf, count, f_ps, le->gdev,
2117 lineevent_read_unlocked);
2118 }
2119
2120 static void lineevent_free(struct lineevent_state *le)
2121 {
2122 if (le->device_unregistered_nb.notifier_call)
2123 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
2124 &le->device_unregistered_nb);
2125 if (le->irq)
2126 free_irq_label(free_irq(le->irq, le));
2127 if (le->desc)
2128 gpiod_free(le->desc);
2129 kfree(le->label);
2130 gpio_device_put(le->gdev);
2131 kfree(le);
2132 }
2133
2134 static int lineevent_release(struct inode *inode, struct file *file)
2135 {
2136 lineevent_free(file->private_data);
2137 return 0;
2138 }
2139
2140 static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
2141 unsigned long arg)
2142 {
2143 struct lineevent_state *le = file->private_data;
2144 void __user *ip = (void __user *)arg;
2145 struct gpiohandle_data ghd;
2146
2147 if (!le->gdev->chip)
2148 return -ENODEV;
2149
2150 /*
2151 * We can get the value for an event line but not set it,
2152 * because it is input by definition.
2153 */
2154 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
2155 int val;
2156
2157 memset(&ghd, 0, sizeof(ghd));
2158
2159 val = gpiod_get_value_cansleep(le->desc);
2160 if (val < 0)
2161 return val;
2162 ghd.values[0] = val;
2163
2164 if (copy_to_user(ip, &ghd, sizeof(ghd)))
2165 return -EFAULT;
2166
2167 return 0;
2168 }
2169 return -EINVAL;
2170 }
2171
2172 static long lineevent_ioctl(struct file *file, unsigned int cmd,
2173 unsigned long arg)
2174 {
2175 struct lineevent_state *le = file->private_data;
2176
2177 return call_ioctl_locked(file, cmd, arg, le->gdev,
2178 lineevent_ioctl_unlocked);
2179 }
2180
2181 #ifdef CONFIG_COMPAT
2182 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
2183 unsigned long arg)
2184 {
2185 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2186 }
2187 #endif
2188
2189 static const struct file_operations lineevent_fileops = {
2190 .release = lineevent_release,
2191 .read = lineevent_read,
2192 .poll = lineevent_poll,
2193 .owner = THIS_MODULE,
2194 .llseek = noop_llseek,
2195 .unlocked_ioctl = lineevent_ioctl,
2196 #ifdef CONFIG_COMPAT
2197 .compat_ioctl = lineevent_ioctl_compat,
2198 #endif
2199 };
2200
2201 static irqreturn_t lineevent_irq_thread(int irq, void *p)
2202 {
2203 struct lineevent_state *le = p;
2204 struct gpioevent_data ge;
2205 int ret;
2206
2207 /* Do not leak kernel stack to userspace */
2208 memset(&ge, 0, sizeof(ge));
2209
2210 /*
2211 * We may be running from a nested threaded interrupt in which case
2212 * we didn't get the timestamp from lineevent_irq_handler().
2213 */
2214 if (!le->timestamp)
2215 ge.timestamp = ktime_get_ns();
2216 else
2217 ge.timestamp = le->timestamp;
2218
2219 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
2220 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2221 int level = gpiod_get_value_cansleep(le->desc);
2222
2223 if (level)
2224 /* Emit low-to-high event */
2225 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2226 else
2227 /* Emit high-to-low event */
2228 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2229 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2230 /* Emit low-to-high event */
2231 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2232 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2233 /* Emit high-to-low event */
2234 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2235 } else {
2236 return IRQ_NONE;
2237 }
2238
2239 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2240 1, &le->wait.lock);
2241 if (ret)
2242 wake_up_poll(&le->wait, EPOLLIN);
2243 else
2244 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2245
2246 return IRQ_HANDLED;
2247 }
2248
2249 static irqreturn_t lineevent_irq_handler(int irq, void *p)
2250 {
2251 struct lineevent_state *le = p;
2252
2253 /*
2254 * Just store the timestamp in hardirq context so we get it as
2255 * close in time as possible to the actual event.
2256 */
2257 le->timestamp = ktime_get_ns();
2258
2259 return IRQ_WAKE_THREAD;
2260 }
2261
2262 static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2263 {
2264 struct gpioevent_request eventreq;
2265 struct lineevent_state *le;
2266 struct gpio_desc *desc;
2267 struct file *file;
2268 u32 offset;
2269 u32 lflags;
2270 u32 eflags;
2271 int fd;
2272 int ret;
2273 int irq, irqflags = 0;
2274 char *label;
2275
2276 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2277 return -EFAULT;
2278
2279 offset = eventreq.lineoffset;
2280 lflags = eventreq.handleflags;
2281 eflags = eventreq.eventflags;
2282
2283 desc = gpiochip_get_desc(gdev->chip, offset);
2284 if (IS_ERR(desc))
2285 return PTR_ERR(desc);
2286
2287 /* Return an error if an unknown flag is set */
2288 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2289 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2290 return -EINVAL;
2291
2292 /* This is just wrong: we don't look for events on output lines */
2293 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2294 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2295 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2296 return -EINVAL;
2297
2298 /* Only one bias flag can be set. */
2299 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2300 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2301 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2302 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2303 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2304 return -EINVAL;
2305
2306 le = kzalloc(sizeof(*le), GFP_KERNEL);
2307 if (!le)
2308 return -ENOMEM;
2309 le->gdev = gpio_device_get(gdev);
2310
2311 if (eventreq.consumer_label[0] != '\0') {
2312 /* label is only initialized if consumer_label is set */
2313 le->label = kstrndup(eventreq.consumer_label,
2314 sizeof(eventreq.consumer_label) - 1,
2315 GFP_KERNEL);
2316 if (!le->label) {
2317 ret = -ENOMEM;
2318 goto out_free_le;
2319 }
2320 }
2321
2322 ret = gpiod_request_user(desc, le->label);
2323 if (ret)
2324 goto out_free_le;
2325 le->desc = desc;
2326 le->eflags = eflags;
2327
2328 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2329
2330 ret = gpiod_direction_input(desc);
2331 if (ret)
2332 goto out_free_le;
2333
2334 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2335
2336 irq = gpiod_to_irq(desc);
2337 if (irq <= 0) {
2338 ret = -ENODEV;
2339 goto out_free_le;
2340 }
2341
2342 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2343 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2344 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2345 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2346 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2347 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2348 irqflags |= IRQF_ONESHOT;
2349
2350 INIT_KFIFO(le->events);
2351 init_waitqueue_head(&le->wait);
2352
2353 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2354 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2355 &le->device_unregistered_nb);
2356 if (ret)
2357 goto out_free_le;
2358
2359 label = make_irq_label(le->label);
2360 if (IS_ERR(label)) {
2361 ret = PTR_ERR(label);
2362 goto out_free_le;
2363 }
2364
2365 /* Request a thread to read the events */
2366 ret = request_threaded_irq(irq,
2367 lineevent_irq_handler,
2368 lineevent_irq_thread,
2369 irqflags,
2370 label,
2371 le);
2372 if (ret) {
2373 free_irq_label(label);
2374 goto out_free_le;
2375 }
2376
2377 le->irq = irq;
2378
2379 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2380 if (fd < 0) {
2381 ret = fd;
2382 goto out_free_le;
2383 }
2384
2385 file = anon_inode_getfile("gpio-event",
2386 &lineevent_fileops,
2387 le,
2388 O_RDONLY | O_CLOEXEC);
2389 if (IS_ERR(file)) {
2390 ret = PTR_ERR(file);
2391 goto out_put_unused_fd;
2392 }
2393
2394 eventreq.fd = fd;
2395 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2396 /*
2397 * fput() will trigger the release() callback, so do not go onto
2398 * the regular error cleanup path here.
2399 */
2400 fput(file);
2401 put_unused_fd(fd);
2402 return -EFAULT;
2403 }
2404
2405 fd_install(fd, file);
2406
2407 return 0;
2408
2409 out_put_unused_fd:
2410 put_unused_fd(fd);
2411 out_free_le:
2412 lineevent_free(le);
2413 return ret;
2414 }
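/*
 * For illustration, the v1 ABI implemented above could be exercised from
 * userspace roughly as sketched below (chip path, offset and label are
 * hypothetical, error handling omitted):
 *
 *	struct gpioevent_request evreq = { 0 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	evreq.lineoffset = 4;
 *	evreq.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	evreq.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *	strcpy(evreq.consumer_label, "example-consumer");
 *	if (ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &evreq) == 0)
 *		... read() struct gpioevent_data records from evreq.fd ...
 */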
2415
2416 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2417 struct gpioline_info *info_v1)
2418 {
2419 u64 flagsv2 = info_v2->flags;
2420
2421 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2422 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2423 info_v1->line_offset = info_v2->offset;
2424 info_v1->flags = 0;
2425
2426 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2427 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2428
2429 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2430 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2431
2432 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2433 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2434
2435 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2436 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2437 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2438 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2439
2440 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2441 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2442 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2443 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2444 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2445 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2446 }
2447
2448 static void gpio_v2_line_info_changed_to_v1(
2449 struct gpio_v2_line_info_changed *lic_v2,
2450 struct gpioline_info_changed *lic_v1)
2451 {
2452 memset(lic_v1, 0, sizeof(*lic_v1));
2453 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2454 lic_v1->timestamp = lic_v2->timestamp_ns;
2455 lic_v1->event_type = lic_v2->event_type;
2456 }
2457
2458 #endif /* CONFIG_GPIO_CDEV_V1 */
2459
2460 static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2461 struct gpio_v2_line_info *info)
2462 {
2463 struct gpio_chip *gc = desc->gdev->chip;
2464 bool ok_for_pinctrl;
2465 unsigned long flags;
2466
2467 memset(info, 0, sizeof(*info));
2468 info->offset = gpio_chip_hwgpio(desc);
2469
2470 /*
2471 * This function takes a mutex so we must check this before taking
2472 * the spinlock.
2473 *
2474 * FIXME: find a non-racy way to retrieve this information. Maybe a
2475 * lock common to both frameworks?
2476 */
2477 ok_for_pinctrl =
2478 pinctrl_gpio_can_use_line(gc->base + info->offset);
2479
2480 spin_lock_irqsave(&gpio_lock, flags);
2481
2482 if (desc->name)
2483 strscpy(info->name, desc->name, sizeof(info->name));
2484
2485 if (desc->label)
2486 strscpy(info->consumer, desc->label, sizeof(info->consumer));
2487
2488 /*
2489 * Userspace only needs to know that the kernel is using this GPIO
2490 * so that it cannot use it.
2491 */
2492 info->flags = 0;
2493 if (test_bit(FLAG_REQUESTED, &desc->flags) ||
2494 test_bit(FLAG_IS_HOGGED, &desc->flags) ||
2495 test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
2496 test_bit(FLAG_EXPORT, &desc->flags) ||
2497 test_bit(FLAG_SYSFS, &desc->flags) ||
2498 !gpiochip_line_is_valid(gc, info->offset) ||
2499 !ok_for_pinctrl)
2500 info->flags |= GPIO_V2_LINE_FLAG_USED;
2501
2502 if (test_bit(FLAG_IS_OUT, &desc->flags))
2503 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2504 else
2505 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2506
2507 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
2508 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2509
2510 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2511 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2512 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2513 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2514
2515 if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
2516 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2517 if (test_bit(FLAG_PULL_DOWN, &desc->flags))
2518 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2519 if (test_bit(FLAG_PULL_UP, &desc->flags))
2520 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2521
2522 if (test_bit(FLAG_EDGE_RISING, &desc->flags))
2523 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2524 if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
2525 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2526
2527 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
2528 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2529 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
2530 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2531
2532 spin_unlock_irqrestore(&gpio_lock, flags);
2533 }
2534
2535 struct gpio_chardev_data {
2536 struct gpio_device *gdev;
2537 wait_queue_head_t wait;
2538 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2539 struct notifier_block lineinfo_changed_nb;
2540 struct notifier_block device_unregistered_nb;
2541 unsigned long *watched_lines;
2542 #ifdef CONFIG_GPIO_CDEV_V1
2543 atomic_t watch_abi_version;
2544 #endif
2545 };
2546
2547 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2548 {
2549 struct gpio_device *gdev = cdev->gdev;
2550 struct gpiochip_info chipinfo;
2551
2552 memset(&chipinfo, 0, sizeof(chipinfo));
2553
2554 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2555 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2556 chipinfo.lines = gdev->ngpio;
2557 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2558 return -EFAULT;
2559 return 0;
2560 }
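/*
 * For illustration, userspace retrieves this information with something
 * like the sketch below (the chip path is hypothetical):
 *
 *	struct gpiochip_info info;
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	if (ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
 *		printf("%s [%s] (%u lines)\n",
 *		       info.name, info.label, info.lines);
 */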
2561
2562 #ifdef CONFIG_GPIO_CDEV_V1
2563 /*
2564 * returns 0 if the versions match, else the previously selected ABI version
2565 */
2566 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2567 unsigned int version)
2568 {
2569 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2570
2571 if (abiv == version)
2572 return 0;
2573
2574 return abiv;
2575 }
2576
2577 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2578 bool watch)
2579 {
2580 struct gpio_desc *desc;
2581 struct gpioline_info lineinfo;
2582 struct gpio_v2_line_info lineinfo_v2;
2583
2584 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2585 return -EFAULT;
2586
2587 /* this doubles as a range check on line_offset */
2588 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
2589 if (IS_ERR(desc))
2590 return PTR_ERR(desc);
2591
2592 if (watch) {
2593 if (lineinfo_ensure_abi_version(cdev, 1))
2594 return -EPERM;
2595
2596 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2597 return -EBUSY;
2598 }
2599
2600 gpio_desc_to_lineinfo(desc, &lineinfo_v2);
2601 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2602
2603 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2604 if (watch)
2605 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2606 return -EFAULT;
2607 }
2608
2609 return 0;
2610 }
2611 #endif
2612
2613 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2614 bool watch)
2615 {
2616 struct gpio_desc *desc;
2617 struct gpio_v2_line_info lineinfo;
2618
2619 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2620 return -EFAULT;
2621
2622 if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
2623 return -EINVAL;
2624
2625 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
2626 if (IS_ERR(desc))
2627 return PTR_ERR(desc);
2628
2629 if (watch) {
2630 #ifdef CONFIG_GPIO_CDEV_V1
2631 if (lineinfo_ensure_abi_version(cdev, 2))
2632 return -EPERM;
2633 #endif
2634 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2635 return -EBUSY;
2636 }
2637 gpio_desc_to_lineinfo(desc, &lineinfo);
2638 supinfo_to_lineinfo(desc, &lineinfo);
2639
2640 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2641 if (watch)
2642 clear_bit(lineinfo.offset, cdev->watched_lines);
2643 return -EFAULT;
2644 }
2645
2646 return 0;
2647 }
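/*
 * For illustration, a userspace query of a single line's info might be
 * sketched as below (chip path and offset are hypothetical):
 *
 *	struct gpio_v2_line_info info = { 0 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	info.offset = 4;
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINEINFO_IOCTL, &info) == 0)
 *		... info.flags and info.attrs describe the line ...
 *
 * Using GPIO_V2_GET_LINEINFO_WATCH_IOCTL instead additionally subscribes
 * the caller to state changes for that line, which are then read() from
 * the chip fd as struct gpio_v2_line_info_changed records.
 */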
2648
2649 static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2650 {
2651 __u32 offset;
2652
2653 if (copy_from_user(&offset, ip, sizeof(offset)))
2654 return -EFAULT;
2655
2656 if (offset >= cdev->gdev->ngpio)
2657 return -EINVAL;
2658
2659 if (!test_and_clear_bit(offset, cdev->watched_lines))
2660 return -EBUSY;
2661
2662 return 0;
2663 }
2664
2665 static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
2666 {
2667 struct gpio_chardev_data *cdev = file->private_data;
2668 struct gpio_device *gdev = cdev->gdev;
2669 void __user *ip = (void __user *)arg;
2670
2671 /* We fail any subsequent ioctl()s when the chip is gone */
2672 if (!gdev->chip)
2673 return -ENODEV;
2674
2675 /* Fill in the struct and pass to userspace */
2676 switch (cmd) {
2677 case GPIO_GET_CHIPINFO_IOCTL:
2678 return chipinfo_get(cdev, ip);
2679 #ifdef CONFIG_GPIO_CDEV_V1
2680 case GPIO_GET_LINEHANDLE_IOCTL:
2681 return linehandle_create(gdev, ip);
2682 case GPIO_GET_LINEEVENT_IOCTL:
2683 return lineevent_create(gdev, ip);
2684 case GPIO_GET_LINEINFO_IOCTL:
2685 return lineinfo_get_v1(cdev, ip, false);
2686 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2687 return lineinfo_get_v1(cdev, ip, true);
2688 #endif /* CONFIG_GPIO_CDEV_V1 */
2689 case GPIO_V2_GET_LINEINFO_IOCTL:
2690 return lineinfo_get(cdev, ip, false);
2691 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2692 return lineinfo_get(cdev, ip, true);
2693 case GPIO_V2_GET_LINE_IOCTL:
2694 return linereq_create(gdev, ip);
2695 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2696 return lineinfo_unwatch(cdev, ip);
2697 default:
2698 return -EINVAL;
2699 }
2700 }
2701
2702 /*
2703 * gpio_ioctl() - ioctl handler for the GPIO chardev
2704 */
2705 static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2706 {
2707 struct gpio_chardev_data *cdev = file->private_data;
2708
2709 return call_ioctl_locked(file, cmd, arg, cdev->gdev,
2710 gpio_ioctl_unlocked);
2711 }
2712
2713 #ifdef CONFIG_COMPAT
2714 static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2715 unsigned long arg)
2716 {
2717 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2718 }
2719 #endif
2720
2721 static int lineinfo_changed_notify(struct notifier_block *nb,
2722 unsigned long action, void *data)
2723 {
2724 struct gpio_chardev_data *cdev =
2725 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2726 struct gpio_v2_line_info_changed chg;
2727 struct gpio_desc *desc = data;
2728 int ret;
2729
2730 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2731 return NOTIFY_DONE;
2732
2733 memset(&chg, 0, sizeof(chg));
2734 chg.event_type = action;
2735 chg.timestamp_ns = ktime_get_ns();
2736 gpio_desc_to_lineinfo(desc, &chg.info);
2737 supinfo_to_lineinfo(desc, &chg.info);
2738
2739 ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
2740 if (ret)
2741 wake_up_poll(&cdev->wait, EPOLLIN);
2742 else
2743 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2744
2745 return NOTIFY_OK;
2746 }
2747
2748 static int gpio_device_unregistered_notify(struct notifier_block *nb,
2749 unsigned long action, void *data)
2750 {
2751 struct gpio_chardev_data *cdev = container_of(nb,
2752 struct gpio_chardev_data,
2753 device_unregistered_nb);
2754
2755 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2756
2757 return NOTIFY_OK;
2758 }
2759
2760 static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
2761 struct poll_table_struct *pollt)
2762 {
2763 struct gpio_chardev_data *cdev = file->private_data;
2764 __poll_t events = 0;
2765
2766 if (!cdev->gdev->chip)
2767 return EPOLLHUP | EPOLLERR;
2768
2769 poll_wait(file, &cdev->wait, pollt);
2770
2771 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2772 &cdev->wait.lock))
2773 events = EPOLLIN | EPOLLRDNORM;
2774
2775 return events;
2776 }
2777
2778 static __poll_t lineinfo_watch_poll(struct file *file,
2779 struct poll_table_struct *pollt)
2780 {
2781 struct gpio_chardev_data *cdev = file->private_data;
2782
2783 return call_poll_locked(file, pollt, cdev->gdev,
2784 lineinfo_watch_poll_unlocked);
2785 }
2786
2787 static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
2788 size_t count, loff_t *off)
2789 {
2790 struct gpio_chardev_data *cdev = file->private_data;
2791 struct gpio_v2_line_info_changed event;
2792 ssize_t bytes_read = 0;
2793 int ret;
2794 size_t event_size;
2795
2796 if (!cdev->gdev->chip)
2797 return -ENODEV;
2798
2799 #ifndef CONFIG_GPIO_CDEV_V1
2800 event_size = sizeof(struct gpio_v2_line_info_changed);
2801 if (count < event_size)
2802 return -EINVAL;
2803 #endif
2804
2805 do {
2806 spin_lock(&cdev->wait.lock);
2807 if (kfifo_is_empty(&cdev->events)) {
2808 if (bytes_read) {
2809 spin_unlock(&cdev->wait.lock);
2810 return bytes_read;
2811 }
2812
2813 if (file->f_flags & O_NONBLOCK) {
2814 spin_unlock(&cdev->wait.lock);
2815 return -EAGAIN;
2816 }
2817
2818 ret = wait_event_interruptible_locked(cdev->wait,
2819 !kfifo_is_empty(&cdev->events));
2820 if (ret) {
2821 spin_unlock(&cdev->wait.lock);
2822 return ret;
2823 }
2824 }
2825 #ifdef CONFIG_GPIO_CDEV_V1
2826 /* must be after kfifo check so watch_abi_version is set */
2827 if (atomic_read(&cdev->watch_abi_version) == 2)
2828 event_size = sizeof(struct gpio_v2_line_info_changed);
2829 else
2830 event_size = sizeof(struct gpioline_info_changed);
2831 if (count < event_size) {
2832 spin_unlock(&cdev->wait.lock);
2833 return -EINVAL;
2834 }
2835 #endif
2836 ret = kfifo_out(&cdev->events, &event, 1);
2837 spin_unlock(&cdev->wait.lock);
2838 if (ret != 1) {
2839 /* We should never get here. See lineevent_read(). */
2840 ret = -EIO;
2841 break;
2842 }
2843
2844 #ifdef CONFIG_GPIO_CDEV_V1
2845 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2846 if (copy_to_user(buf + bytes_read, &event, event_size))
2847 return -EFAULT;
2848 } else {
2849 struct gpioline_info_changed event_v1;
2850
2851 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2852 if (copy_to_user(buf + bytes_read, &event_v1,
2853 event_size))
2854 return -EFAULT;
2855 }
2856 #else
2857 if (copy_to_user(buf + bytes_read, &event, event_size))
2858 return -EFAULT;
2859 #endif
2860 bytes_read += event_size;
2861 } while (count >= bytes_read + sizeof(event));
2862
2863 return bytes_read;
2864 }
2865
2866 static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2867 size_t count, loff_t *off)
2868 {
2869 struct gpio_chardev_data *cdev = file->private_data;
2870
2871 return call_read_locked(file, buf, count, off, cdev->gdev,
2872 lineinfo_watch_read_unlocked);
2873 }
2874
2875 /**
2876 * gpio_chrdev_open() - open the chardev for ioctl operations
2877 * @inode: inode for this chardev
2878 * @file: file struct for storing private data
2879 * Returns 0 on success
2880 */
2881 static int gpio_chrdev_open(struct inode *inode, struct file *file)
2882 {
2883 struct gpio_device *gdev = container_of(inode->i_cdev,
2884 struct gpio_device, chrdev);
2885 struct gpio_chardev_data *cdev;
2886 int ret = -ENOMEM;
2887
2888 down_read(&gdev->sem);
2889
2890 /* Fail on open if the backing gpiochip is gone */
2891 if (!gdev->chip) {
2892 ret = -ENODEV;
2893 goto out_unlock;
2894 }
2895
2896 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2897 if (!cdev)
2898 goto out_unlock;
2899
2900 cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
2901 if (!cdev->watched_lines)
2902 goto out_free_cdev;
2903
2904 init_waitqueue_head(&cdev->wait);
2905 INIT_KFIFO(cdev->events);
2906 cdev->gdev = gpio_device_get(gdev);
2907
2908 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2909 ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
2910 &cdev->lineinfo_changed_nb);
2911 if (ret)
2912 goto out_free_bitmap;
2913
2914 cdev->device_unregistered_nb.notifier_call =
2915 gpio_device_unregistered_notify;
2916 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2917 &cdev->device_unregistered_nb);
2918 if (ret)
2919 goto out_unregister_line_notifier;
2920
2921 file->private_data = cdev;
2922
2923 ret = nonseekable_open(inode, file);
2924 if (ret)
2925 goto out_unregister_device_notifier;
2926
2927 up_read(&gdev->sem);
2928
2929 return ret;
2930
2931 out_unregister_device_notifier:
2932 blocking_notifier_chain_unregister(&gdev->device_notifier,
2933 &cdev->device_unregistered_nb);
2934 out_unregister_line_notifier:
2935 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
2936 &cdev->lineinfo_changed_nb);
2937 out_free_bitmap:
2938 gpio_device_put(gdev);
2939 bitmap_free(cdev->watched_lines);
2940 out_free_cdev:
2941 kfree(cdev);
2942 out_unlock:
2943 up_read(&gdev->sem);
2944 return ret;
2945 }
2946
2947 /**
2948 * gpio_chrdev_release() - close chardev after ioctl operations
2949 * @inode: inode for this chardev
2950 * @file: file struct for storing private data
2951 * Returns 0 on success
2952 */
2953 static int gpio_chrdev_release(struct inode *inode, struct file *file)
2954 {
2955 struct gpio_chardev_data *cdev = file->private_data;
2956 struct gpio_device *gdev = cdev->gdev;
2957
2958 blocking_notifier_chain_unregister(&gdev->device_notifier,
2959 &cdev->device_unregistered_nb);
2960 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
2961 &cdev->lineinfo_changed_nb);
2962 bitmap_free(cdev->watched_lines);
2963 gpio_device_put(gdev);
2964 kfree(cdev);
2965
2966 return 0;
2967 }
2968
2969 static const struct file_operations gpio_fileops = {
2970 .release = gpio_chrdev_release,
2971 .open = gpio_chrdev_open,
2972 .poll = lineinfo_watch_poll,
2973 .read = lineinfo_watch_read,
2974 .owner = THIS_MODULE,
2975 .llseek = no_llseek,
2976 .unlocked_ioctl = gpio_ioctl,
2977 #ifdef CONFIG_COMPAT
2978 .compat_ioctl = gpio_ioctl_compat,
2979 #endif
2980 };
2981
2982 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2983 {
2984 int ret;
2985
2986 cdev_init(&gdev->chrdev, &gpio_fileops);
2987 gdev->chrdev.owner = THIS_MODULE;
2988 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2989
2990 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2991 if (ret)
2992 return ret;
2993
2994 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
2995 MAJOR(devt), gdev->id);
2996
2997 return 0;
2998 }
2999
3000 void gpiolib_cdev_unregister(struct gpio_device *gdev)
3001 {
3002 cdev_device_del(&gdev->chrdev, &gdev->dev);
3003 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
3004 }
3005