1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * The input core
4 *
5 * Copyright (c) 1999-2002 Vojtech Pavlik
6 */
7
8
9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
10
11 #include <linux/init.h>
12 #include <linux/types.h>
13 #include <linux/idr.h>
14 #include <linux/input/mt.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/random.h>
18 #include <linux/major.h>
19 #include <linux/proc_fs.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/pm.h>
23 #include <linux/poll.h>
24 #include <linux/device.h>
25 #include <linux/kstrtox.h>
26 #include <linux/mutex.h>
27 #include <linux/rcupdate.h>
28 #include "input-compat.h"
29 #include "input-core-private.h"
30 #include "input-poller.h"
31
32 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
33 MODULE_DESCRIPTION("Input core");
34 MODULE_LICENSE("GPL");
35
36 #define INPUT_MAX_CHAR_DEVICES 1024
37 #define INPUT_FIRST_DYNAMIC_DEV 256
38 static DEFINE_IDA(input_ida);
39
40 static LIST_HEAD(input_dev_list);
41 static LIST_HEAD(input_handler_list);
42
43 /*
44 * input_mutex protects access to both input_dev_list and input_handler_list.
45 * This also causes input_[un]register_device and input_[un]register_handler
46 * to be mutually exclusive, which simplifies locking in drivers implementing
47 * input handlers.
48 */
49 static DEFINE_MUTEX(input_mutex);
50
51 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
52
53 static const unsigned int input_max_code[EV_CNT] = {
54 [EV_KEY] = KEY_MAX,
55 [EV_REL] = REL_MAX,
56 [EV_ABS] = ABS_MAX,
57 [EV_MSC] = MSC_MAX,
58 [EV_SW] = SW_MAX,
59 [EV_LED] = LED_MAX,
60 [EV_SND] = SND_MAX,
61 [EV_FF] = FF_MAX,
62 };
63
64 static inline int is_event_supported(unsigned int code,
65 unsigned long *bm, unsigned int max)
66 {
67 return code <= max && test_bit(code, bm);
68 }
69
70 static int input_defuzz_abs_event(int value, int old_val, int fuzz)
71 {
72 if (fuzz) {
73 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
74 return old_val;
75
76 if (value > old_val - fuzz && value < old_val + fuzz)
77 return (old_val * 3 + value) / 4;
78
79 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
80 return (old_val + value) / 2;
81 }
82
83 return value;
84 }
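
/*
 * Worked example (illustrative values only, not from a real device): with
 * fuzz = 8 and a previous value of 100, a new reading of 103 falls within
 * fuzz/2 and is discarded (100 is kept), 106 falls within fuzz and is
 * smoothed to (100 * 3 + 106) / 4 = 101, 112 falls within fuzz * 2 and
 * becomes (100 + 112) / 2 = 106, while 130 is passed through unchanged.
 */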
85
86 static void input_start_autorepeat(struct input_dev *dev, int code)
87 {
88 if (test_bit(EV_REP, dev->evbit) &&
89 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
90 dev->timer.function) {
91 dev->repeat_key = code;
92 mod_timer(&dev->timer,
93 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
94 }
95 }
96
97 static void input_stop_autorepeat(struct input_dev *dev)
98 {
99 del_timer(&dev->timer);
100 }
101
102 /*
103 * Pass event first through all filters and then, if event has not been
104 * filtered out, through all open handles. This function is called with
105 * dev->event_lock held and interrupts disabled.
106 */
107 static unsigned int input_to_handler(struct input_handle *handle,
108 struct input_value *vals, unsigned int count)
109 {
110 struct input_handler *handler = handle->handler;
111 struct input_value *end = vals;
112 struct input_value *v;
113
114 if (handler->filter) {
115 for (v = vals; v != vals + count; v++) {
116 if (handler->filter(handle, v->type, v->code, v->value))
117 continue;
118 if (end != v)
119 *end = *v;
120 end++;
121 }
122 count = end - vals;
123 }
124
125 if (!count)
126 return 0;
127
128 if (handler->events)
129 handler->events(handle, vals, count);
130 else if (handler->event)
131 for (v = vals; v != vals + count; v++)
132 handler->event(handle, v->type, v->code, v->value);
133
134 return count;
135 }
136
137 /*
138 * Pass values first through all filters and then, if event has not been
139 * filtered out, through all open handles. This function is called with
140 * dev->event_lock held and interrupts disabled.
141 */
142 static void input_pass_values(struct input_dev *dev,
143 struct input_value *vals, unsigned int count)
144 {
145 struct input_handle *handle;
146 struct input_value *v;
147
148 lockdep_assert_held(&dev->event_lock);
149
150 if (!count)
151 return;
152
153 rcu_read_lock();
154
155 handle = rcu_dereference(dev->grab);
156 if (handle) {
157 count = input_to_handler(handle, vals, count);
158 } else {
159 list_for_each_entry_rcu(handle, &dev->h_list, d_node)
160 if (handle->open) {
161 count = input_to_handler(handle, vals, count);
162 if (!count)
163 break;
164 }
165 }
166
167 rcu_read_unlock();
168
169 /* trigger auto repeat for key events */
170 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
171 for (v = vals; v != vals + count; v++) {
172 if (v->type == EV_KEY && v->value != 2) {
173 if (v->value)
174 input_start_autorepeat(dev, v->code);
175 else
176 input_stop_autorepeat(dev);
177 }
178 }
179 }
180 }
181
182 #define INPUT_IGNORE_EVENT 0
183 #define INPUT_PASS_TO_HANDLERS 1
184 #define INPUT_PASS_TO_DEVICE 2
185 #define INPUT_SLOT 4
186 #define INPUT_FLUSH 8
187 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
188
189 static int input_handle_abs_event(struct input_dev *dev,
190 unsigned int code, int *pval)
191 {
192 struct input_mt *mt = dev->mt;
193 bool is_new_slot = false;
194 bool is_mt_event;
195 int *pold;
196
197 if (code == ABS_MT_SLOT) {
198 /*
199 * "Stage" the event; we'll flush it later, when we
200 * get actual touch data.
201 */
202 if (mt && *pval >= 0 && *pval < mt->num_slots)
203 mt->slot = *pval;
204
205 return INPUT_IGNORE_EVENT;
206 }
207
208 is_mt_event = input_is_mt_value(code);
209
210 if (!is_mt_event) {
211 pold = &dev->absinfo[code].value;
212 } else if (mt) {
213 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
214 is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value;
215 } else {
216 /*
217 * Bypass filtering for multi-touch events when
218 * not employing slots.
219 */
220 pold = NULL;
221 }
222
223 if (pold) {
224 *pval = input_defuzz_abs_event(*pval, *pold,
225 dev->absinfo[code].fuzz);
226 if (*pold == *pval)
227 return INPUT_IGNORE_EVENT;
228
229 *pold = *pval;
230 }
231
232 /* Flush pending "slot" event */
233 if (is_new_slot) {
234 dev->absinfo[ABS_MT_SLOT].value = mt->slot;
235 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
236 }
237
238 return INPUT_PASS_TO_HANDLERS;
239 }
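
/*
 * Example (a sketch of a hypothetical MT event stream, assuming the staged
 * slot differs from the previously reported one):
 *
 *	ABS_MT_SLOT        1	staged in mt->slot, INPUT_IGNORE_EVENT
 *	ABS_MT_POSITION_X  120	defuzzed against slot 1 state; returns
 *				INPUT_PASS_TO_HANDLERS | INPUT_SLOT so the
 *				pending slot change is re-emitted first
 *	ABS_MT_POSITION_Y  340	INPUT_PASS_TO_HANDLERS
 *	SYN_REPORT         0	flushes the queued values to handlers
 *
 * The slot event therefore never reaches handlers on its own; it is only
 * replayed in front of the first event that carries data for the new slot.
 */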
240
241 static int input_get_disposition(struct input_dev *dev,
242 unsigned int type, unsigned int code, int *pval)
243 {
244 int disposition = INPUT_IGNORE_EVENT;
245 int value = *pval;
246
247 /* filter-out events from inhibited devices */
248 if (dev->inhibited)
249 return INPUT_IGNORE_EVENT;
250
251 switch (type) {
252
253 case EV_SYN:
254 switch (code) {
255 case SYN_CONFIG:
256 disposition = INPUT_PASS_TO_ALL;
257 break;
258
259 case SYN_REPORT:
260 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
261 break;
262 case SYN_MT_REPORT:
263 disposition = INPUT_PASS_TO_HANDLERS;
264 break;
265 }
266 break;
267
268 case EV_KEY:
269 if (is_event_supported(code, dev->keybit, KEY_MAX)) {
270
271 /* auto-repeat bypasses state updates */
272 if (value == 2) {
273 disposition = INPUT_PASS_TO_HANDLERS;
274 break;
275 }
276
277 if (!!test_bit(code, dev->key) != !!value) {
278
279 __change_bit(code, dev->key);
280 disposition = INPUT_PASS_TO_HANDLERS;
281 }
282 }
283 break;
284
285 case EV_SW:
286 if (is_event_supported(code, dev->swbit, SW_MAX) &&
287 !!test_bit(code, dev->sw) != !!value) {
288
289 __change_bit(code, dev->sw);
290 disposition = INPUT_PASS_TO_HANDLERS;
291 }
292 break;
293
294 case EV_ABS:
295 if (is_event_supported(code, dev->absbit, ABS_MAX))
296 disposition = input_handle_abs_event(dev, code, &value);
297
298 break;
299
300 case EV_REL:
301 if (is_event_supported(code, dev->relbit, REL_MAX) && value)
302 disposition = INPUT_PASS_TO_HANDLERS;
303
304 break;
305
306 case EV_MSC:
307 if (is_event_supported(code, dev->mscbit, MSC_MAX))
308 disposition = INPUT_PASS_TO_ALL;
309
310 break;
311
312 case EV_LED:
313 if (is_event_supported(code, dev->ledbit, LED_MAX) &&
314 !!test_bit(code, dev->led) != !!value) {
315
316 __change_bit(code, dev->led);
317 disposition = INPUT_PASS_TO_ALL;
318 }
319 break;
320
321 case EV_SND:
322 if (is_event_supported(code, dev->sndbit, SND_MAX)) {
323
324 if (!!test_bit(code, dev->snd) != !!value)
325 __change_bit(code, dev->snd);
326 disposition = INPUT_PASS_TO_ALL;
327 }
328 break;
329
330 case EV_REP:
331 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
332 dev->rep[code] = value;
333 disposition = INPUT_PASS_TO_ALL;
334 }
335 break;
336
337 case EV_FF:
338 if (value >= 0)
339 disposition = INPUT_PASS_TO_ALL;
340 break;
341
342 case EV_PWR:
343 disposition = INPUT_PASS_TO_ALL;
344 break;
345 }
346
347 *pval = value;
348 return disposition;
349 }
350
351 static void input_event_dispose(struct input_dev *dev, int disposition,
352 unsigned int type, unsigned int code, int value)
353 {
354 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
355 dev->event(dev, type, code, value);
356
357 if (!dev->vals)
358 return;
359
360 if (disposition & INPUT_PASS_TO_HANDLERS) {
361 struct input_value *v;
362
363 if (disposition & INPUT_SLOT) {
364 v = &dev->vals[dev->num_vals++];
365 v->type = EV_ABS;
366 v->code = ABS_MT_SLOT;
367 v->value = dev->mt->slot;
368 }
369
370 v = &dev->vals[dev->num_vals++];
371 v->type = type;
372 v->code = code;
373 v->value = value;
374 }
375
376 if (disposition & INPUT_FLUSH) {
377 if (dev->num_vals >= 2)
378 input_pass_values(dev, dev->vals, dev->num_vals);
379 dev->num_vals = 0;
380 /*
381 * Reset the timestamp on flush so we won't end up
382 * with a stale one. Note we only need to reset the
383 * monolithic one as we use its presence when deciding
384 * whether to generate a synthetic timestamp.
385 */
386 dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
387 } else if (dev->num_vals >= dev->max_vals - 2) {
388 dev->vals[dev->num_vals++] = input_value_sync;
389 input_pass_values(dev, dev->vals, dev->num_vals);
390 dev->num_vals = 0;
391 }
392 }
393
394 void input_handle_event(struct input_dev *dev,
395 unsigned int type, unsigned int code, int value)
396 {
397 int disposition;
398
399 lockdep_assert_held(&dev->event_lock);
400
401 disposition = input_get_disposition(dev, type, code, &value);
402 if (disposition != INPUT_IGNORE_EVENT) {
403 if (type != EV_SYN)
404 add_input_randomness(type, code, value);
405
406 input_event_dispose(dev, disposition, type, code, value);
407 }
408 }
409
410 /**
411 * input_event() - report new input event
412 * @dev: device that generated the event
413 * @type: type of the event
414 * @code: event code
415 * @value: value of the event
416 *
417 * This function should be used by drivers implementing various input
418 * devices to report input events. See also input_inject_event().
419 *
420 * NOTE: input_event() may be safely used right after input device was
421 * allocated with input_allocate_device(), even before it is registered
422 * with input_register_device(), but the event will not reach any of the
423 * input handlers. Such early invocation of input_event() may be used
424 * to 'seed' initial state of a switch or initial position of absolute
425 * axis, etc.
426 */
427 void input_event(struct input_dev *dev,
428 unsigned int type, unsigned int code, int value)
429 {
430 unsigned long flags;
431
432 if (is_event_supported(type, dev->evbit, EV_MAX)) {
433
434 spin_lock_irqsave(&dev->event_lock, flags);
435 input_handle_event(dev, type, code, value);
436 spin_unlock_irqrestore(&dev->event_lock, flags);
437 }
438 }
439 EXPORT_SYMBOL(input_event);
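
/*
 * Example (a minimal sketch, not taken from a real driver): a button driver
 * reporting a press and release from its interrupt handler. input_report_key()
 * and input_sync() are thin inline wrappers around input_event():
 *
 *	input_report_key(dev, KEY_POWER, 1);
 *	input_sync(dev);
 *	...
 *	input_report_key(dev, KEY_POWER, 0);
 *	input_sync(dev);
 */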
440
441 /**
442 * input_inject_event() - send input event from input handler
443 * @handle: input handle to send event through
444 * @type: type of the event
445 * @code: event code
446 * @value: value of the event
447 *
448 * Similar to input_event() but will ignore the event if the device is
449 * "grabbed" and the handle injecting the event is not the one that owns
450 * the device.
451 */
452 void input_inject_event(struct input_handle *handle,
453 unsigned int type, unsigned int code, int value)
454 {
455 struct input_dev *dev = handle->dev;
456 struct input_handle *grab;
457 unsigned long flags;
458
459 if (is_event_supported(type, dev->evbit, EV_MAX)) {
460 spin_lock_irqsave(&dev->event_lock, flags);
461
462 rcu_read_lock();
463 grab = rcu_dereference(dev->grab);
464 if (!grab || grab == handle)
465 input_handle_event(dev, type, code, value);
466 rcu_read_unlock();
467
468 spin_unlock_irqrestore(&dev->event_lock, flags);
469 }
470 }
471 EXPORT_SYMBOL(input_inject_event);
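
/*
 * Example (sketch): a handler may inject events back into the device it is
 * attached to, e.g. to light a keyboard LED:
 *
 *	input_inject_event(handle, EV_LED, LED_CAPSL, 1);
 *	input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
 */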
472
473 /**
474 * input_alloc_absinfo - allocates array of input_absinfo structs
475 * @dev: the input device emitting absolute events
476 *
477 * If the absinfo struct the caller asked for is already allocated, this
478 * function will not do anything.
479 */
480 void input_alloc_absinfo(struct input_dev *dev)
481 {
482 if (dev->absinfo)
483 return;
484
485 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
486 if (!dev->absinfo) {
487 dev_err(dev->dev.parent ?: &dev->dev,
488 "%s: unable to allocate memory\n", __func__);
489 /*
490 * We will handle this allocation failure in
491 * input_register_device() when we refuse to register input
492 * device with ABS bits but without absinfo.
493 */
494 }
495 }
496 EXPORT_SYMBOL(input_alloc_absinfo);
497
498 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
499 int min, int max, int fuzz, int flat)
500 {
501 struct input_absinfo *absinfo;
502
503 __set_bit(EV_ABS, dev->evbit);
504 __set_bit(axis, dev->absbit);
505
506 input_alloc_absinfo(dev);
507 if (!dev->absinfo)
508 return;
509
510 absinfo = &dev->absinfo[axis];
511 absinfo->minimum = min;
512 absinfo->maximum = max;
513 absinfo->fuzz = fuzz;
514 absinfo->flat = flat;
515 }
516 EXPORT_SYMBOL(input_set_abs_params);
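
/*
 * Example (a minimal sketch with made-up ranges): a touchscreen driver
 * declaring its axes before registering the device:
 *
 *	input_set_abs_params(dev, ABS_X, 0, 4095, 4, 0);
 *	input_set_abs_params(dev, ABS_Y, 0, 4095, 4, 0);
 *	input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
 */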
517
518 /**
519 * input_copy_abs - Copy absinfo from one input_dev to another
520 * @dst: Destination input device to copy the abs settings to
521 * @dst_axis: ABS_* value selecting the destination axis
522 * @src: Source input device to copy the abs settings from
523 * @src_axis: ABS_* value selecting the source axis
524 *
525 * Set absinfo for the selected destination axis by copying it from
526 * the specified source input device's source axis.
527 * This is useful to e.g. setup a pen/stylus input-device for combined
528 * touchscreen/pen hardware where the pen uses the same coordinates as
529 * the touchscreen.
530 */
531 void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
532 const struct input_dev *src, unsigned int src_axis)
533 {
534 /* src must have EV_ABS and src_axis set */
535 if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
536 test_bit(src_axis, src->absbit))))
537 return;
538
539 /*
540 * input_alloc_absinfo() may have failed for the source. Our caller is
541 * expected to catch this when registering the input devices, which may
542 * happen after the input_copy_abs() call.
543 */
544 if (!src->absinfo)
545 return;
546
547 input_set_capability(dst, EV_ABS, dst_axis);
548 if (!dst->absinfo)
549 return;
550
551 dst->absinfo[dst_axis] = src->absinfo[src_axis];
552 }
553 EXPORT_SYMBOL(input_copy_abs);
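
/*
 * Example (sketch, pen_dev and ts_dev are hypothetical names): a combined
 * touchscreen/pen driver can make the pen device report the same coordinate
 * ranges as the already configured touchscreen device:
 *
 *	input_copy_abs(pen_dev, ABS_X, ts_dev, ABS_MT_POSITION_X);
 *	input_copy_abs(pen_dev, ABS_Y, ts_dev, ABS_MT_POSITION_Y);
 */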
554
555 /**
556 * input_grab_device - grabs device for exclusive use
557 * @handle: input handle that wants to own the device
558 *
559 * When a device is grabbed by an input handle all events generated by
560 * the device are delivered only to this handle. Also events injected
561 * by other input handles are ignored while device is grabbed.
562 */
563 int input_grab_device(struct input_handle *handle)
564 {
565 struct input_dev *dev = handle->dev;
566 int retval;
567
568 retval = mutex_lock_interruptible(&dev->mutex);
569 if (retval)
570 return retval;
571
572 if (dev->grab) {
573 retval = -EBUSY;
574 goto out;
575 }
576
577 rcu_assign_pointer(dev->grab, handle);
578
579 out:
580 mutex_unlock(&dev->mutex);
581 return retval;
582 }
583 EXPORT_SYMBOL(input_grab_device);
584
585 static void __input_release_device(struct input_handle *handle)
586 {
587 struct input_dev *dev = handle->dev;
588 struct input_handle *grabber;
589
590 grabber = rcu_dereference_protected(dev->grab,
591 lockdep_is_held(&dev->mutex));
592 if (grabber == handle) {
593 rcu_assign_pointer(dev->grab, NULL);
594 /* Make sure input_pass_values() notices that grab is gone */
595 synchronize_rcu();
596
597 list_for_each_entry(handle, &dev->h_list, d_node)
598 if (handle->open && handle->handler->start)
599 handle->handler->start(handle);
600 }
601 }
602
603 /**
604 * input_release_device - release previously grabbed device
605 * @handle: input handle that owns the device
606 *
607 * Releases previously grabbed device so that other input handles can
608 * start receiving input events. Upon release all handlers attached
609 * to the device have their start() method called so they have a chance
610 * to synchronize device state with the rest of the system.
611 */
612 void input_release_device(struct input_handle *handle)
613 {
614 struct input_dev *dev = handle->dev;
615
616 mutex_lock(&dev->mutex);
617 __input_release_device(handle);
618 mutex_unlock(&dev->mutex);
619 }
620 EXPORT_SYMBOL(input_release_device);
621
622 /**
623 * input_open_device - open input device
624 * @handle: handle through which device is being accessed
625 *
626 * This function should be called by input handlers when they
627 * want to start receiving events from a given input device.
628 */
629 int input_open_device(struct input_handle *handle)
630 {
631 struct input_dev *dev = handle->dev;
632 int retval;
633
634 retval = mutex_lock_interruptible(&dev->mutex);
635 if (retval)
636 return retval;
637
638 if (dev->going_away) {
639 retval = -ENODEV;
640 goto out;
641 }
642
643 handle->open++;
644
645 if (dev->users++ || dev->inhibited) {
646 /*
647 * Device is already opened and/or inhibited,
648 * so we can exit immediately and report success.
649 */
650 goto out;
651 }
652
653 if (dev->open) {
654 retval = dev->open(dev);
655 if (retval) {
656 dev->users--;
657 handle->open--;
658 /*
659 * Make sure we are not delivering any more events
660 * through this handle
661 */
662 synchronize_rcu();
663 goto out;
664 }
665 }
666
667 if (dev->poller)
668 input_dev_poller_start(dev->poller);
669
670 out:
671 mutex_unlock(&dev->mutex);
672 return retval;
673 }
674 EXPORT_SYMBOL(input_open_device);
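
/*
 * Example (a condensed sketch of a handler's ->connect() path, allocation
 * and error handling omitted): the handle is registered first and then
 * opened to start the flow of events. Some handlers open the device right
 * away in ->connect(), others defer until user space actually needs events:
 *
 *	handle->dev = dev;
 *	handle->handler = handler;
 *	handle->name = "example";
 *	input_register_handle(handle);
 *	input_open_device(handle);
 */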
675
676 int input_flush_device(struct input_handle *handle, struct file *file)
677 {
678 struct input_dev *dev = handle->dev;
679 int retval;
680
681 retval = mutex_lock_interruptible(&dev->mutex);
682 if (retval)
683 return retval;
684
685 if (dev->flush)
686 retval = dev->flush(dev, file);
687
688 mutex_unlock(&dev->mutex);
689 return retval;
690 }
691 EXPORT_SYMBOL(input_flush_device);
692
693 /**
694 * input_close_device - close input device
695 * @handle: handle through which device is being accessed
696 *
697 * This function should be called by input handlers when they
698 * want to stop receiving events from a given input device.
699 */
700 void input_close_device(struct input_handle *handle)
701 {
702 struct input_dev *dev = handle->dev;
703
704 mutex_lock(&dev->mutex);
705
706 __input_release_device(handle);
707
708 if (!--dev->users && !dev->inhibited) {
709 if (dev->poller)
710 input_dev_poller_stop(dev->poller);
711 if (dev->close)
712 dev->close(dev);
713 }
714
715 if (!--handle->open) {
716 /*
717 * synchronize_rcu() makes sure that input_pass_values()
718 * completed and that no more input events are delivered
719 * through this handle
720 */
721 synchronize_rcu();
722 }
723
724 mutex_unlock(&dev->mutex);
725 }
726 EXPORT_SYMBOL(input_close_device);
727
728 /*
729 * Simulate keyup events for all keys that are marked as pressed.
730 * The function must be called with dev->event_lock held.
731 */
732 static bool input_dev_release_keys(struct input_dev *dev)
733 {
734 bool need_sync = false;
735 int code;
736
737 lockdep_assert_held(&dev->event_lock);
738
739 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
740 for_each_set_bit(code, dev->key, KEY_CNT) {
741 input_handle_event(dev, EV_KEY, code, 0);
742 need_sync = true;
743 }
744 }
745
746 return need_sync;
747 }
748
749 /*
750 * Prepare device for unregistering
751 */
752 static void input_disconnect_device(struct input_dev *dev)
753 {
754 struct input_handle *handle;
755
756 /*
757 * Mark device as going away. Note that we take dev->mutex here
758 * not to protect access to dev->going_away but rather to ensure
759 * that there are no threads in the middle of input_open_device()
760 */
761 mutex_lock(&dev->mutex);
762 dev->going_away = true;
763 mutex_unlock(&dev->mutex);
764
765 spin_lock_irq(&dev->event_lock);
766
767 /*
768 * Simulate keyup events for all pressed keys so that handlers
769 * are not left with "stuck" keys. The driver may continue to
770 * generate events even after we are done here, but they will not
771 * reach any handlers.
772 */
773 if (input_dev_release_keys(dev))
774 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
775
776 list_for_each_entry(handle, &dev->h_list, d_node)
777 handle->open = 0;
778
779 spin_unlock_irq(&dev->event_lock);
780 }
781
782 /**
783 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
784 * @ke: keymap entry containing scancode to be converted.
785 * @scancode: pointer to the location where converted scancode should
786 * be stored.
787 *
788 * This function is used to convert a scancode stored in &struct input_keymap_entry
789 * into scalar form understood by legacy keymap handling methods. These
790 * methods expect scancodes to be represented as 'unsigned int'.
791 */
792 int input_scancode_to_scalar(const struct input_keymap_entry *ke,
793 unsigned int *scancode)
794 {
795 switch (ke->len) {
796 case 1:
797 *scancode = *((u8 *)ke->scancode);
798 break;
799
800 case 2:
801 *scancode = *((u16 *)ke->scancode);
802 break;
803
804 case 4:
805 *scancode = *((u32 *)ke->scancode);
806 break;
807
808 default:
809 return -EINVAL;
810 }
811
812 return 0;
813 }
814 EXPORT_SYMBOL(input_scancode_to_scalar);
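
/*
 * Example (sketch, using a made-up scancode value): a 2-byte scancode stored
 * in a keymap entry is recovered as a scalar:
 *
 *	struct input_keymap_entry ke = { .len = sizeof(u16) };
 *	unsigned int scancode;
 *
 *	*(u16 *)ke.scancode = 0x1e;
 *	if (!input_scancode_to_scalar(&ke, &scancode))
 *		scancode now holds 0x1e
 */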
815
816 /*
817 * Those routines handle the default case where no [gs]etkeycode() is
818 * defined. In this case, an array indexed by the scancode is used.
819 */
820
821 static unsigned int input_fetch_keycode(struct input_dev *dev,
822 unsigned int index)
823 {
824 switch (dev->keycodesize) {
825 case 1:
826 return ((u8 *)dev->keycode)[index];
827
828 case 2:
829 return ((u16 *)dev->keycode)[index];
830
831 default:
832 return ((u32 *)dev->keycode)[index];
833 }
834 }
835
836 static int input_default_getkeycode(struct input_dev *dev,
837 struct input_keymap_entry *ke)
838 {
839 unsigned int index;
840 int error;
841
842 if (!dev->keycodesize)
843 return -EINVAL;
844
845 if (ke->flags & INPUT_KEYMAP_BY_INDEX)
846 index = ke->index;
847 else {
848 error = input_scancode_to_scalar(ke, &index);
849 if (error)
850 return error;
851 }
852
853 if (index >= dev->keycodemax)
854 return -EINVAL;
855
856 ke->keycode = input_fetch_keycode(dev, index);
857 ke->index = index;
858 ke->len = sizeof(index);
859 memcpy(ke->scancode, &index, sizeof(index));
860
861 return 0;
862 }
863
864 static int input_default_setkeycode(struct input_dev *dev,
865 const struct input_keymap_entry *ke,
866 unsigned int *old_keycode)
867 {
868 unsigned int index;
869 int error;
870 int i;
871
872 if (!dev->keycodesize)
873 return -EINVAL;
874
875 if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
876 index = ke->index;
877 } else {
878 error = input_scancode_to_scalar(ke, &index);
879 if (error)
880 return error;
881 }
882
883 if (index >= dev->keycodemax)
884 return -EINVAL;
885
886 if (dev->keycodesize < sizeof(ke->keycode) &&
887 (ke->keycode >> (dev->keycodesize * 8)))
888 return -EINVAL;
889
890 switch (dev->keycodesize) {
891 case 1: {
892 u8 *k = (u8 *)dev->keycode;
893 *old_keycode = k[index];
894 k[index] = ke->keycode;
895 break;
896 }
897 case 2: {
898 u16 *k = (u16 *)dev->keycode;
899 *old_keycode = k[index];
900 k[index] = ke->keycode;
901 break;
902 }
903 default: {
904 u32 *k = (u32 *)dev->keycode;
905 *old_keycode = k[index];
906 k[index] = ke->keycode;
907 break;
908 }
909 }
910
911 if (*old_keycode <= KEY_MAX) {
912 __clear_bit(*old_keycode, dev->keybit);
913 for (i = 0; i < dev->keycodemax; i++) {
914 if (input_fetch_keycode(dev, i) == *old_keycode) {
915 __set_bit(*old_keycode, dev->keybit);
916 /* Setting the bit twice is useless, so break */
917 break;
918 }
919 }
920 }
921
922 __set_bit(ke->keycode, dev->keybit);
923 return 0;
924 }
925
926 /**
927 * input_get_keycode - retrieve keycode currently mapped to a given scancode
928 * @dev: input device whose keymap is being queried
929 * @ke: keymap entry
930 *
931 * This function should be called by anyone interested in retrieving the current
932 * keymap. Presently evdev handlers use it.
933 */
934 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
935 {
936 unsigned long flags;
937 int retval;
938
939 spin_lock_irqsave(&dev->event_lock, flags);
940 retval = dev->getkeycode(dev, ke);
941 spin_unlock_irqrestore(&dev->event_lock, flags);
942
943 return retval;
944 }
945 EXPORT_SYMBOL(input_get_keycode);
946
947 /**
948 * input_set_keycode - attribute a keycode to a given scancode
949 * @dev: input device whose keymap is being updated
950 * @ke: new keymap entry
951 *
952 * This function should be called by anyone needing to update the current
953 * keymap. Presently keyboard and evdev handlers use it.
954 */
955 int input_set_keycode(struct input_dev *dev,
956 const struct input_keymap_entry *ke)
957 {
958 unsigned long flags;
959 unsigned int old_keycode;
960 int retval;
961
962 if (ke->keycode > KEY_MAX)
963 return -EINVAL;
964
965 spin_lock_irqsave(&dev->event_lock, flags);
966
967 retval = dev->setkeycode(dev, ke, &old_keycode);
968 if (retval)
969 goto out;
970
971 /* Make sure KEY_RESERVED did not get enabled. */
972 __clear_bit(KEY_RESERVED, dev->keybit);
973
974 /*
975 * Simulate keyup event if keycode is not present
976 * in the keymap anymore
977 */
978 if (old_keycode > KEY_MAX) {
979 dev_warn(dev->dev.parent ?: &dev->dev,
980 "%s: got too big old keycode %#x\n",
981 __func__, old_keycode);
982 } else if (test_bit(EV_KEY, dev->evbit) &&
983 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
984 __test_and_clear_bit(old_keycode, dev->key)) {
985 /*
986 * We have to use input_event_dispose() here directly instead
987 * of input_handle_event() because the key we want to release
988 * here is considered no longer supported by the device and
989 * input_handle_event() will ignore it.
990 */
991 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
992 EV_KEY, old_keycode, 0);
993 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
994 EV_SYN, SYN_REPORT, 1);
995 }
996
997 out:
998 spin_unlock_irqrestore(&dev->event_lock, flags);
999
1000 return retval;
1001 }
1002 EXPORT_SYMBOL(input_set_keycode);
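
/*
 * Example (sketch, index value is hypothetical): remapping one keymap slot
 * to KEY_A by index, as an evdev EVIOCSKEYCODE_V2 handler would ultimately
 * request:
 *
 *	struct input_keymap_entry ke = {
 *		.flags   = INPUT_KEYMAP_BY_INDEX,
 *		.index   = 3,
 *		.keycode = KEY_A,
 *	};
 *
 *	error = input_set_keycode(dev, &ke);
 */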
1003
1004 bool input_match_device_id(const struct input_dev *dev,
1005 const struct input_device_id *id)
1006 {
1007 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
1008 if (id->bustype != dev->id.bustype)
1009 return false;
1010
1011 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
1012 if (id->vendor != dev->id.vendor)
1013 return false;
1014
1015 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
1016 if (id->product != dev->id.product)
1017 return false;
1018
1019 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
1020 if (id->version != dev->id.version)
1021 return false;
1022
1023 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
1024 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
1025 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
1026 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
1027 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
1028 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
1029 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
1030 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
1031 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
1032 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
1033 return false;
1034 }
1035
1036 return true;
1037 }
1038 EXPORT_SYMBOL(input_match_device_id);
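
/*
 * Example (sketch): an id table in the style used by handlers that only care
 * about devices reporting EV_KEY events; the empty entry terminates the table:
 *
 *	static const struct input_device_id example_ids[] = {
 *		{
 *			.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *			.evbit = { BIT_MASK(EV_KEY) },
 *		},
 *		{ },
 *	};
 */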
1039
1040 static const struct input_device_id *input_match_device(struct input_handler *handler,
1041 struct input_dev *dev)
1042 {
1043 const struct input_device_id *id;
1044
1045 for (id = handler->id_table; id->flags || id->driver_info; id++) {
1046 if (input_match_device_id(dev, id) &&
1047 (!handler->match || handler->match(handler, dev))) {
1048 return id;
1049 }
1050 }
1051
1052 return NULL;
1053 }
1054
1055 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
1056 {
1057 const struct input_device_id *id;
1058 int error;
1059
1060 id = input_match_device(handler, dev);
1061 if (!id)
1062 return -ENODEV;
1063
1064 error = handler->connect(handler, dev, id);
1065 if (error && error != -ENODEV)
1066 pr_err("failed to attach handler %s to device %s, error: %d\n",
1067 handler->name, kobject_name(&dev->dev.kobj), error);
1068
1069 return error;
1070 }
1071
1072 #ifdef CONFIG_COMPAT
1073
1074 static int input_bits_to_string(char *buf, int buf_size,
1075 unsigned long bits, bool skip_empty)
1076 {
1077 int len = 0;
1078
1079 if (in_compat_syscall()) {
1080 u32 dword = bits >> 32;
1081 if (dword || !skip_empty)
1082 len += snprintf(buf, buf_size, "%x ", dword);
1083
1084 dword = bits & 0xffffffffUL;
1085 if (dword || !skip_empty || len)
1086 len += snprintf(buf + len, max(buf_size - len, 0),
1087 "%x", dword);
1088 } else {
1089 if (bits || !skip_empty)
1090 len += snprintf(buf, buf_size, "%lx", bits);
1091 }
1092
1093 return len;
1094 }
1095
1096 #else /* !CONFIG_COMPAT */
1097
1098 static int input_bits_to_string(char *buf, int buf_size,
1099 unsigned long bits, bool skip_empty)
1100 {
1101 return bits || !skip_empty ?
1102 snprintf(buf, buf_size, "%lx", bits) : 0;
1103 }
1104
1105 #endif
1106
1107 #ifdef CONFIG_PROC_FS
1108
1109 static struct proc_dir_entry *proc_bus_input_dir;
1110 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1111 static int input_devices_state;
1112
1113 static inline void input_wakeup_procfs_readers(void)
1114 {
1115 input_devices_state++;
1116 wake_up(&input_devices_poll_wait);
1117 }
1118
1119 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1120 {
1121 poll_wait(file, &input_devices_poll_wait, wait);
1122 if (file->f_version != input_devices_state) {
1123 file->f_version = input_devices_state;
1124 return EPOLLIN | EPOLLRDNORM;
1125 }
1126
1127 return 0;
1128 }
1129
1130 union input_seq_state {
1131 struct {
1132 unsigned short pos;
1133 bool mutex_acquired;
1134 };
1135 void *p;
1136 };
1137
1138 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1139 {
1140 union input_seq_state *state = (union input_seq_state *)&seq->private;
1141 int error;
1142
1143 /* We need to fit into seq->private pointer */
1144 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1145
1146 error = mutex_lock_interruptible(&input_mutex);
1147 if (error) {
1148 state->mutex_acquired = false;
1149 return ERR_PTR(error);
1150 }
1151
1152 state->mutex_acquired = true;
1153
1154 return seq_list_start(&input_dev_list, *pos);
1155 }
1156
1157 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1158 {
1159 return seq_list_next(v, &input_dev_list, pos);
1160 }
1161
1162 static void input_seq_stop(struct seq_file *seq, void *v)
1163 {
1164 union input_seq_state *state = (union input_seq_state *)&seq->private;
1165
1166 if (state->mutex_acquired)
1167 mutex_unlock(&input_mutex);
1168 }
1169
1170 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1171 unsigned long *bitmap, int max)
1172 {
1173 int i;
1174 bool skip_empty = true;
1175 char buf[18];
1176
1177 seq_printf(seq, "B: %s=", name);
1178
1179 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1180 if (input_bits_to_string(buf, sizeof(buf),
1181 bitmap[i], skip_empty)) {
1182 skip_empty = false;
1183 seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1184 }
1185 }
1186
1187 /*
1188 * If no output was produced print a single 0.
1189 */
1190 if (skip_empty)
1191 seq_putc(seq, '0');
1192
1193 seq_putc(seq, '\n');
1194 }
1195
1196 static int input_devices_seq_show(struct seq_file *seq, void *v)
1197 {
1198 struct input_dev *dev = container_of(v, struct input_dev, node);
1199 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1200 struct input_handle *handle;
1201
1202 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1203 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1204
1205 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1206 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1207 seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1208 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1209 seq_puts(seq, "H: Handlers=");
1210
1211 list_for_each_entry(handle, &dev->h_list, d_node)
1212 seq_printf(seq, "%s ", handle->name);
1213 seq_putc(seq, '\n');
1214
1215 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1216
1217 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1218 if (test_bit(EV_KEY, dev->evbit))
1219 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1220 if (test_bit(EV_REL, dev->evbit))
1221 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1222 if (test_bit(EV_ABS, dev->evbit))
1223 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1224 if (test_bit(EV_MSC, dev->evbit))
1225 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1226 if (test_bit(EV_LED, dev->evbit))
1227 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1228 if (test_bit(EV_SND, dev->evbit))
1229 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1230 if (test_bit(EV_FF, dev->evbit))
1231 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1232 if (test_bit(EV_SW, dev->evbit))
1233 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1234
1235 seq_putc(seq, '\n');
1236
1237 kfree(path);
1238 return 0;
1239 }
1240
1241 static const struct seq_operations input_devices_seq_ops = {
1242 .start = input_devices_seq_start,
1243 .next = input_devices_seq_next,
1244 .stop = input_seq_stop,
1245 .show = input_devices_seq_show,
1246 };
1247
1248 static int input_proc_devices_open(struct inode *inode, struct file *file)
1249 {
1250 return seq_open(file, &input_devices_seq_ops);
1251 }
1252
1253 static const struct proc_ops input_devices_proc_ops = {
1254 .proc_open = input_proc_devices_open,
1255 .proc_poll = input_proc_devices_poll,
1256 .proc_read = seq_read,
1257 .proc_lseek = seq_lseek,
1258 .proc_release = seq_release,
1259 };
1260
1261 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1262 {
1263 union input_seq_state *state = (union input_seq_state *)&seq->private;
1264 int error;
1265
1266 /* We need to fit into seq->private pointer */
1267 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1268
1269 error = mutex_lock_interruptible(&input_mutex);
1270 if (error) {
1271 state->mutex_acquired = false;
1272 return ERR_PTR(error);
1273 }
1274
1275 state->mutex_acquired = true;
1276 state->pos = *pos;
1277
1278 return seq_list_start(&input_handler_list, *pos);
1279 }
1280
1281 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1282 {
1283 union input_seq_state *state = (union input_seq_state *)&seq->private;
1284
1285 state->pos = *pos + 1;
1286 return seq_list_next(v, &input_handler_list, pos);
1287 }
1288
1289 static int input_handlers_seq_show(struct seq_file *seq, void *v)
1290 {
1291 struct input_handler *handler = container_of(v, struct input_handler, node);
1292 union input_seq_state *state = (union input_seq_state *)&seq->private;
1293
1294 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1295 if (handler->filter)
1296 seq_puts(seq, " (filter)");
1297 if (handler->legacy_minors)
1298 seq_printf(seq, " Minor=%d", handler->minor);
1299 seq_putc(seq, '\n');
1300
1301 return 0;
1302 }
1303
1304 static const struct seq_operations input_handlers_seq_ops = {
1305 .start = input_handlers_seq_start,
1306 .next = input_handlers_seq_next,
1307 .stop = input_seq_stop,
1308 .show = input_handlers_seq_show,
1309 };
1310
1311 static int input_proc_handlers_open(struct inode *inode, struct file *file)
1312 {
1313 return seq_open(file, &input_handlers_seq_ops);
1314 }
1315
1316 static const struct proc_ops input_handlers_proc_ops = {
1317 .proc_open = input_proc_handlers_open,
1318 .proc_read = seq_read,
1319 .proc_lseek = seq_lseek,
1320 .proc_release = seq_release,
1321 };
1322
1323 static int __init input_proc_init(void)
1324 {
1325 struct proc_dir_entry *entry;
1326
1327 proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1328 if (!proc_bus_input_dir)
1329 return -ENOMEM;
1330
1331 entry = proc_create("devices", 0, proc_bus_input_dir,
1332 &input_devices_proc_ops);
1333 if (!entry)
1334 goto fail1;
1335
1336 entry = proc_create("handlers", 0, proc_bus_input_dir,
1337 &input_handlers_proc_ops);
1338 if (!entry)
1339 goto fail2;
1340
1341 return 0;
1342
1343 fail2: remove_proc_entry("devices", proc_bus_input_dir);
1344 fail1: remove_proc_entry("bus/input", NULL);
1345 return -ENOMEM;
1346 }
1347
1348 static void input_proc_exit(void)
1349 {
1350 remove_proc_entry("devices", proc_bus_input_dir);
1351 remove_proc_entry("handlers", proc_bus_input_dir);
1352 remove_proc_entry("bus/input", NULL);
1353 }
1354
1355 #else /* !CONFIG_PROC_FS */
1356 static inline void input_wakeup_procfs_readers(void) { }
1357 static inline int input_proc_init(void) { return 0; }
1358 static inline void input_proc_exit(void) { }
1359 #endif
1360
1361 #define INPUT_DEV_STRING_ATTR_SHOW(name) \
1362 static ssize_t input_dev_show_##name(struct device *dev, \
1363 struct device_attribute *attr, \
1364 char *buf) \
1365 { \
1366 struct input_dev *input_dev = to_input_dev(dev); \
1367 \
1368 return scnprintf(buf, PAGE_SIZE, "%s\n", \
1369 input_dev->name ? input_dev->name : ""); \
1370 } \
1371 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1372
1373 INPUT_DEV_STRING_ATTR_SHOW(name);
1374 INPUT_DEV_STRING_ATTR_SHOW(phys);
1375 INPUT_DEV_STRING_ATTR_SHOW(uniq);
1376
1377 static int input_print_modalias_bits(char *buf, int size,
1378 char name, const unsigned long *bm,
1379 unsigned int min_bit, unsigned int max_bit)
1380 {
1381 int bit = min_bit;
1382 int len = 0;
1383
1384 len += snprintf(buf, max(size, 0), "%c", name);
1385 for_each_set_bit_from(bit, bm, max_bit)
1386 len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
1387 return len;
1388 }
1389
1390 static int input_print_modalias_parts(char *buf, int size, int full_len,
1391 const struct input_dev *id)
1392 {
1393 int len, klen, remainder, space;
1394
1395 len = snprintf(buf, max(size, 0),
1396 "input:b%04Xv%04Xp%04Xe%04X-",
1397 id->id.bustype, id->id.vendor,
1398 id->id.product, id->id.version);
1399
1400 len += input_print_modalias_bits(buf + len, size - len,
1401 'e', id->evbit, 0, EV_MAX);
1402
1403 /*
1404 * Calculate the remaining space in the buffer making sure we
1405 * have place for the terminating 0.
1406 */
1407 space = max(size - (len + 1), 0);
1408
1409 klen = input_print_modalias_bits(buf + len, size - len,
1410 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1411 len += klen;
1412
1413 /*
1414 * If we have more data than we can fit in the buffer, check
1415 * if we can trim key data to fit in the rest. We will indicate
1416 * that key data is incomplete by adding "+" sign at the end, like
1417 * this: "k1,2,3,45,+,".
1418 *
1419 * Note that the shortest key info (if present) is "k+," so we
1420 * can only try to trim if key data is longer than that.
1421 */
1422 if (full_len && size < full_len + 1 && klen > 3) {
1423 remainder = full_len - len;
1424 /*
1425 * We can only trim if we have space for the remainder
1426 * and also for at least "k+," which is 3 more characters.
1427 */
1428 if (remainder <= space - 3) {
1429 /*
1430 * We are guaranteed to have 'k' in the buffer, so
1431 * we need at least 3 additional bytes for storing
1432 * "+," in addition to the remainder.
1433 */
1434 for (int i = size - 1 - remainder - 3; i >= 0; i--) {
1435 if (buf[i] == 'k' || buf[i] == ',') {
1436 strcpy(buf + i + 1, "+,");
1437 len = i + 3; /* Not counting '\0' */
1438 break;
1439 }
1440 }
1441 }
1442 }
1443
1444 len += input_print_modalias_bits(buf + len, size - len,
1445 'r', id->relbit, 0, REL_MAX);
1446 len += input_print_modalias_bits(buf + len, size - len,
1447 'a', id->absbit, 0, ABS_MAX);
1448 len += input_print_modalias_bits(buf + len, size - len,
1449 'm', id->mscbit, 0, MSC_MAX);
1450 len += input_print_modalias_bits(buf + len, size - len,
1451 'l', id->ledbit, 0, LED_MAX);
1452 len += input_print_modalias_bits(buf + len, size - len,
1453 's', id->sndbit, 0, SND_MAX);
1454 len += input_print_modalias_bits(buf + len, size - len,
1455 'f', id->ffbit, 0, FF_MAX);
1456 len += input_print_modalias_bits(buf + len, size - len,
1457 'w', id->swbit, 0, SW_MAX);
1458
1459 return len;
1460 }
1461
1462 static int input_print_modalias(char *buf, int size, const struct input_dev *id)
1463 {
1464 int full_len;
1465
1466 /*
1467 * Printing is done in 2 passes: first one figures out total length
1468 * needed for the modalias string, second one will try to trim key
1469 * data in case when buffer is too small for the entire modalias.
1470 * If the buffer is too small regardless, it will fill as much as it
1471 * can (without trimming key data) into the buffer and leave it to
1472 * the caller to figure out what to do with the result.
1473 */
1474 full_len = input_print_modalias_parts(NULL, 0, 0, id);
1475 return input_print_modalias_parts(buf, size, full_len, id);
1476 }
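
/*
 * The resulting string looks roughly like this (a made-up, shortened device):
 *
 *	input:b0003v045Ep0040e0110-e0,1,2,4,k110,111,112,r0,1,8,am4,lsfw
 *
 * i.e. bus/vendor/product/version followed by a letter-prefixed,
 * comma-separated list of set bits for each event type.
 */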
1477
1478 static ssize_t input_dev_show_modalias(struct device *dev,
1479 struct device_attribute *attr,
1480 char *buf)
1481 {
1482 struct input_dev *id = to_input_dev(dev);
1483 ssize_t len;
1484
1485 len = input_print_modalias(buf, PAGE_SIZE, id);
1486 if (len < PAGE_SIZE - 2)
1487 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1488
1489 return min_t(int, len, PAGE_SIZE);
1490 }
1491 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1492
1493 static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
1494 int max, int add_cr);
1495
1496 static ssize_t input_dev_show_properties(struct device *dev,
1497 struct device_attribute *attr,
1498 char *buf)
1499 {
1500 struct input_dev *input_dev = to_input_dev(dev);
1501 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1502 INPUT_PROP_MAX, true);
1503 return min_t(int, len, PAGE_SIZE);
1504 }
1505 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1506
1507 static int input_inhibit_device(struct input_dev *dev);
1508 static int input_uninhibit_device(struct input_dev *dev);
1509
1510 static ssize_t inhibited_show(struct device *dev,
1511 struct device_attribute *attr,
1512 char *buf)
1513 {
1514 struct input_dev *input_dev = to_input_dev(dev);
1515
1516 return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
1517 }
1518
1519 static ssize_t inhibited_store(struct device *dev,
1520 struct device_attribute *attr, const char *buf,
1521 size_t len)
1522 {
1523 struct input_dev *input_dev = to_input_dev(dev);
1524 ssize_t rv;
1525 bool inhibited;
1526
1527 if (kstrtobool(buf, &inhibited))
1528 return -EINVAL;
1529
1530 if (inhibited)
1531 rv = input_inhibit_device(input_dev);
1532 else
1533 rv = input_uninhibit_device(input_dev);
1534
1535 if (rv != 0)
1536 return rv;
1537
1538 return len;
1539 }
1540
1541 static DEVICE_ATTR_RW(inhibited);
1542
1543 static struct attribute *input_dev_attrs[] = {
1544 &dev_attr_name.attr,
1545 &dev_attr_phys.attr,
1546 &dev_attr_uniq.attr,
1547 &dev_attr_modalias.attr,
1548 &dev_attr_properties.attr,
1549 &dev_attr_inhibited.attr,
1550 NULL
1551 };
1552
1553 static const struct attribute_group input_dev_attr_group = {
1554 .attrs = input_dev_attrs,
1555 };
1556
1557 #define INPUT_DEV_ID_ATTR(name) \
1558 static ssize_t input_dev_show_id_##name(struct device *dev, \
1559 struct device_attribute *attr, \
1560 char *buf) \
1561 { \
1562 struct input_dev *input_dev = to_input_dev(dev); \
1563 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
1564 } \
1565 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1566
1567 INPUT_DEV_ID_ATTR(bustype);
1568 INPUT_DEV_ID_ATTR(vendor);
1569 INPUT_DEV_ID_ATTR(product);
1570 INPUT_DEV_ID_ATTR(version);
1571
1572 static struct attribute *input_dev_id_attrs[] = {
1573 &dev_attr_bustype.attr,
1574 &dev_attr_vendor.attr,
1575 &dev_attr_product.attr,
1576 &dev_attr_version.attr,
1577 NULL
1578 };
1579
1580 static const struct attribute_group input_dev_id_attr_group = {
1581 .name = "id",
1582 .attrs = input_dev_id_attrs,
1583 };
1584
1585 static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
1586 int max, int add_cr)
1587 {
1588 int i;
1589 int len = 0;
1590 bool skip_empty = true;
1591
1592 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1593 len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1594 bitmap[i], skip_empty);
1595 if (len) {
1596 skip_empty = false;
1597 if (i > 0)
1598 len += snprintf(buf + len, max(buf_size - len, 0), " ");
1599 }
1600 }
1601
1602 /*
1603 * If no output was produced print a single 0.
1604 */
1605 if (len == 0)
1606 len = snprintf(buf, buf_size, "%d", 0);
1607
1608 if (add_cr)
1609 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1610
1611 return len;
1612 }
1613
1614 #define INPUT_DEV_CAP_ATTR(ev, bm) \
1615 static ssize_t input_dev_show_cap_##bm(struct device *dev, \
1616 struct device_attribute *attr, \
1617 char *buf) \
1618 { \
1619 struct input_dev *input_dev = to_input_dev(dev); \
1620 int len = input_print_bitmap(buf, PAGE_SIZE, \
1621 input_dev->bm##bit, ev##_MAX, \
1622 true); \
1623 return min_t(int, len, PAGE_SIZE); \
1624 } \
1625 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1626
1627 INPUT_DEV_CAP_ATTR(EV, ev);
1628 INPUT_DEV_CAP_ATTR(KEY, key);
1629 INPUT_DEV_CAP_ATTR(REL, rel);
1630 INPUT_DEV_CAP_ATTR(ABS, abs);
1631 INPUT_DEV_CAP_ATTR(MSC, msc);
1632 INPUT_DEV_CAP_ATTR(LED, led);
1633 INPUT_DEV_CAP_ATTR(SND, snd);
1634 INPUT_DEV_CAP_ATTR(FF, ff);
1635 INPUT_DEV_CAP_ATTR(SW, sw);
1636
1637 static struct attribute *input_dev_caps_attrs[] = {
1638 &dev_attr_ev.attr,
1639 &dev_attr_key.attr,
1640 &dev_attr_rel.attr,
1641 &dev_attr_abs.attr,
1642 &dev_attr_msc.attr,
1643 &dev_attr_led.attr,
1644 &dev_attr_snd.attr,
1645 &dev_attr_ff.attr,
1646 &dev_attr_sw.attr,
1647 NULL
1648 };
1649
1650 static const struct attribute_group input_dev_caps_attr_group = {
1651 .name = "capabilities",
1652 .attrs = input_dev_caps_attrs,
1653 };
1654
1655 static const struct attribute_group *input_dev_attr_groups[] = {
1656 &input_dev_attr_group,
1657 &input_dev_id_attr_group,
1658 &input_dev_caps_attr_group,
1659 &input_poller_attribute_group,
1660 NULL
1661 };
1662
1663 static void input_dev_release(struct device *device)
1664 {
1665 struct input_dev *dev = to_input_dev(device);
1666
1667 input_ff_destroy(dev);
1668 input_mt_destroy_slots(dev);
1669 kfree(dev->poller);
1670 kfree(dev->absinfo);
1671 kfree(dev->vals);
1672 kfree(dev);
1673
1674 module_put(THIS_MODULE);
1675 }
1676
1677 /*
1678 * Input uevent interface - loading event handlers based on
1679 * device bitfields.
1680 */
1681 static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1682 const char *name, const unsigned long *bitmap, int max)
1683 {
1684 int len;
1685
1686 if (add_uevent_var(env, "%s", name))
1687 return -ENOMEM;
1688
1689 len = input_print_bitmap(&env->buf[env->buflen - 1],
1690 sizeof(env->buf) - env->buflen,
1691 bitmap, max, false);
1692 if (len >= (sizeof(env->buf) - env->buflen))
1693 return -ENOMEM;
1694
1695 env->buflen += len;
1696 return 0;
1697 }
1698
1699 /*
1700 * This is a pretty gross hack. When building uevent data the driver core
1701 * may try adding more environment variables to kobj_uevent_env without
1702 * telling us, so we have no idea how much of the buffer we can use to
1703 * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
1704 * reduce the amount of memory we will use for the modalias environment variable.
1705 *
1706 * The potential additions are:
1707 *
1708 * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
1709 * HOME=/ (6 bytes)
1710 * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
1711 *
1712 * 68 bytes total. Allow extra buffer - 96 bytes
1713 */
1714 #define UEVENT_ENV_EXTRA_LEN 96
1715
1716 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1717 const struct input_dev *dev)
1718 {
1719 int len;
1720
1721 if (add_uevent_var(env, "MODALIAS="))
1722 return -ENOMEM;
1723
1724 len = input_print_modalias(&env->buf[env->buflen - 1],
1725 (int)sizeof(env->buf) - env->buflen -
1726 UEVENT_ENV_EXTRA_LEN,
1727 dev);
1728 if (len >= ((int)sizeof(env->buf) - env->buflen -
1729 UEVENT_ENV_EXTRA_LEN))
1730 return -ENOMEM;
1731
1732 env->buflen += len;
1733 return 0;
1734 }
1735
1736 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
1737 do { \
1738 int err = add_uevent_var(env, fmt, val); \
1739 if (err) \
1740 return err; \
1741 } while (0)
1742
1743 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \
1744 do { \
1745 int err = input_add_uevent_bm_var(env, name, bm, max); \
1746 if (err) \
1747 return err; \
1748 } while (0)
1749
1750 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
1751 do { \
1752 int err = input_add_uevent_modalias_var(env, dev); \
1753 if (err) \
1754 return err; \
1755 } while (0)
1756
1757 static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
1758 {
1759 const struct input_dev *dev = to_input_dev(device);
1760
1761 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1762 dev->id.bustype, dev->id.vendor,
1763 dev->id.product, dev->id.version);
1764 if (dev->name)
1765 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1766 if (dev->phys)
1767 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1768 if (dev->uniq)
1769 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1770
1771 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1772
1773 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1774 if (test_bit(EV_KEY, dev->evbit))
1775 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1776 if (test_bit(EV_REL, dev->evbit))
1777 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1778 if (test_bit(EV_ABS, dev->evbit))
1779 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1780 if (test_bit(EV_MSC, dev->evbit))
1781 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1782 if (test_bit(EV_LED, dev->evbit))
1783 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1784 if (test_bit(EV_SND, dev->evbit))
1785 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1786 if (test_bit(EV_FF, dev->evbit))
1787 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1788 if (test_bit(EV_SW, dev->evbit))
1789 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1790
1791 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1792
1793 return 0;
1794 }
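
/*
 * For a hypothetical USB mouse the resulting uevent environment looks
 * roughly like (values made up for illustration):
 *
 *	PRODUCT=3/46d/c077/111
 *	NAME="Example USB Optical Mouse"
 *	PROP=0
 *	EV=17
 *	KEY=70000 0 0 0 0
 *	REL=103
 *	MSC=10
 *	MODALIAS=input:b0003v046DpC077e0111-e0,1,2,4,k110,111,112,r0,1,8,am4,lsfw
 */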
1795
1796 #define INPUT_DO_TOGGLE(dev, type, bits, on) \
1797 do { \
1798 int i; \
1799 bool active; \
1800 \
1801 if (!test_bit(EV_##type, dev->evbit)) \
1802 break; \
1803 \
1804 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \
1805 active = test_bit(i, dev->bits); \
1806 if (!active && !on) \
1807 continue; \
1808 \
1809 dev->event(dev, EV_##type, i, on ? active : 0); \
1810 } \
1811 } while (0)
1812
1813 static void input_dev_toggle(struct input_dev *dev, bool activate)
1814 {
1815 if (!dev->event)
1816 return;
1817
1818 INPUT_DO_TOGGLE(dev, LED, led, activate);
1819 INPUT_DO_TOGGLE(dev, SND, snd, activate);
1820
1821 if (activate && test_bit(EV_REP, dev->evbit)) {
1822 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1823 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1824 }
1825 }
1826
1827 /**
1828 * input_reset_device() - reset/restore the state of input device
1829 * @dev: input device whose state needs to be reset
1830 *
1831 * This function tries to reset the state of an opened input device and
1832 * bring internal state and state of the hardware in sync with each other.
1833 * We mark all keys as released, restore LED state, repeat rate, etc.
1834 */
1835 void input_reset_device(struct input_dev *dev)
1836 {
1837 unsigned long flags;
1838
1839 mutex_lock(&dev->mutex);
1840 spin_lock_irqsave(&dev->event_lock, flags);
1841
1842 input_dev_toggle(dev, true);
1843 if (input_dev_release_keys(dev))
1844 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1845
1846 spin_unlock_irqrestore(&dev->event_lock, flags);
1847 mutex_unlock(&dev->mutex);
1848 }
1849 EXPORT_SYMBOL(input_reset_device);
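
/*
 * Example (illustrative sketch, not taken from an in-tree driver): a driver
 * that loses key/LED state across a firmware reload can restore it from its
 * resume path. All "example_*" names below are hypothetical.
 *
 *	static int example_resume(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *
 *		example_reload_firmware(priv);
 *		input_reset_device(priv->input);
 *		return 0;
 *	}
 */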
1850
1851 static int input_inhibit_device(struct input_dev *dev)
1852 {
1853 mutex_lock(&dev->mutex);
1854
1855 if (dev->inhibited)
1856 goto out;
1857
1858 if (dev->users) {
1859 if (dev->close)
1860 dev->close(dev);
1861 if (dev->poller)
1862 input_dev_poller_stop(dev->poller);
1863 }
1864
1865 spin_lock_irq(&dev->event_lock);
1866 input_mt_release_slots(dev);
1867 input_dev_release_keys(dev);
1868 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1869 input_dev_toggle(dev, false);
1870 spin_unlock_irq(&dev->event_lock);
1871
1872 dev->inhibited = true;
1873
1874 out:
1875 mutex_unlock(&dev->mutex);
1876 return 0;
1877 }
1878
1879 static int input_uninhibit_device(struct input_dev *dev)
1880 {
1881 int ret = 0;
1882
1883 mutex_lock(&dev->mutex);
1884
1885 if (!dev->inhibited)
1886 goto out;
1887
1888 if (dev->users) {
1889 if (dev->open) {
1890 ret = dev->open(dev);
1891 if (ret)
1892 goto out;
1893 }
1894 if (dev->poller)
1895 input_dev_poller_start(dev->poller);
1896 }
1897
1898 dev->inhibited = false;
1899 spin_lock_irq(&dev->event_lock);
1900 input_dev_toggle(dev, true);
1901 spin_unlock_irq(&dev->event_lock);
1902
1903 out:
1904 mutex_unlock(&dev->mutex);
1905 return ret;
1906 }
1907
1908 static int input_dev_suspend(struct device *dev)
1909 {
1910 struct input_dev *input_dev = to_input_dev(dev);
1911
1912 spin_lock_irq(&input_dev->event_lock);
1913
1914 /*
1915 * Keys that are pressed now are unlikely to be
1916 * still pressed when we resume.
1917 */
1918 if (input_dev_release_keys(input_dev))
1919 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1920
1921 /* Turn off LEDs and sounds, if any are active. */
1922 input_dev_toggle(input_dev, false);
1923
1924 spin_unlock_irq(&input_dev->event_lock);
1925
1926 return 0;
1927 }
1928
1929 static int input_dev_resume(struct device *dev)
1930 {
1931 struct input_dev *input_dev = to_input_dev(dev);
1932
1933 spin_lock_irq(&input_dev->event_lock);
1934
1935 /* Restore state of LEDs and sounds, if any were active. */
1936 input_dev_toggle(input_dev, true);
1937
1938 spin_unlock_irq(&input_dev->event_lock);
1939
1940 return 0;
1941 }
1942
1943 static int input_dev_freeze(struct device *dev)
1944 {
1945 struct input_dev *input_dev = to_input_dev(dev);
1946
1947 spin_lock_irq(&input_dev->event_lock);
1948
1949 /*
1950 * Keys that are pressed now are unlikely to be
1951 * still pressed when we resume.
1952 */
1953 if (input_dev_release_keys(input_dev))
1954 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1955
1956 spin_unlock_irq(&input_dev->event_lock);
1957
1958 return 0;
1959 }
1960
1961 static int input_dev_poweroff(struct device *dev)
1962 {
1963 struct input_dev *input_dev = to_input_dev(dev);
1964
1965 spin_lock_irq(&input_dev->event_lock);
1966
1967 /* Turn off LEDs and sounds, if any are active. */
1968 input_dev_toggle(input_dev, false);
1969
1970 spin_unlock_irq(&input_dev->event_lock);
1971
1972 return 0;
1973 }
1974
1975 static const struct dev_pm_ops input_dev_pm_ops = {
1976 .suspend = input_dev_suspend,
1977 .resume = input_dev_resume,
1978 .freeze = input_dev_freeze,
1979 .poweroff = input_dev_poweroff,
1980 .restore = input_dev_resume,
1981 };
1982
1983 static const struct device_type input_dev_type = {
1984 .groups = input_dev_attr_groups,
1985 .release = input_dev_release,
1986 .uevent = input_dev_uevent,
1987 .pm = pm_sleep_ptr(&input_dev_pm_ops),
1988 };
1989
1990 static char *input_devnode(const struct device *dev, umode_t *mode)
1991 {
1992 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1993 }
1994
1995 struct class input_class = {
1996 .name = "input",
1997 .devnode = input_devnode,
1998 };
1999 EXPORT_SYMBOL_GPL(input_class);
2000
2001 /**
2002 * input_allocate_device - allocate memory for new input device
2003 *
2004 * Returns prepared struct input_dev or %NULL.
2005 *
2006 * NOTE: Use input_free_device() to free devices that have not been
2007 * registered; input_unregister_device() should be used for already
2008 * registered devices.
2009 */
2010 struct input_dev *input_allocate_device(void)
2011 {
2012 static atomic_t input_no = ATOMIC_INIT(-1);
2013 struct input_dev *dev;
2014
2015 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2016 if (dev) {
2017 dev->dev.type = &input_dev_type;
2018 dev->dev.class = &input_class;
2019 device_initialize(&dev->dev);
2020 mutex_init(&dev->mutex);
2021 spin_lock_init(&dev->event_lock);
2022 timer_setup(&dev->timer, NULL, 0);
2023 INIT_LIST_HEAD(&dev->h_list);
2024 INIT_LIST_HEAD(&dev->node);
2025
2026 dev_set_name(&dev->dev, "input%lu",
2027 (unsigned long)atomic_inc_return(&input_no));
2028
2029 __module_get(THIS_MODULE);
2030 }
2031
2032 return dev;
2033 }
2034 EXPORT_SYMBOL(input_allocate_device);
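
/*
 * Example (illustrative sketch): the usual allocate/register/free-on-error
 * pattern for an unmanaged device. The device name and capability below are
 * hypothetical.
 *
 *	struct input_dev *input;
 *	int error;
 *
 *	input = input_allocate_device();
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "Example Button";
 *	input->id.bustype = BUS_HOST;
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);
 *		return error;
 *	}
 */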
2035
2036 struct input_devres {
2037 struct input_dev *input;
2038 };
2039
2040 static int devm_input_device_match(struct device *dev, void *res, void *data)
2041 {
2042 struct input_devres *devres = res;
2043
2044 return devres->input == data;
2045 }
2046
2047 static void devm_input_device_release(struct device *dev, void *res)
2048 {
2049 struct input_devres *devres = res;
2050 struct input_dev *input = devres->input;
2051
2052 dev_dbg(dev, "%s: dropping reference to %s\n",
2053 __func__, dev_name(&input->dev));
2054 input_put_device(input);
2055 }
2056
2057 /**
2058 * devm_input_allocate_device - allocate managed input device
2059 * @dev: device owning the input device being created
2060 *
2061 * Returns prepared struct input_dev or %NULL.
2062 *
2063 * Managed input devices do not need to be explicitly unregistered or
2064 * freed; that is done automatically when the owner device unbinds from
2065 * its driver (or when binding fails). Once a managed input device is
2066 * allocated, it is ready to be set up and registered in the same fashion
2067 * as a regular input device. There are no special
2068 * devm_input_device_[un]register() variants; the regular ones work with
2069 * both managed and unmanaged devices, should you need them. In most cases,
2070 * however, a managed input device need not be explicitly unregistered or freed.
2071 *
2072 * NOTE: the owner device is set up as parent of input device and users
2073 * should not override it.
2074 */
2075 struct input_dev *devm_input_allocate_device(struct device *dev)
2076 {
2077 struct input_dev *input;
2078 struct input_devres *devres;
2079
2080 devres = devres_alloc(devm_input_device_release,
2081 sizeof(*devres), GFP_KERNEL);
2082 if (!devres)
2083 return NULL;
2084
2085 input = input_allocate_device();
2086 if (!input) {
2087 devres_free(devres);
2088 return NULL;
2089 }
2090
2091 input->dev.parent = dev;
2092 input->devres_managed = true;
2093
2094 devres->input = input;
2095 devres_add(dev, devres);
2096
2097 return input;
2098 }
2099 EXPORT_SYMBOL(devm_input_allocate_device);
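
/*
 * Example (illustrative sketch): a probe() routine using the managed
 * allocator; neither the error path nor remove() needs to free or
 * unregister the device. "example_probe" and the key chosen are
 * hypothetical.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "Example Wakeup Key";
 *		input_set_capability(input, EV_KEY, KEY_WAKEUP);
 *
 *		return input_register_device(input);
 *	}
 */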
2100
2101 /**
2102 * input_free_device - free memory occupied by input_dev structure
2103 * @dev: input device to free
2104 *
2105 * This function should only be used if input_register_device()
2106 * was not called yet or if it failed. Once the device has been registered,
2107 * use input_unregister_device(); memory will be freed once the last
2108 * reference to the device is dropped.
2109 *
2110 * Device should be allocated by input_allocate_device().
2111 *
2112 * NOTE: If there are references to the input device then memory
2113 * will not be freed until last reference is dropped.
2114 */
2115 void input_free_device(struct input_dev *dev)
2116 {
2117 if (dev) {
2118 if (dev->devres_managed)
2119 WARN_ON(devres_destroy(dev->dev.parent,
2120 devm_input_device_release,
2121 devm_input_device_match,
2122 dev));
2123 input_put_device(dev);
2124 }
2125 }
2126 EXPORT_SYMBOL(input_free_device);
2127
2128 /**
2129 * input_set_timestamp - set timestamp for input events
2130 * @dev: input device to set timestamp for
2131 * @timestamp: the time at which the event has occurred
2132 * in CLOCK_MONOTONIC
2133 *
2134 * This function is intended to provide the input system with a more
2135 * accurate time of when an event actually occurred. The driver should
2136 * call this function as soon as a timestamp is acquired, ensuring that
2137 * clock conversions in input_set_timestamp() are done correctly.
2138 *
2139 * The system entering suspend state between timestamp acquisition and
2140 * calling input_set_timestamp can result in inaccurate conversions.
2141 */
2142 void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
2143 {
2144 dev->timestamp[INPUT_CLK_MONO] = timestamp;
2145 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
2146 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
2147 TK_OFFS_BOOT);
2148 }
2149 EXPORT_SYMBOL(input_set_timestamp);
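
/*
 * Example (illustrative sketch): capture the timestamp in the interrupt
 * handler, before any potentially slow bus I/O, so that the reported event
 * time is as close as possible to when the hardware observed it. The
 * "example_*" names are hypothetical.
 *
 *	static irqreturn_t example_irq(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		input_set_timestamp(priv->input, ktime_get());
 *		schedule_work(&priv->read_work);
 *		return IRQ_HANDLED;
 *	}
 */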
2150
2151 /**
2152 * input_get_timestamp - get timestamp for input events
2153 * @dev: input device to get timestamp from
2154 *
2155 * A valid timestamp is a timestamp of non-zero value.
2156 */
2157 ktime_t *input_get_timestamp(struct input_dev *dev)
2158 {
2159 const ktime_t invalid_timestamp = ktime_set(0, 0);
2160
2161 if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
2162 input_set_timestamp(dev, ktime_get());
2163
2164 return dev->timestamp;
2165 }
2166 EXPORT_SYMBOL(input_get_timestamp);
2167
2168 /**
2169 * input_set_capability - mark device as capable of a certain event
2170 * @dev: device that is capable of emitting or accepting event
2171 * @type: type of the event (EV_KEY, EV_REL, etc...)
2172 * @code: event code
2173 *
2174 * In addition to setting up the corresponding bit in the appropriate
2175 * capability bitmap, the function also adjusts dev->evbit.
2176 */
2177 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
2178 {
2179 if (type < EV_CNT && input_max_code[type] &&
2180 code > input_max_code[type]) {
2181 pr_err("%s: invalid code %u for type %u\n", __func__, code,
2182 type);
2183 dump_stack();
2184 return;
2185 }
2186
2187 switch (type) {
2188 case EV_KEY:
2189 __set_bit(code, dev->keybit);
2190 break;
2191
2192 case EV_REL:
2193 __set_bit(code, dev->relbit);
2194 break;
2195
2196 case EV_ABS:
2197 input_alloc_absinfo(dev);
2198 __set_bit(code, dev->absbit);
2199 break;
2200
2201 case EV_MSC:
2202 __set_bit(code, dev->mscbit);
2203 break;
2204
2205 case EV_SW:
2206 __set_bit(code, dev->swbit);
2207 break;
2208
2209 case EV_LED:
2210 __set_bit(code, dev->ledbit);
2211 break;
2212
2213 case EV_SND:
2214 __set_bit(code, dev->sndbit);
2215 break;
2216
2217 case EV_FF:
2218 __set_bit(code, dev->ffbit);
2219 break;
2220
2221 case EV_PWR:
2222 /* do nothing */
2223 break;
2224
2225 default:
2226 pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
2227 dump_stack();
2228 return;
2229 }
2230
2231 __set_bit(type, dev->evbit);
2232 }
2233 EXPORT_SYMBOL(input_set_capability);
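
/*
 * Example (illustrative sketch): declaring that a hypothetical device emits
 * a mute key and drives a mute LED; EV_KEY and EV_LED are set in dev->evbit
 * automatically.
 *
 *	input_set_capability(input, EV_KEY, KEY_MUTE);
 *	input_set_capability(input, EV_LED, LED_MUTE);
 */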
2234
2235 static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
2236 {
2237 int mt_slots;
2238 int i;
2239 unsigned int events;
2240
2241 if (dev->mt) {
2242 mt_slots = dev->mt->num_slots;
2243 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
2244 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
2245 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
2246 mt_slots = clamp(mt_slots, 2, 32);
2247 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
2248 mt_slots = 2;
2249 } else {
2250 mt_slots = 0;
2251 }
2252
2253 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
2254
2255 if (test_bit(EV_ABS, dev->evbit))
2256 for_each_set_bit(i, dev->absbit, ABS_CNT)
2257 events += input_is_mt_axis(i) ? mt_slots : 1;
2258
2259 if (test_bit(EV_REL, dev->evbit))
2260 events += bitmap_weight(dev->relbit, REL_CNT);
2261
2262 /* Make room for KEY and MSC events */
2263 events += 7;
2264
2265 return events;
2266 }
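
/*
 * Worked example (hypothetical device): a single-touch tablet that sets
 * EV_ABS with ABS_X, ABS_Y and ABS_PRESSURE and has no MT axes gets
 * mt_slots = 0, so the estimate is 1 (SYN) + 3 (one per absolute axis)
 * + 7 (KEY/MSC headroom) = 11 events per packet.
 */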
2267
2268 #define INPUT_CLEANSE_BITMASK(dev, type, bits) \
2269 do { \
2270 if (!test_bit(EV_##type, dev->evbit)) \
2271 memset(dev->bits##bit, 0, \
2272 sizeof(dev->bits##bit)); \
2273 } while (0)
2274
2275 static void input_cleanse_bitmasks(struct input_dev *dev)
2276 {
2277 INPUT_CLEANSE_BITMASK(dev, KEY, key);
2278 INPUT_CLEANSE_BITMASK(dev, REL, rel);
2279 INPUT_CLEANSE_BITMASK(dev, ABS, abs);
2280 INPUT_CLEANSE_BITMASK(dev, MSC, msc);
2281 INPUT_CLEANSE_BITMASK(dev, LED, led);
2282 INPUT_CLEANSE_BITMASK(dev, SND, snd);
2283 INPUT_CLEANSE_BITMASK(dev, FF, ff);
2284 INPUT_CLEANSE_BITMASK(dev, SW, sw);
2285 }
2286
2287 static void __input_unregister_device(struct input_dev *dev)
2288 {
2289 struct input_handle *handle, *next;
2290
2291 input_disconnect_device(dev);
2292
2293 mutex_lock(&input_mutex);
2294
2295 list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
2296 handle->handler->disconnect(handle);
2297 WARN_ON(!list_empty(&dev->h_list));
2298
2299 del_timer_sync(&dev->timer);
2300 list_del_init(&dev->node);
2301
2302 input_wakeup_procfs_readers();
2303
2304 mutex_unlock(&input_mutex);
2305
2306 device_del(&dev->dev);
2307 }
2308
2309 static void devm_input_device_unregister(struct device *dev, void *res)
2310 {
2311 struct input_devres *devres = res;
2312 struct input_dev *input = devres->input;
2313
2314 dev_dbg(dev, "%s: unregistering device %s\n",
2315 __func__, dev_name(&input->dev));
2316 __input_unregister_device(input);
2317 }
2318
2319 /*
2320 * Generate a software autorepeat event. Note that we take
2321 * dev->event_lock here to avoid racing with input_event(),
2322 * which may cause keys to get "stuck".
2323 */
2324 static void input_repeat_key(struct timer_list *t)
2325 {
2326 struct input_dev *dev = from_timer(dev, t, timer);
2327 unsigned long flags;
2328
2329 spin_lock_irqsave(&dev->event_lock, flags);
2330
2331 if (!dev->inhibited &&
2332 test_bit(dev->repeat_key, dev->key) &&
2333 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
2334
2335 input_set_timestamp(dev, ktime_get());
2336 input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
2337 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
2338
2339 if (dev->rep[REP_PERIOD])
2340 mod_timer(&dev->timer, jiffies +
2341 msecs_to_jiffies(dev->rep[REP_PERIOD]));
2342 }
2343
2344 spin_unlock_irqrestore(&dev->event_lock, flags);
2345 }
2346
2347 /**
2348 * input_enable_softrepeat - enable software autorepeat
2349 * @dev: input device
2350 * @delay: repeat delay
2351 * @period: repeat period
2352 *
2353 * Enable software autorepeat on the input device.
2354 */
2355 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2356 {
2357 dev->timer.function = input_repeat_key;
2358 dev->rep[REP_DELAY] = delay;
2359 dev->rep[REP_PERIOD] = period;
2360 }
2361 EXPORT_SYMBOL(input_enable_softrepeat);
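
/*
 * Example (illustrative sketch): a driver that wants software autorepeat
 * but with a slower rate than the 250 ms delay / 33 ms period default that
 * input_register_device() would apply. The values are hypothetical.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 500, 125);
 */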
2362
2363 bool input_device_enabled(struct input_dev *dev)
2364 {
2365 lockdep_assert_held(&dev->mutex);
2366
2367 return !dev->inhibited && dev->users > 0;
2368 }
2369 EXPORT_SYMBOL_GPL(input_device_enabled);
2370
2371 /**
2372 * input_register_device - register device with input core
2373 * @dev: device to be registered
2374 *
2375 * This function registers the device with the input core. The device must
2376 * be allocated with input_allocate_device() and all of its capabilities
2377 * set up before registering.
2378 * If the function fails, the device must be freed with input_free_device().
2379 * Once the device has been successfully registered it can be unregistered
2380 * with input_unregister_device(); input_free_device() should not be
2381 * called in this case.
2382 *
2383 * Note that this function is also used to register managed input devices
2384 * (ones allocated with devm_input_allocate_device()). Such managed input
2385 * devices need not be explicitly unregistered or freed; their tear-down
2386 * is controlled by the devres infrastructure. It is also worth noting
2387 * that tear-down of managed input devices is internally a 2-step process:
2388 * a registered managed input device is first unregistered, but stays in
2389 * memory and can still handle input_event() calls (although events will
2390 * not be delivered anywhere). The freeing of the managed input device
2391 * happens later, when the devres stack is unwound to the point where the
2392 * device allocation was made.
2393 */
2394 int input_register_device(struct input_dev *dev)
2395 {
2396 struct input_devres *devres = NULL;
2397 struct input_handler *handler;
2398 unsigned int packet_size;
2399 const char *path;
2400 int error;
2401
2402 if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2403 dev_err(&dev->dev,
2404 "Absolute device without dev->absinfo, refusing to register\n");
2405 return -EINVAL;
2406 }
2407
2408 if (dev->devres_managed) {
2409 devres = devres_alloc(devm_input_device_unregister,
2410 sizeof(*devres), GFP_KERNEL);
2411 if (!devres)
2412 return -ENOMEM;
2413
2414 devres->input = dev;
2415 }
2416
2417 /* Every input device generates EV_SYN/SYN_REPORT events. */
2418 __set_bit(EV_SYN, dev->evbit);
2419
2420 /* KEY_RESERVED is not supposed to be transmitted to userspace. */
2421 __clear_bit(KEY_RESERVED, dev->keybit);
2422
2423 /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
2424 input_cleanse_bitmasks(dev);
2425
2426 packet_size = input_estimate_events_per_packet(dev);
2427 if (dev->hint_events_per_packet < packet_size)
2428 dev->hint_events_per_packet = packet_size;
2429
2430 dev->max_vals = dev->hint_events_per_packet + 2;
2431 dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
2432 if (!dev->vals) {
2433 error = -ENOMEM;
2434 goto err_devres_free;
2435 }
2436
2437 /*
2438 * If delay and period are pre-set by the driver, then autorepeating
2439 * is handled by the driver itself and we don't do it in input.c.
2440 */
2441 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
2442 input_enable_softrepeat(dev, 250, 33);
2443
2444 if (!dev->getkeycode)
2445 dev->getkeycode = input_default_getkeycode;
2446
2447 if (!dev->setkeycode)
2448 dev->setkeycode = input_default_setkeycode;
2449
2450 if (dev->poller)
2451 input_dev_poller_finalize(dev->poller);
2452
2453 error = device_add(&dev->dev);
2454 if (error)
2455 goto err_free_vals;
2456
2457 path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
2458 pr_info("%s as %s\n",
2459 dev->name ? dev->name : "Unspecified device",
2460 path ? path : "N/A");
2461 kfree(path);
2462
2463 error = mutex_lock_interruptible(&input_mutex);
2464 if (error)
2465 goto err_device_del;
2466
2467 list_add_tail(&dev->node, &input_dev_list);
2468
2469 list_for_each_entry(handler, &input_handler_list, node)
2470 input_attach_handler(dev, handler);
2471
2472 input_wakeup_procfs_readers();
2473
2474 mutex_unlock(&input_mutex);
2475
2476 if (dev->devres_managed) {
2477 dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
2478 __func__, dev_name(&dev->dev));
2479 devres_add(dev->dev.parent, devres);
2480 }
2481 return 0;
2482
2483 err_device_del:
2484 device_del(&dev->dev);
2485 err_free_vals:
2486 kfree(dev->vals);
2487 dev->vals = NULL;
2488 err_devres_free:
2489 devres_free(devres);
2490 return error;
2491 }
2492 EXPORT_SYMBOL(input_register_device);
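
/*
 * Example (illustrative sketch): an absolute device must have its axis
 * ranges set up (which allocates dev->absinfo) before registration,
 * otherwise the -EINVAL check above triggers. The ranges below are
 * hypothetical.
 *
 *	input_set_abs_params(input, ABS_X, 0, 1023, 4, 8);
 *	input_set_abs_params(input, ABS_Y, 0, 1023, 4, 8);
 *	input_set_capability(input, EV_KEY, BTN_TOUCH);
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);
 *		return error;
 *	}
 */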
2493
2494 /**
2495 * input_unregister_device - unregister previously registered device
2496 * @dev: device to be unregistered
2497 *
2498 * This function unregisters an input device. Once the device is unregistered,
2499 * the caller should not try to access it as it may get freed at any moment.
2500 */
2501 void input_unregister_device(struct input_dev *dev)
2502 {
2503 if (dev->devres_managed) {
2504 WARN_ON(devres_destroy(dev->dev.parent,
2505 devm_input_device_unregister,
2506 devm_input_device_match,
2507 dev));
2508 __input_unregister_device(dev);
2509 /*
2510 * We do not do input_put_device() here because it will be done
2511 * when the second devres entry fires.
2512 */
2513 } else {
2514 __input_unregister_device(dev);
2515 input_put_device(dev);
2516 }
2517 }
2518 EXPORT_SYMBOL(input_unregister_device);
2519
2520 /**
2521 * input_register_handler - register a new input handler
2522 * @handler: handler to be registered
2523 *
2524 * This function registers a new input handler (interface) for input
2525 * devices in the system and attaches it to all input devices that
2526 * are compatible with the handler.
2527 */
2528 int input_register_handler(struct input_handler *handler)
2529 {
2530 struct input_dev *dev;
2531 int error;
2532
2533 error = mutex_lock_interruptible(&input_mutex);
2534 if (error)
2535 return error;
2536
2537 INIT_LIST_HEAD(&handler->h_list);
2538
2539 list_add_tail(&handler->node, &input_handler_list);
2540
2541 list_for_each_entry(dev, &input_dev_list, node)
2542 input_attach_handler(dev, handler);
2543
2544 input_wakeup_procfs_readers();
2545
2546 mutex_unlock(&input_mutex);
2547 return 0;
2548 }
2549 EXPORT_SYMBOL(input_register_handler);
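
/*
 * Example (illustrative sketch): a minimal handler that asks to be attached
 * to every input device, in the style of evbug. The connect()/disconnect()/
 * event() methods are assumed to exist; all "example_*" names are
 * hypothetical.
 *
 *	static const struct input_device_id example_ids[] = {
 *		{ .driver_info = 1 },	(matches all devices)
 *		{ },
 *	};
 *
 *	static struct input_handler example_handler = {
 *		.event		= example_event,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *		.name		= "example",
 *		.id_table	= example_ids,
 *	};
 *
 *	error = input_register_handler(&example_handler);
 */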
2550
2551 /**
2552 * input_unregister_handler - unregisters an input handler
2553 * @handler: handler to be unregistered
2554 *
2555 * This function disconnects a handler from its input devices and
2556 * removes it from lists of known handlers.
2557 */
2558 void input_unregister_handler(struct input_handler *handler)
2559 {
2560 struct input_handle *handle, *next;
2561
2562 mutex_lock(&input_mutex);
2563
2564 list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2565 handler->disconnect(handle);
2566 WARN_ON(!list_empty(&handler->h_list));
2567
2568 list_del_init(&handler->node);
2569
2570 input_wakeup_procfs_readers();
2571
2572 mutex_unlock(&input_mutex);
2573 }
2574 EXPORT_SYMBOL(input_unregister_handler);
2575
2576 /**
2577 * input_handler_for_each_handle - handle iterator
2578 * @handler: input handler to iterate
2579 * @data: data for the callback
2580 * @fn: function to be called for each handle
2581 *
2582 * Iterate over @handler's list of handles, and call @fn for each, passing
2583 * it @data, stopping when @fn returns a non-zero value. The function
2584 * uses RCU to traverse the list and therefore may be used in atomic
2585 * contexts. The @fn callback is invoked from an RCU critical section and
2586 * thus must not sleep.
2587 */
2588 int input_handler_for_each_handle(struct input_handler *handler, void *data,
2589 int (*fn)(struct input_handle *, void *))
2590 {
2591 struct input_handle *handle;
2592 int retval = 0;
2593
2594 rcu_read_lock();
2595
2596 list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2597 retval = fn(handle, data);
2598 if (retval)
2599 break;
2600 }
2601
2602 rcu_read_unlock();
2603
2604 return retval;
2605 }
2606 EXPORT_SYMBOL(input_handler_for_each_handle);
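
/*
 * Example (illustrative sketch): using the iterator from atomic context to
 * check whether any handle of a handler is currently open. The handler and
 * callback names are hypothetical.
 *
 *	static int example_handle_is_open(struct input_handle *handle, void *data)
 *	{
 *		return handle->open > 0;
 *	}
 *
 *	open = input_handler_for_each_handle(&example_handler, NULL,
 *					     example_handle_is_open);
 */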
2607
2608 /**
2609 * input_register_handle - register a new input handle
2610 * @handle: handle to register
2611 *
2612 * This function puts a new input handle onto the device's
2613 * and handler's lists so that events can flow through
2614 * it once it is opened using input_open_device().
2615 *
2616 * This function is supposed to be called from handler's
2617 * connect() method.
2618 */
2619 int input_register_handle(struct input_handle *handle)
2620 {
2621 struct input_handler *handler = handle->handler;
2622 struct input_dev *dev = handle->dev;
2623 int error;
2624
2625 /*
2626 * We take dev->mutex here to prevent race with
2627 * input_release_device().
2628 */
2629 error = mutex_lock_interruptible(&dev->mutex);
2630 if (error)
2631 return error;
2632
2633 /*
2634 * Filters go to the head of the list, normal handlers
2635 * to the tail.
2636 */
2637 if (handler->filter)
2638 list_add_rcu(&handle->d_node, &dev->h_list);
2639 else
2640 list_add_tail_rcu(&handle->d_node, &dev->h_list);
2641
2642 mutex_unlock(&dev->mutex);
2643
2644 /*
2645 * Since we are supposed to be called from ->connect()
2646 * which is mutually exclusive with ->disconnect()
2647 * we can't be racing with input_unregister_handle()
2648 * and so separate lock is not needed here.
2649 */
2650 list_add_tail_rcu(&handle->h_node, &handler->h_list);
2651
2652 if (handler->start)
2653 handler->start(handle);
2654
2655 return 0;
2656 }
2657 EXPORT_SYMBOL(input_register_handle);
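
/*
 * Example (illustrative sketch): the usual shape of a handler's connect()
 * method: allocate a handle, register it, then open the device so that
 * events start flowing. "example" is a hypothetical handler.
 *
 *	static int example_connect(struct input_handler *handler,
 *				   struct input_dev *dev,
 *				   const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "example";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	 err_unregister_handle:
 *		input_unregister_handle(handle);
 *	 err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 */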
2658
2659 /**
2660 * input_unregister_handle - unregister an input handle
2661 * @handle: handle to unregister
2662 *
2663 * This function removes input handle from device's
2664 * and handler's lists.
2665 *
2666 * This function is supposed to be called from handler's
2667 * disconnect() method.
2668 */
2669 void input_unregister_handle(struct input_handle *handle)
2670 {
2671 struct input_dev *dev = handle->dev;
2672
2673 list_del_rcu(&handle->h_node);
2674
2675 /*
2676 * Take dev->mutex to prevent race with input_release_device().
2677 */
2678 mutex_lock(&dev->mutex);
2679 list_del_rcu(&handle->d_node);
2680 mutex_unlock(&dev->mutex);
2681
2682 synchronize_rcu();
2683 }
2684 EXPORT_SYMBOL(input_unregister_handle);
2685
2686 /**
2687 * input_get_new_minor - allocates a new input minor number
2688 * @legacy_base: beginning of the legacy range to be searched
2689 * @legacy_num: size of legacy range
2690 * @allow_dynamic: whether we can also take ID from the dynamic range
2691 *
2692 * This function allocates a new device minor from the input major namespace.
2693 * The caller can request a legacy minor by specifying the @legacy_base and
2694 * @legacy_num parameters, and whether an ID may be allocated from the dynamic
2695 * range if there are no free IDs in the legacy range.
2696 */
2697 int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2698 bool allow_dynamic)
2699 {
2700 /*
2701 * This function should be called from input handler's ->connect()
2702 * methods, which are serialized with input_mutex, so no additional
2703 * locking is needed here.
2704 */
2705 if (legacy_base >= 0) {
2706 int minor = ida_simple_get(&input_ida,
2707 legacy_base,
2708 legacy_base + legacy_num,
2709 GFP_KERNEL);
2710 if (minor >= 0 || !allow_dynamic)
2711 return minor;
2712 }
2713
2714 return ida_simple_get(&input_ida,
2715 INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2716 GFP_KERNEL);
2717 }
2718 EXPORT_SYMBOL(input_get_new_minor);
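
/*
 * Example (illustrative sketch): a handler's connect() method reserving a
 * minor, trying its legacy range first and falling back to the dynamic
 * range. EXAMPLE_MINOR_BASE and EXAMPLE_MINORS are hypothetical constants.
 *
 *	minor = input_get_new_minor(EXAMPLE_MINOR_BASE, EXAMPLE_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 *
 *	(and input_free_minor(minor) on the teardown path)
 */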
2719
2720 /**
2721 * input_free_minor - release previously allocated minor
2722 * @minor: minor to be released
2723 *
2724 * This function releases previously allocated input minor so that it can be
2725 * reused later.
2726 */
2727 void input_free_minor(unsigned int minor)
2728 {
2729 ida_simple_remove(&input_ida, minor);
2730 }
2731 EXPORT_SYMBOL(input_free_minor);
2732
2733 static int __init input_init(void)
2734 {
2735 int err;
2736
2737 err = class_register(&input_class);
2738 if (err) {
2739 pr_err("unable to register input_dev class\n");
2740 return err;
2741 }
2742
2743 err = input_proc_init();
2744 if (err)
2745 goto fail1;
2746
2747 err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2748 INPUT_MAX_CHAR_DEVICES, "input");
2749 if (err) {
2750 pr_err("unable to register char major %d", INPUT_MAJOR);
2751 goto fail2;
2752 }
2753
2754 return 0;
2755
2756 fail2: input_proc_exit();
2757 fail1: class_unregister(&input_class);
2758 return err;
2759 }
2760
2761 static void __exit input_exit(void)
2762 {
2763 input_proc_exit();
2764 unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2765 INPUT_MAX_CHAR_DEVICES);
2766 class_unregister(&input_class);
2767 }
2768
2769 subsys_initcall(input_init);
2770 module_exit(input_exit);
2771