xref: /openbmc/linux/drivers/input/input.c (revision 3b73c45e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * The input core
4  *
5  * Copyright (c) 1999-2002 Vojtech Pavlik
6  */
7 
8 
9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
10 
11 #include <linux/init.h>
12 #include <linux/types.h>
13 #include <linux/idr.h>
14 #include <linux/input/mt.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/random.h>
18 #include <linux/major.h>
19 #include <linux/proc_fs.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/poll.h>
23 #include <linux/device.h>
24 #include <linux/mutex.h>
25 #include <linux/rcupdate.h>
26 #include "input-compat.h"
27 #include "input-core-private.h"
28 #include "input-poller.h"
29 
30 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
31 MODULE_DESCRIPTION("Input core");
32 MODULE_LICENSE("GPL");
33 
34 #define INPUT_MAX_CHAR_DEVICES		1024
35 #define INPUT_FIRST_DYNAMIC_DEV		256
36 static DEFINE_IDA(input_ida);
37 
38 static LIST_HEAD(input_dev_list);
39 static LIST_HEAD(input_handler_list);
40 
41 /*
42  * input_mutex protects access to both input_dev_list and input_handler_list.
43  * This also causes input_[un]register_device and input_[un]register_handler
44  * to be mutually exclusive, which simplifies locking in drivers implementing
45  * input handlers.
46  */
47 static DEFINE_MUTEX(input_mutex);
48 
49 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
50 
51 static const unsigned int input_max_code[EV_CNT] = {
52 	[EV_KEY] = KEY_MAX,
53 	[EV_REL] = REL_MAX,
54 	[EV_ABS] = ABS_MAX,
55 	[EV_MSC] = MSC_MAX,
56 	[EV_SW] = SW_MAX,
57 	[EV_LED] = LED_MAX,
58 	[EV_SND] = SND_MAX,
59 	[EV_FF] = FF_MAX,
60 };
61 
62 static inline int is_event_supported(unsigned int code,
63 				     unsigned long *bm, unsigned int max)
64 {
65 	return code <= max && test_bit(code, bm);
66 }
67 
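/*
 * Worked example of the "fuzz" filter implemented below: with fuzz = 8 and
 * old_val = 100, a new value inside (96, 104) is dropped and 100 is kept,
 * a value inside (92, 108) is smoothed to (3 * 100 + value) / 4, a value
 * inside (84, 116) is averaged to (100 + value) / 2, and anything farther
 * away is passed through unchanged.
 */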
68 static int input_defuzz_abs_event(int value, int old_val, int fuzz)
69 {
70 	if (fuzz) {
71 		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
72 			return old_val;
73 
74 		if (value > old_val - fuzz && value < old_val + fuzz)
75 			return (old_val * 3 + value) / 4;
76 
77 		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
78 			return (old_val + value) / 2;
79 	}
80 
81 	return value;
82 }
83 
84 static void input_start_autorepeat(struct input_dev *dev, int code)
85 {
86 	if (test_bit(EV_REP, dev->evbit) &&
87 	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
88 	    dev->timer.function) {
89 		dev->repeat_key = code;
90 		mod_timer(&dev->timer,
91 			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
92 	}
93 }
94 
95 static void input_stop_autorepeat(struct input_dev *dev)
96 {
97 	del_timer(&dev->timer);
98 }
99 
100 /*
101  * Pass values to a single handler: first through its filter, if any, then,
102  * for values not filtered out, to its events() or event() callback. This
103  * function is called with dev->event_lock held and interrupts disabled.
104  */
105 static unsigned int input_to_handler(struct input_handle *handle,
106 			struct input_value *vals, unsigned int count)
107 {
108 	struct input_handler *handler = handle->handler;
109 	struct input_value *end = vals;
110 	struct input_value *v;
111 
112 	if (handler->filter) {
113 		for (v = vals; v != vals + count; v++) {
114 			if (handler->filter(handle, v->type, v->code, v->value))
115 				continue;
116 			if (end != v)
117 				*end = *v;
118 			end++;
119 		}
120 		count = end - vals;
121 	}
122 
123 	if (!count)
124 		return 0;
125 
126 	if (handler->events)
127 		handler->events(handle, vals, count);
128 	else if (handler->event)
129 		for (v = vals; v != vals + count; v++)
130 			handler->event(handle, v->type, v->code, v->value);
131 
132 	return count;
133 }
134 
135 /*
136  * Pass values first through all filters and then, if they have not been
137  * filtered out, through all open handles. This function is called with
138  * dev->event_lock held and interrupts disabled.
139  */
140 static void input_pass_values(struct input_dev *dev,
141 			      struct input_value *vals, unsigned int count)
142 {
143 	struct input_handle *handle;
144 	struct input_value *v;
145 
146 	lockdep_assert_held(&dev->event_lock);
147 
148 	if (!count)
149 		return;
150 
151 	rcu_read_lock();
152 
153 	handle = rcu_dereference(dev->grab);
154 	if (handle) {
155 		count = input_to_handler(handle, vals, count);
156 	} else {
157 		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
158 			if (handle->open) {
159 				count = input_to_handler(handle, vals, count);
160 				if (!count)
161 					break;
162 			}
163 	}
164 
165 	rcu_read_unlock();
166 
167 	/* trigger auto repeat for key events */
168 	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
169 		for (v = vals; v != vals + count; v++) {
170 			if (v->type == EV_KEY && v->value != 2) {
171 				if (v->value)
172 					input_start_autorepeat(dev, v->code);
173 				else
174 					input_stop_autorepeat(dev);
175 			}
176 		}
177 	}
178 }
179 
180 #define INPUT_IGNORE_EVENT	0
181 #define INPUT_PASS_TO_HANDLERS	1
182 #define INPUT_PASS_TO_DEVICE	2
183 #define INPUT_SLOT		4
184 #define INPUT_FLUSH		8
185 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
186 
187 static int input_handle_abs_event(struct input_dev *dev,
188 				  unsigned int code, int *pval)
189 {
190 	struct input_mt *mt = dev->mt;
191 	bool is_mt_event;
192 	int *pold;
193 
194 	if (code == ABS_MT_SLOT) {
195 		/*
196 		 * "Stage" the event; we'll flush it later, when we
197 		 * get actual touch data.
198 		 */
199 		if (mt && *pval >= 0 && *pval < mt->num_slots)
200 			mt->slot = *pval;
201 
202 		return INPUT_IGNORE_EVENT;
203 	}
204 
205 	is_mt_event = input_is_mt_value(code);
206 
207 	if (!is_mt_event) {
208 		pold = &dev->absinfo[code].value;
209 	} else if (mt) {
210 		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
211 	} else {
212 		/*
213 		 * Bypass filtering for multi-touch events when
214 		 * not employing slots.
215 		 */
216 		pold = NULL;
217 	}
218 
219 	if (pold) {
220 		*pval = input_defuzz_abs_event(*pval, *pold,
221 						dev->absinfo[code].fuzz);
222 		if (*pold == *pval)
223 			return INPUT_IGNORE_EVENT;
224 
225 		*pold = *pval;
226 	}
227 
228 	/* Flush pending "slot" event */
229 	if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
230 		input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
231 		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
232 	}
233 
234 	return INPUT_PASS_TO_HANDLERS;
235 }
236 
237 static int input_get_disposition(struct input_dev *dev,
238 			  unsigned int type, unsigned int code, int *pval)
239 {
240 	int disposition = INPUT_IGNORE_EVENT;
241 	int value = *pval;
242 
243 	/* filter-out events from inhibited devices */
244 	if (dev->inhibited)
245 		return INPUT_IGNORE_EVENT;
246 
247 	switch (type) {
248 
249 	case EV_SYN:
250 		switch (code) {
251 		case SYN_CONFIG:
252 			disposition = INPUT_PASS_TO_ALL;
253 			break;
254 
255 		case SYN_REPORT:
256 			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
257 			break;
258 		case SYN_MT_REPORT:
259 			disposition = INPUT_PASS_TO_HANDLERS;
260 			break;
261 		}
262 		break;
263 
264 	case EV_KEY:
265 		if (is_event_supported(code, dev->keybit, KEY_MAX)) {
266 
267 			/* auto-repeat bypasses state updates */
268 			if (value == 2) {
269 				disposition = INPUT_PASS_TO_HANDLERS;
270 				break;
271 			}
272 
273 			if (!!test_bit(code, dev->key) != !!value) {
274 
275 				__change_bit(code, dev->key);
276 				disposition = INPUT_PASS_TO_HANDLERS;
277 			}
278 		}
279 		break;
280 
281 	case EV_SW:
282 		if (is_event_supported(code, dev->swbit, SW_MAX) &&
283 		    !!test_bit(code, dev->sw) != !!value) {
284 
285 			__change_bit(code, dev->sw);
286 			disposition = INPUT_PASS_TO_HANDLERS;
287 		}
288 		break;
289 
290 	case EV_ABS:
291 		if (is_event_supported(code, dev->absbit, ABS_MAX))
292 			disposition = input_handle_abs_event(dev, code, &value);
293 
294 		break;
295 
296 	case EV_REL:
297 		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
298 			disposition = INPUT_PASS_TO_HANDLERS;
299 
300 		break;
301 
302 	case EV_MSC:
303 		if (is_event_supported(code, dev->mscbit, MSC_MAX))
304 			disposition = INPUT_PASS_TO_ALL;
305 
306 		break;
307 
308 	case EV_LED:
309 		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
310 		    !!test_bit(code, dev->led) != !!value) {
311 
312 			__change_bit(code, dev->led);
313 			disposition = INPUT_PASS_TO_ALL;
314 		}
315 		break;
316 
317 	case EV_SND:
318 		if (is_event_supported(code, dev->sndbit, SND_MAX)) {
319 
320 			if (!!test_bit(code, dev->snd) != !!value)
321 				__change_bit(code, dev->snd);
322 			disposition = INPUT_PASS_TO_ALL;
323 		}
324 		break;
325 
326 	case EV_REP:
327 		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
328 			dev->rep[code] = value;
329 			disposition = INPUT_PASS_TO_ALL;
330 		}
331 		break;
332 
333 	case EV_FF:
334 		if (value >= 0)
335 			disposition = INPUT_PASS_TO_ALL;
336 		break;
337 
338 	case EV_PWR:
339 		disposition = INPUT_PASS_TO_ALL;
340 		break;
341 	}
342 
343 	*pval = value;
344 	return disposition;
345 }
346 
347 static void input_event_dispose(struct input_dev *dev, int disposition,
348 				unsigned int type, unsigned int code, int value)
349 {
350 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
351 		dev->event(dev, type, code, value);
352 
353 	if (!dev->vals)
354 		return;
355 
356 	if (disposition & INPUT_PASS_TO_HANDLERS) {
357 		struct input_value *v;
358 
359 		if (disposition & INPUT_SLOT) {
360 			v = &dev->vals[dev->num_vals++];
361 			v->type = EV_ABS;
362 			v->code = ABS_MT_SLOT;
363 			v->value = dev->mt->slot;
364 		}
365 
366 		v = &dev->vals[dev->num_vals++];
367 		v->type = type;
368 		v->code = code;
369 		v->value = value;
370 	}
371 
372 	if (disposition & INPUT_FLUSH) {
373 		if (dev->num_vals >= 2)
374 			input_pass_values(dev, dev->vals, dev->num_vals);
375 		dev->num_vals = 0;
376 		/*
377 		 * Reset the timestamp on flush so we won't end up
378 		 * with a stale one. Note we only need to reset the
379 		 * monolithic one as we use its presence when deciding
380 		 * whether to generate a synthetic timestamp.
381 		 */
382 		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
383 	} else if (dev->num_vals >= dev->max_vals - 2) {
384 		dev->vals[dev->num_vals++] = input_value_sync;
385 		input_pass_values(dev, dev->vals, dev->num_vals);
386 		dev->num_vals = 0;
387 	}
388 }
389 
390 void input_handle_event(struct input_dev *dev,
391 			unsigned int type, unsigned int code, int value)
392 {
393 	int disposition;
394 
395 	lockdep_assert_held(&dev->event_lock);
396 
397 	disposition = input_get_disposition(dev, type, code, &value);
398 	if (disposition != INPUT_IGNORE_EVENT) {
399 		if (type != EV_SYN)
400 			add_input_randomness(type, code, value);
401 
402 		input_event_dispose(dev, disposition, type, code, value);
403 	}
404 }
405 
406 /**
407  * input_event() - report new input event
408  * @dev: device that generated the event
409  * @type: type of the event
410  * @code: event code
411  * @value: value of the event
412  *
413  * This function should be used by drivers implementing various input
414  * devices to report input events. See also input_inject_event().
415  *
416  * NOTE: input_event() may be safely used right after the input device was
417  * allocated with input_allocate_device(), even before it is registered
418  * with input_register_device(), but the event will not reach any of the
419  * input handlers. Such early invocation of input_event() may be used
420  * to 'seed' the initial state of a switch or the initial position of an
421  * absolute axis, etc.
422  */
423 void input_event(struct input_dev *dev,
424 		 unsigned int type, unsigned int code, int value)
425 {
426 	unsigned long flags;
427 
428 	if (is_event_supported(type, dev->evbit, EV_MAX)) {
429 
430 		spin_lock_irqsave(&dev->event_lock, flags);
431 		input_handle_event(dev, type, code, value);
432 		spin_unlock_irqrestore(&dev->event_lock, flags);
433 	}
434 }
435 EXPORT_SYMBOL(input_event);
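
/*
 * Example usage (an illustrative sketch, not taken from a real driver):
 * a button driver reporting state from its interrupt or polling path.
 * input_report_key() and input_sync() are the usual inline wrappers
 * around input_event(); the KEY_POWER code is an arbitrary choice here.
 */
static void __maybe_unused example_report_button(struct input_dev *input,
						 bool pressed)
{
	input_report_key(input, KEY_POWER, pressed);	/* EV_KEY event */
	input_sync(input);			/* EV_SYN/SYN_REPORT marker */
}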
436 
437 /**
438  * input_inject_event() - send input event from input handler
439  * @handle: input handle to send event through
440  * @type: type of the event
441  * @code: event code
442  * @value: value of the event
443  *
444  * Similar to input_event() but will ignore the event if the device is
445  * "grabbed" and the handle injecting the event is not the one that owns
446  * the device.
447  */
448 void input_inject_event(struct input_handle *handle,
449 			unsigned int type, unsigned int code, int value)
450 {
451 	struct input_dev *dev = handle->dev;
452 	struct input_handle *grab;
453 	unsigned long flags;
454 
455 	if (is_event_supported(type, dev->evbit, EV_MAX)) {
456 		spin_lock_irqsave(&dev->event_lock, flags);
457 
458 		rcu_read_lock();
459 		grab = rcu_dereference(dev->grab);
460 		if (!grab || grab == handle)
461 			input_handle_event(dev, type, code, value);
462 		rcu_read_unlock();
463 
464 		spin_unlock_irqrestore(&dev->event_lock, flags);
465 	}
466 }
467 EXPORT_SYMBOL(input_inject_event);
468 
469 /**
470  * input_alloc_absinfo - allocates array of input_absinfo structs
471  * @dev: the input device emitting absolute events
472  *
473  * If the absinfo array has already been allocated, this function will
474  * not do anything.
475  */
476 void input_alloc_absinfo(struct input_dev *dev)
477 {
478 	if (dev->absinfo)
479 		return;
480 
481 	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
482 	if (!dev->absinfo) {
483 		dev_err(dev->dev.parent ?: &dev->dev,
484 			"%s: unable to allocate memory\n", __func__);
485 		/*
486 		 * We will handle this allocation failure in
487 		 * input_register_device() when we refuse to register input
488 		 * device with ABS bits but without absinfo.
489 		 */
490 	}
491 }
492 EXPORT_SYMBOL(input_alloc_absinfo);
493 
494 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
495 			  int min, int max, int fuzz, int flat)
496 {
497 	struct input_absinfo *absinfo;
498 
499 	__set_bit(EV_ABS, dev->evbit);
500 	__set_bit(axis, dev->absbit);
501 
502 	input_alloc_absinfo(dev);
503 	if (!dev->absinfo)
504 		return;
505 
506 	absinfo = &dev->absinfo[axis];
507 	absinfo->minimum = min;
508 	absinfo->maximum = max;
509 	absinfo->fuzz = fuzz;
510 	absinfo->flat = flat;
511 }
512 EXPORT_SYMBOL(input_set_abs_params);
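
/*
 * Example usage (an illustrative sketch): declaring absolute axes for a
 * hypothetical 1024x600 touchscreen before registration. The fuzz of 4
 * feeds input_defuzz_abs_event() above; flat is left at 0.
 */
static void __maybe_unused example_setup_touch_axes(struct input_dev *input)
{
	/* axis, min, max, fuzz, flat */
	input_set_abs_params(input, ABS_X, 0, 1023, 4, 0);
	input_set_abs_params(input, ABS_Y, 0, 599, 4, 0);
	input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
}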
513 
514 /**
515  * input_copy_abs - Copy absinfo from one input_dev to another
516  * @dst: Destination input device to copy the abs settings to
517  * @dst_axis: ABS_* value selecting the destination axis
518  * @src: Source input device to copy the abs settings from
519  * @src_axis: ABS_* value selecting the source axis
520  *
521  * Set absinfo for the selected destination axis by copying it from
522  * the specified source input device's source axis.
523  * This is useful, e.g., to set up a pen/stylus input device for combined
524  * touchscreen/pen hardware where the pen uses the same coordinates as
525  * the touchscreen.
526  */
527 void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
528 		    const struct input_dev *src, unsigned int src_axis)
529 {
530 	/* src must have EV_ABS and src_axis set */
531 	if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
532 		      test_bit(src_axis, src->absbit))))
533 		return;
534 
535 	/*
536 	 * input_alloc_absinfo() may have failed for the source. Our caller is
537 	 * expected to catch this when registering the input devices, which may
538 	 * happen after the input_copy_abs() call.
539 	 */
540 	if (!src->absinfo)
541 		return;
542 
543 	input_set_capability(dst, EV_ABS, dst_axis);
544 	if (!dst->absinfo)
545 		return;
546 
547 	dst->absinfo[dst_axis] = src->absinfo[src_axis];
548 }
549 EXPORT_SYMBOL(input_copy_abs);
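
/*
 * Example usage (an illustrative sketch): a combined touchscreen/pen driver
 * letting the pen device inherit the touchscreen's X/Y ranges, as suggested
 * by the kernel-doc above. Both devices are assumed to be already set up by
 * the caller.
 */
static void __maybe_unused example_setup_pen_axes(struct input_dev *pen,
						  const struct input_dev *ts)
{
	input_copy_abs(pen, ABS_X, ts, ABS_X);
	input_copy_abs(pen, ABS_Y, ts, ABS_Y);
}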
550 
551 /**
552  * input_grab_device - grabs device for exclusive use
553  * @handle: input handle that wants to own the device
554  *
555  * When a device is grabbed by an input handle, all events generated by
556  * the device are delivered only to this handle. Also, events injected
557  * by other input handles are ignored while the device is grabbed.
558  */
559 int input_grab_device(struct input_handle *handle)
560 {
561 	struct input_dev *dev = handle->dev;
562 	int retval;
563 
564 	retval = mutex_lock_interruptible(&dev->mutex);
565 	if (retval)
566 		return retval;
567 
568 	if (dev->grab) {
569 		retval = -EBUSY;
570 		goto out;
571 	}
572 
573 	rcu_assign_pointer(dev->grab, handle);
574 
575  out:
576 	mutex_unlock(&dev->mutex);
577 	return retval;
578 }
579 EXPORT_SYMBOL(input_grab_device);
580 
581 static void __input_release_device(struct input_handle *handle)
582 {
583 	struct input_dev *dev = handle->dev;
584 	struct input_handle *grabber;
585 
586 	grabber = rcu_dereference_protected(dev->grab,
587 					    lockdep_is_held(&dev->mutex));
588 	if (grabber == handle) {
589 		rcu_assign_pointer(dev->grab, NULL);
590 		/* Make sure input_pass_values() notices that grab is gone */
591 		synchronize_rcu();
592 
593 		list_for_each_entry(handle, &dev->h_list, d_node)
594 			if (handle->open && handle->handler->start)
595 				handle->handler->start(handle);
596 	}
597 }
598 
599 /**
600  * input_release_device - release previously grabbed device
601  * @handle: input handle that owns the device
602  *
603  * Releases a previously grabbed device so that other input handles can
604  * start receiving input events. Upon release, all handlers attached
605  * to the device have their start() method called so they have a chance
606  * to synchronize device state with the rest of the system.
607  */
608 void input_release_device(struct input_handle *handle)
609 {
610 	struct input_dev *dev = handle->dev;
611 
612 	mutex_lock(&dev->mutex);
613 	__input_release_device(handle);
614 	mutex_unlock(&dev->mutex);
615 }
616 EXPORT_SYMBOL(input_release_device);
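
/*
 * Example usage (an illustrative sketch): a handler toggling exclusive
 * access on behalf of user space, similar in spirit to what evdev does for
 * its grab ioctl. Error handling is reduced to the essentials.
 */
static int __maybe_unused example_set_exclusive(struct input_handle *handle,
						bool exclusive)
{
	if (exclusive)
		return input_grab_device(handle);

	input_release_device(handle);
	return 0;
}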
617 
618 /**
619  * input_open_device - open input device
620  * @handle: handle through which device is being accessed
621  *
622  * This function should be called by input handlers when they
623  * want to start receiving events from a given input device.
624  */
625 int input_open_device(struct input_handle *handle)
626 {
627 	struct input_dev *dev = handle->dev;
628 	int retval;
629 
630 	retval = mutex_lock_interruptible(&dev->mutex);
631 	if (retval)
632 		return retval;
633 
634 	if (dev->going_away) {
635 		retval = -ENODEV;
636 		goto out;
637 	}
638 
639 	handle->open++;
640 
641 	if (dev->users++ || dev->inhibited) {
642 		/*
643 		 * Device is already opened and/or inhibited,
644 		 * so we can exit immediately and report success.
645 		 */
646 		goto out;
647 	}
648 
649 	if (dev->open) {
650 		retval = dev->open(dev);
651 		if (retval) {
652 			dev->users--;
653 			handle->open--;
654 			/*
655 			 * Make sure we are not delivering any more events
656 			 * through this handle
657 			 */
658 			synchronize_rcu();
659 			goto out;
660 		}
661 	}
662 
663 	if (dev->poller)
664 		input_dev_poller_start(dev->poller);
665 
666  out:
667 	mutex_unlock(&dev->mutex);
668 	return retval;
669 }
670 EXPORT_SYMBOL(input_open_device);
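
/*
 * Example usage (an illustrative sketch): the typical shape of a handler's
 * connect() method, which registers a handle and then opens the device so
 * that events start flowing. The "example" names are hypothetical; simple
 * handlers such as sysrq follow essentially this pattern.
 */
static int __maybe_unused example_connect(struct input_handler *handler,
					  struct input_dev *dev,
					  const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "example";

	error = input_register_handle(handle);
	if (error)
		goto err_free;

	error = input_open_device(handle);
	if (error)
		goto err_unregister;

	return 0;

 err_unregister:
	input_unregister_handle(handle);
 err_free:
	kfree(handle);
	return error;
}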
671 
672 int input_flush_device(struct input_handle *handle, struct file *file)
673 {
674 	struct input_dev *dev = handle->dev;
675 	int retval;
676 
677 	retval = mutex_lock_interruptible(&dev->mutex);
678 	if (retval)
679 		return retval;
680 
681 	if (dev->flush)
682 		retval = dev->flush(dev, file);
683 
684 	mutex_unlock(&dev->mutex);
685 	return retval;
686 }
687 EXPORT_SYMBOL(input_flush_device);
688 
689 /**
690  * input_close_device - close input device
691  * @handle: handle through which device is being accessed
692  *
693  * This function should be called by input handlers when they
694  * want to stop receiving events from a given input device.
695  */
696 void input_close_device(struct input_handle *handle)
697 {
698 	struct input_dev *dev = handle->dev;
699 
700 	mutex_lock(&dev->mutex);
701 
702 	__input_release_device(handle);
703 
704 	if (!dev->inhibited && !--dev->users) {
705 		if (dev->poller)
706 			input_dev_poller_stop(dev->poller);
707 		if (dev->close)
708 			dev->close(dev);
709 	}
710 
711 	if (!--handle->open) {
712 		/*
713 		 * synchronize_rcu() makes sure that input_pass_values()
714 		 * completed and that no more input events are delivered
715 		 * through this handle
716 		 */
717 		synchronize_rcu();
718 	}
719 
720 	mutex_unlock(&dev->mutex);
721 }
722 EXPORT_SYMBOL(input_close_device);
723 
724 /*
725  * Simulate keyup events for all keys that are marked as pressed.
726  * The function must be called with dev->event_lock held.
727  */
728 static bool input_dev_release_keys(struct input_dev *dev)
729 {
730 	bool need_sync = false;
731 	int code;
732 
733 	lockdep_assert_held(&dev->event_lock);
734 
735 	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
736 		for_each_set_bit(code, dev->key, KEY_CNT) {
737 			input_handle_event(dev, EV_KEY, code, 0);
738 			need_sync = true;
739 		}
740 	}
741 
742 	return need_sync;
743 }
744 
745 /*
746  * Prepare device for unregistering
747  */
748 static void input_disconnect_device(struct input_dev *dev)
749 {
750 	struct input_handle *handle;
751 
752 	/*
753 	 * Mark device as going away. Note that we take dev->mutex here
754 	 * not to protect access to dev->going_away but rather to ensure
755 	 * that there are no threads in the middle of input_open_device()
756 	 */
757 	mutex_lock(&dev->mutex);
758 	dev->going_away = true;
759 	mutex_unlock(&dev->mutex);
760 
761 	spin_lock_irq(&dev->event_lock);
762 
763 	/*
764 	 * Simulate keyup events for all pressed keys so that handlers
765 	 * are not left with "stuck" keys. The driver may continue to
766 	 * generate events even after we are done here, but they will not
767 	 * reach any handlers.
768 	 */
769 	if (input_dev_release_keys(dev))
770 		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
771 
772 	list_for_each_entry(handle, &dev->h_list, d_node)
773 		handle->open = 0;
774 
775 	spin_unlock_irq(&dev->event_lock);
776 }
777 
778 /**
779  * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
780  * @ke: keymap entry containing scancode to be converted.
781  * @scancode: pointer to the location where converted scancode should
782  *	be stored.
783  *
784  * This function is used to convert a scancode stored in a
785  * &struct input_keymap_entry into the scalar form understood by legacy
786  * keymap handling methods, which expect scancodes to be 'unsigned int'.
787  */
788 int input_scancode_to_scalar(const struct input_keymap_entry *ke,
789 			     unsigned int *scancode)
790 {
791 	switch (ke->len) {
792 	case 1:
793 		*scancode = *((u8 *)ke->scancode);
794 		break;
795 
796 	case 2:
797 		*scancode = *((u16 *)ke->scancode);
798 		break;
799 
800 	case 4:
801 		*scancode = *((u32 *)ke->scancode);
802 		break;
803 
804 	default:
805 		return -EINVAL;
806 	}
807 
808 	return 0;
809 }
810 EXPORT_SYMBOL(input_scancode_to_scalar);
811 
812 /*
813  * Those routines handle the default case where no [gs]etkeycode() is
814  * defined. In this case, an array indexed by the scancode is used.
815  */
816 
817 static unsigned int input_fetch_keycode(struct input_dev *dev,
818 					unsigned int index)
819 {
820 	switch (dev->keycodesize) {
821 	case 1:
822 		return ((u8 *)dev->keycode)[index];
823 
824 	case 2:
825 		return ((u16 *)dev->keycode)[index];
826 
827 	default:
828 		return ((u32 *)dev->keycode)[index];
829 	}
830 }
831 
832 static int input_default_getkeycode(struct input_dev *dev,
833 				    struct input_keymap_entry *ke)
834 {
835 	unsigned int index;
836 	int error;
837 
838 	if (!dev->keycodesize)
839 		return -EINVAL;
840 
841 	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
842 		index = ke->index;
843 	else {
844 		error = input_scancode_to_scalar(ke, &index);
845 		if (error)
846 			return error;
847 	}
848 
849 	if (index >= dev->keycodemax)
850 		return -EINVAL;
851 
852 	ke->keycode = input_fetch_keycode(dev, index);
853 	ke->index = index;
854 	ke->len = sizeof(index);
855 	memcpy(ke->scancode, &index, sizeof(index));
856 
857 	return 0;
858 }
859 
860 static int input_default_setkeycode(struct input_dev *dev,
861 				    const struct input_keymap_entry *ke,
862 				    unsigned int *old_keycode)
863 {
864 	unsigned int index;
865 	int error;
866 	int i;
867 
868 	if (!dev->keycodesize)
869 		return -EINVAL;
870 
871 	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
872 		index = ke->index;
873 	} else {
874 		error = input_scancode_to_scalar(ke, &index);
875 		if (error)
876 			return error;
877 	}
878 
879 	if (index >= dev->keycodemax)
880 		return -EINVAL;
881 
882 	if (dev->keycodesize < sizeof(ke->keycode) &&
883 			(ke->keycode >> (dev->keycodesize * 8)))
884 		return -EINVAL;
885 
886 	switch (dev->keycodesize) {
887 		case 1: {
888 			u8 *k = (u8 *)dev->keycode;
889 			*old_keycode = k[index];
890 			k[index] = ke->keycode;
891 			break;
892 		}
893 		case 2: {
894 			u16 *k = (u16 *)dev->keycode;
895 			*old_keycode = k[index];
896 			k[index] = ke->keycode;
897 			break;
898 		}
899 		default: {
900 			u32 *k = (u32 *)dev->keycode;
901 			*old_keycode = k[index];
902 			k[index] = ke->keycode;
903 			break;
904 		}
905 	}
906 
907 	if (*old_keycode <= KEY_MAX) {
908 		__clear_bit(*old_keycode, dev->keybit);
909 		for (i = 0; i < dev->keycodemax; i++) {
910 			if (input_fetch_keycode(dev, i) == *old_keycode) {
911 				__set_bit(*old_keycode, dev->keybit);
912 				/* Setting the bit twice is useless, so break */
913 				break;
914 			}
915 		}
916 	}
917 
918 	__set_bit(ke->keycode, dev->keybit);
919 	return 0;
920 }
921 
922 /**
923  * input_get_keycode - retrieve keycode currently mapped to a given scancode
924  * @dev: input device whose keymap is being queried
925  * @ke: keymap entry
926  *
927  * This function should be called by anyone interested in retrieving the
928  * current keymap. Presently the evdev handler uses it.
929  */
930 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
931 {
932 	unsigned long flags;
933 	int retval;
934 
935 	spin_lock_irqsave(&dev->event_lock, flags);
936 	retval = dev->getkeycode(dev, ke);
937 	spin_unlock_irqrestore(&dev->event_lock, flags);
938 
939 	return retval;
940 }
941 EXPORT_SYMBOL(input_get_keycode);
942 
943 /**
944  * input_set_keycode - attribute a keycode to a given scancode
945  * @dev: input device whose keymap is being updated
946  * @ke: new keymap entry
947  *
948  * This function should be called by anyone needing to update the current
949  * keymap. Presently the keyboard and evdev handlers use it.
950  */
951 int input_set_keycode(struct input_dev *dev,
952 		      const struct input_keymap_entry *ke)
953 {
954 	unsigned long flags;
955 	unsigned int old_keycode;
956 	int retval;
957 
958 	if (ke->keycode > KEY_MAX)
959 		return -EINVAL;
960 
961 	spin_lock_irqsave(&dev->event_lock, flags);
962 
963 	retval = dev->setkeycode(dev, ke, &old_keycode);
964 	if (retval)
965 		goto out;
966 
967 	/* Make sure KEY_RESERVED did not get enabled. */
968 	__clear_bit(KEY_RESERVED, dev->keybit);
969 
970 	/*
971 	 * Simulate keyup event if keycode is not present
972 	 * in the keymap anymore
973 	 */
974 	if (old_keycode > KEY_MAX) {
975 		dev_warn(dev->dev.parent ?: &dev->dev,
976 			 "%s: got too big old keycode %#x\n",
977 			 __func__, old_keycode);
978 	} else if (test_bit(EV_KEY, dev->evbit) &&
979 		   !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
980 		   __test_and_clear_bit(old_keycode, dev->key)) {
981 		/*
982 		 * We have to use input_event_dispose() here directly instead
983 		 * of input_handle_event() because the key we want to release
984 		 * here is considered no longer supported by the device and
985 		 * input_handle_event() will ignore it.
986 		 */
987 		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
988 				    EV_KEY, old_keycode, 0);
989 		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
990 				    EV_SYN, SYN_REPORT, 1);
991 	}
992 
993  out:
994 	spin_unlock_irqrestore(&dev->event_lock, flags);
995 
996 	return retval;
997 }
998 EXPORT_SYMBOL(input_set_keycode);
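
/*
 * Example usage (an illustrative sketch): remapping a single scancode to a
 * different keycode through input_set_keycode(). The scancode 0x1e and
 * KEY_VOLUMEUP are arbitrary values chosen purely for illustration.
 */
static int __maybe_unused example_remap_scancode(struct input_dev *dev)
{
	struct input_keymap_entry ke = {
		.len = sizeof(u32),
		.keycode = KEY_VOLUMEUP,
	};
	u32 scancode = 0x1e;

	memcpy(ke.scancode, &scancode, sizeof(scancode));

	return input_set_keycode(dev, &ke);
}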
999 
1000 bool input_match_device_id(const struct input_dev *dev,
1001 			   const struct input_device_id *id)
1002 {
1003 	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
1004 		if (id->bustype != dev->id.bustype)
1005 			return false;
1006 
1007 	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
1008 		if (id->vendor != dev->id.vendor)
1009 			return false;
1010 
1011 	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
1012 		if (id->product != dev->id.product)
1013 			return false;
1014 
1015 	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
1016 		if (id->version != dev->id.version)
1017 			return false;
1018 
1019 	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
1020 	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
1021 	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
1022 	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
1023 	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
1024 	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
1025 	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
1026 	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
1027 	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
1028 	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
1029 		return false;
1030 	}
1031 
1032 	return true;
1033 }
1034 EXPORT_SYMBOL(input_match_device_id);
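
/*
 * Example (an illustrative sketch): the id table a handler would put in its
 * struct input_handler so that input_match_device() below offers it every
 * device capable of generating EV_KEY events. The empty entry terminates
 * the table.
 */
static const struct input_device_id example_key_ids[] __maybe_unused = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },	/* terminating entry */
};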
1035 
1036 static const struct input_device_id *input_match_device(struct input_handler *handler,
1037 							struct input_dev *dev)
1038 {
1039 	const struct input_device_id *id;
1040 
1041 	for (id = handler->id_table; id->flags || id->driver_info; id++) {
1042 		if (input_match_device_id(dev, id) &&
1043 		    (!handler->match || handler->match(handler, dev))) {
1044 			return id;
1045 		}
1046 	}
1047 
1048 	return NULL;
1049 }
1050 
1051 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
1052 {
1053 	const struct input_device_id *id;
1054 	int error;
1055 
1056 	id = input_match_device(handler, dev);
1057 	if (!id)
1058 		return -ENODEV;
1059 
1060 	error = handler->connect(handler, dev, id);
1061 	if (error && error != -ENODEV)
1062 		pr_err("failed to attach handler %s to device %s, error: %d\n",
1063 		       handler->name, kobject_name(&dev->dev.kobj), error);
1064 
1065 	return error;
1066 }
1067 
1068 #ifdef CONFIG_COMPAT
1069 
1070 static int input_bits_to_string(char *buf, int buf_size,
1071 				unsigned long bits, bool skip_empty)
1072 {
1073 	int len = 0;
1074 
1075 	if (in_compat_syscall()) {
1076 		u32 dword = bits >> 32;
1077 		if (dword || !skip_empty)
1078 			len += snprintf(buf, buf_size, "%x ", dword);
1079 
1080 		dword = bits & 0xffffffffUL;
1081 		if (dword || !skip_empty || len)
1082 			len += snprintf(buf + len, max(buf_size - len, 0),
1083 					"%x", dword);
1084 	} else {
1085 		if (bits || !skip_empty)
1086 			len += snprintf(buf, buf_size, "%lx", bits);
1087 	}
1088 
1089 	return len;
1090 }
1091 
1092 #else /* !CONFIG_COMPAT */
1093 
1094 static int input_bits_to_string(char *buf, int buf_size,
1095 				unsigned long bits, bool skip_empty)
1096 {
1097 	return bits || !skip_empty ?
1098 		snprintf(buf, buf_size, "%lx", bits) : 0;
1099 }
1100 
1101 #endif
1102 
1103 #ifdef CONFIG_PROC_FS
1104 
1105 static struct proc_dir_entry *proc_bus_input_dir;
1106 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1107 static int input_devices_state;
1108 
1109 static inline void input_wakeup_procfs_readers(void)
1110 {
1111 	input_devices_state++;
1112 	wake_up(&input_devices_poll_wait);
1113 }
1114 
1115 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1116 {
1117 	poll_wait(file, &input_devices_poll_wait, wait);
1118 	if (file->f_version != input_devices_state) {
1119 		file->f_version = input_devices_state;
1120 		return EPOLLIN | EPOLLRDNORM;
1121 	}
1122 
1123 	return 0;
1124 }
1125 
1126 union input_seq_state {
1127 	struct {
1128 		unsigned short pos;
1129 		bool mutex_acquired;
1130 	};
1131 	void *p;
1132 };
1133 
1134 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1135 {
1136 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1137 	int error;
1138 
1139 	/* We need to fit into seq->private pointer */
1140 	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1141 
1142 	error = mutex_lock_interruptible(&input_mutex);
1143 	if (error) {
1144 		state->mutex_acquired = false;
1145 		return ERR_PTR(error);
1146 	}
1147 
1148 	state->mutex_acquired = true;
1149 
1150 	return seq_list_start(&input_dev_list, *pos);
1151 }
1152 
1153 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1154 {
1155 	return seq_list_next(v, &input_dev_list, pos);
1156 }
1157 
1158 static void input_seq_stop(struct seq_file *seq, void *v)
1159 {
1160 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1161 
1162 	if (state->mutex_acquired)
1163 		mutex_unlock(&input_mutex);
1164 }
1165 
1166 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1167 				   unsigned long *bitmap, int max)
1168 {
1169 	int i;
1170 	bool skip_empty = true;
1171 	char buf[18];
1172 
1173 	seq_printf(seq, "B: %s=", name);
1174 
1175 	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1176 		if (input_bits_to_string(buf, sizeof(buf),
1177 					 bitmap[i], skip_empty)) {
1178 			skip_empty = false;
1179 			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1180 		}
1181 	}
1182 
1183 	/*
1184 	 * If no output was produced print a single 0.
1185 	 */
1186 	if (skip_empty)
1187 		seq_putc(seq, '0');
1188 
1189 	seq_putc(seq, '\n');
1190 }
1191 
1192 static int input_devices_seq_show(struct seq_file *seq, void *v)
1193 {
1194 	struct input_dev *dev = container_of(v, struct input_dev, node);
1195 	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1196 	struct input_handle *handle;
1197 
1198 	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1199 		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1200 
1201 	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1202 	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1203 	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1204 	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1205 	seq_puts(seq, "H: Handlers=");
1206 
1207 	list_for_each_entry(handle, &dev->h_list, d_node)
1208 		seq_printf(seq, "%s ", handle->name);
1209 	seq_putc(seq, '\n');
1210 
1211 	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1212 
1213 	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1214 	if (test_bit(EV_KEY, dev->evbit))
1215 		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1216 	if (test_bit(EV_REL, dev->evbit))
1217 		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1218 	if (test_bit(EV_ABS, dev->evbit))
1219 		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1220 	if (test_bit(EV_MSC, dev->evbit))
1221 		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1222 	if (test_bit(EV_LED, dev->evbit))
1223 		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1224 	if (test_bit(EV_SND, dev->evbit))
1225 		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1226 	if (test_bit(EV_FF, dev->evbit))
1227 		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1228 	if (test_bit(EV_SW, dev->evbit))
1229 		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1230 
1231 	seq_putc(seq, '\n');
1232 
1233 	kfree(path);
1234 	return 0;
1235 }
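
/*
 * The callback above emits one block per device in /proc/bus/input/devices.
 * A typical keyboard entry looks roughly like this (all values are
 * illustrative):
 *
 *	I: Bus=0011 Vendor=0001 Product=0001 Version=ab41
 *	N: Name="AT Translated Set 2 keyboard"
 *	P: Phys=isa0060/serio0/input0
 *	S: Sysfs=/devices/platform/i8042/serio0/input/input0
 *	U: Uniq=
 *	H: Handlers=sysrq kbd event0
 *	B: PROP=0
 *	B: EV=120013
 *	...
 */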
1236 
1237 static const struct seq_operations input_devices_seq_ops = {
1238 	.start	= input_devices_seq_start,
1239 	.next	= input_devices_seq_next,
1240 	.stop	= input_seq_stop,
1241 	.show	= input_devices_seq_show,
1242 };
1243 
1244 static int input_proc_devices_open(struct inode *inode, struct file *file)
1245 {
1246 	return seq_open(file, &input_devices_seq_ops);
1247 }
1248 
1249 static const struct proc_ops input_devices_proc_ops = {
1250 	.proc_open	= input_proc_devices_open,
1251 	.proc_poll	= input_proc_devices_poll,
1252 	.proc_read	= seq_read,
1253 	.proc_lseek	= seq_lseek,
1254 	.proc_release	= seq_release,
1255 };
1256 
1257 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1258 {
1259 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1260 	int error;
1261 
1262 	/* We need to fit into seq->private pointer */
1263 	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1264 
1265 	error = mutex_lock_interruptible(&input_mutex);
1266 	if (error) {
1267 		state->mutex_acquired = false;
1268 		return ERR_PTR(error);
1269 	}
1270 
1271 	state->mutex_acquired = true;
1272 	state->pos = *pos;
1273 
1274 	return seq_list_start(&input_handler_list, *pos);
1275 }
1276 
1277 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1278 {
1279 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1280 
1281 	state->pos = *pos + 1;
1282 	return seq_list_next(v, &input_handler_list, pos);
1283 }
1284 
1285 static int input_handlers_seq_show(struct seq_file *seq, void *v)
1286 {
1287 	struct input_handler *handler = container_of(v, struct input_handler, node);
1288 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1289 
1290 	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1291 	if (handler->filter)
1292 		seq_puts(seq, " (filter)");
1293 	if (handler->legacy_minors)
1294 		seq_printf(seq, " Minor=%d", handler->minor);
1295 	seq_putc(seq, '\n');
1296 
1297 	return 0;
1298 }
1299 
1300 static const struct seq_operations input_handlers_seq_ops = {
1301 	.start	= input_handlers_seq_start,
1302 	.next	= input_handlers_seq_next,
1303 	.stop	= input_seq_stop,
1304 	.show	= input_handlers_seq_show,
1305 };
1306 
1307 static int input_proc_handlers_open(struct inode *inode, struct file *file)
1308 {
1309 	return seq_open(file, &input_handlers_seq_ops);
1310 }
1311 
1312 static const struct proc_ops input_handlers_proc_ops = {
1313 	.proc_open	= input_proc_handlers_open,
1314 	.proc_read	= seq_read,
1315 	.proc_lseek	= seq_lseek,
1316 	.proc_release	= seq_release,
1317 };
1318 
1319 static int __init input_proc_init(void)
1320 {
1321 	struct proc_dir_entry *entry;
1322 
1323 	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1324 	if (!proc_bus_input_dir)
1325 		return -ENOMEM;
1326 
1327 	entry = proc_create("devices", 0, proc_bus_input_dir,
1328 			    &input_devices_proc_ops);
1329 	if (!entry)
1330 		goto fail1;
1331 
1332 	entry = proc_create("handlers", 0, proc_bus_input_dir,
1333 			    &input_handlers_proc_ops);
1334 	if (!entry)
1335 		goto fail2;
1336 
1337 	return 0;
1338 
1339  fail2:	remove_proc_entry("devices", proc_bus_input_dir);
1340  fail1: remove_proc_entry("bus/input", NULL);
1341 	return -ENOMEM;
1342 }
1343 
1344 static void input_proc_exit(void)
1345 {
1346 	remove_proc_entry("devices", proc_bus_input_dir);
1347 	remove_proc_entry("handlers", proc_bus_input_dir);
1348 	remove_proc_entry("bus/input", NULL);
1349 }
1350 
1351 #else /* !CONFIG_PROC_FS */
1352 static inline void input_wakeup_procfs_readers(void) { }
1353 static inline int input_proc_init(void) { return 0; }
1354 static inline void input_proc_exit(void) { }
1355 #endif
1356 
1357 #define INPUT_DEV_STRING_ATTR_SHOW(name)				\
1358 static ssize_t input_dev_show_##name(struct device *dev,		\
1359 				     struct device_attribute *attr,	\
1360 				     char *buf)				\
1361 {									\
1362 	struct input_dev *input_dev = to_input_dev(dev);		\
1363 									\
1364 	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
1365 			 input_dev->name ? input_dev->name : "");	\
1366 }									\
1367 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1368 
1369 INPUT_DEV_STRING_ATTR_SHOW(name);
1370 INPUT_DEV_STRING_ATTR_SHOW(phys);
1371 INPUT_DEV_STRING_ATTR_SHOW(uniq);
1372 
1373 static int input_print_modalias_bits(char *buf, int size,
1374 				     char name, unsigned long *bm,
1375 				     unsigned int min_bit, unsigned int max_bit)
1376 {
1377 	int len = 0, i;
1378 
1379 	len += snprintf(buf, max(size, 0), "%c", name);
1380 	for (i = min_bit; i < max_bit; i++)
1381 		if (bm[BIT_WORD(i)] & BIT_MASK(i))
1382 			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
1383 	return len;
1384 }
1385 
1386 static int input_print_modalias(char *buf, int size, struct input_dev *id,
1387 				int add_cr)
1388 {
1389 	int len;
1390 
1391 	len = snprintf(buf, max(size, 0),
1392 		       "input:b%04Xv%04Xp%04Xe%04X-",
1393 		       id->id.bustype, id->id.vendor,
1394 		       id->id.product, id->id.version);
1395 
1396 	len += input_print_modalias_bits(buf + len, size - len,
1397 				'e', id->evbit, 0, EV_MAX);
1398 	len += input_print_modalias_bits(buf + len, size - len,
1399 				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1400 	len += input_print_modalias_bits(buf + len, size - len,
1401 				'r', id->relbit, 0, REL_MAX);
1402 	len += input_print_modalias_bits(buf + len, size - len,
1403 				'a', id->absbit, 0, ABS_MAX);
1404 	len += input_print_modalias_bits(buf + len, size - len,
1405 				'm', id->mscbit, 0, MSC_MAX);
1406 	len += input_print_modalias_bits(buf + len, size - len,
1407 				'l', id->ledbit, 0, LED_MAX);
1408 	len += input_print_modalias_bits(buf + len, size - len,
1409 				's', id->sndbit, 0, SND_MAX);
1410 	len += input_print_modalias_bits(buf + len, size - len,
1411 				'f', id->ffbit, 0, FF_MAX);
1412 	len += input_print_modalias_bits(buf + len, size - len,
1413 				'w', id->swbit, 0, SW_MAX);
1414 
1415 	if (add_cr)
1416 		len += snprintf(buf + len, max(size - len, 0), "\n");
1417 
1418 	return len;
1419 }
1420 
1421 static ssize_t input_dev_show_modalias(struct device *dev,
1422 				       struct device_attribute *attr,
1423 				       char *buf)
1424 {
1425 	struct input_dev *id = to_input_dev(dev);
1426 	ssize_t len;
1427 
1428 	len = input_print_modalias(buf, PAGE_SIZE, id, 1);
1429 
1430 	return min_t(int, len, PAGE_SIZE);
1431 }
1432 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1433 
1434 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1435 			      int max, int add_cr);
1436 
1437 static ssize_t input_dev_show_properties(struct device *dev,
1438 					 struct device_attribute *attr,
1439 					 char *buf)
1440 {
1441 	struct input_dev *input_dev = to_input_dev(dev);
1442 	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1443 				     INPUT_PROP_MAX, true);
1444 	return min_t(int, len, PAGE_SIZE);
1445 }
1446 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1447 
1448 static int input_inhibit_device(struct input_dev *dev);
1449 static int input_uninhibit_device(struct input_dev *dev);
1450 
1451 static ssize_t inhibited_show(struct device *dev,
1452 			      struct device_attribute *attr,
1453 			      char *buf)
1454 {
1455 	struct input_dev *input_dev = to_input_dev(dev);
1456 
1457 	return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
1458 }
1459 
1460 static ssize_t inhibited_store(struct device *dev,
1461 			       struct device_attribute *attr, const char *buf,
1462 			       size_t len)
1463 {
1464 	struct input_dev *input_dev = to_input_dev(dev);
1465 	ssize_t rv;
1466 	bool inhibited;
1467 
1468 	if (strtobool(buf, &inhibited))
1469 		return -EINVAL;
1470 
1471 	if (inhibited)
1472 		rv = input_inhibit_device(input_dev);
1473 	else
1474 		rv = input_uninhibit_device(input_dev);
1475 
1476 	if (rv != 0)
1477 		return rv;
1478 
1479 	return len;
1480 }
1481 
1482 static DEVICE_ATTR_RW(inhibited);
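
/*
 * The attribute above is exposed per device in sysfs, so user space can
 * pause event generation without closing the device, for example (the
 * device number is illustrative):
 *
 *	echo 1 > /sys/class/input/input3/inhibited	# inhibit
 *	echo 0 > /sys/class/input/input3/inhibited	# uninhibit
 */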
1483 
1484 static struct attribute *input_dev_attrs[] = {
1485 	&dev_attr_name.attr,
1486 	&dev_attr_phys.attr,
1487 	&dev_attr_uniq.attr,
1488 	&dev_attr_modalias.attr,
1489 	&dev_attr_properties.attr,
1490 	&dev_attr_inhibited.attr,
1491 	NULL
1492 };
1493 
1494 static const struct attribute_group input_dev_attr_group = {
1495 	.attrs	= input_dev_attrs,
1496 };
1497 
1498 #define INPUT_DEV_ID_ATTR(name)						\
1499 static ssize_t input_dev_show_id_##name(struct device *dev,		\
1500 					struct device_attribute *attr,	\
1501 					char *buf)			\
1502 {									\
1503 	struct input_dev *input_dev = to_input_dev(dev);		\
1504 	return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name);	\
1505 }									\
1506 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1507 
1508 INPUT_DEV_ID_ATTR(bustype);
1509 INPUT_DEV_ID_ATTR(vendor);
1510 INPUT_DEV_ID_ATTR(product);
1511 INPUT_DEV_ID_ATTR(version);
1512 
1513 static struct attribute *input_dev_id_attrs[] = {
1514 	&dev_attr_bustype.attr,
1515 	&dev_attr_vendor.attr,
1516 	&dev_attr_product.attr,
1517 	&dev_attr_version.attr,
1518 	NULL
1519 };
1520 
1521 static const struct attribute_group input_dev_id_attr_group = {
1522 	.name	= "id",
1523 	.attrs	= input_dev_id_attrs,
1524 };
1525 
1526 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1527 			      int max, int add_cr)
1528 {
1529 	int i;
1530 	int len = 0;
1531 	bool skip_empty = true;
1532 
1533 	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1534 		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1535 					    bitmap[i], skip_empty);
1536 		if (len) {
1537 			skip_empty = false;
1538 			if (i > 0)
1539 				len += snprintf(buf + len, max(buf_size - len, 0), " ");
1540 		}
1541 	}
1542 
1543 	/*
1544 	 * If no output was produced print a single 0.
1545 	 */
1546 	if (len == 0)
1547 		len = snprintf(buf, buf_size, "%d", 0);
1548 
1549 	if (add_cr)
1550 		len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1551 
1552 	return len;
1553 }
1554 
1555 #define INPUT_DEV_CAP_ATTR(ev, bm)					\
1556 static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
1557 				       struct device_attribute *attr,	\
1558 				       char *buf)			\
1559 {									\
1560 	struct input_dev *input_dev = to_input_dev(dev);		\
1561 	int len = input_print_bitmap(buf, PAGE_SIZE,			\
1562 				     input_dev->bm##bit, ev##_MAX,	\
1563 				     true);				\
1564 	return min_t(int, len, PAGE_SIZE);				\
1565 }									\
1566 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1567 
1568 INPUT_DEV_CAP_ATTR(EV, ev);
1569 INPUT_DEV_CAP_ATTR(KEY, key);
1570 INPUT_DEV_CAP_ATTR(REL, rel);
1571 INPUT_DEV_CAP_ATTR(ABS, abs);
1572 INPUT_DEV_CAP_ATTR(MSC, msc);
1573 INPUT_DEV_CAP_ATTR(LED, led);
1574 INPUT_DEV_CAP_ATTR(SND, snd);
1575 INPUT_DEV_CAP_ATTR(FF, ff);
1576 INPUT_DEV_CAP_ATTR(SW, sw);
1577 
1578 static struct attribute *input_dev_caps_attrs[] = {
1579 	&dev_attr_ev.attr,
1580 	&dev_attr_key.attr,
1581 	&dev_attr_rel.attr,
1582 	&dev_attr_abs.attr,
1583 	&dev_attr_msc.attr,
1584 	&dev_attr_led.attr,
1585 	&dev_attr_snd.attr,
1586 	&dev_attr_ff.attr,
1587 	&dev_attr_sw.attr,
1588 	NULL
1589 };
1590 
1591 static const struct attribute_group input_dev_caps_attr_group = {
1592 	.name	= "capabilities",
1593 	.attrs	= input_dev_caps_attrs,
1594 };
1595 
1596 static const struct attribute_group *input_dev_attr_groups[] = {
1597 	&input_dev_attr_group,
1598 	&input_dev_id_attr_group,
1599 	&input_dev_caps_attr_group,
1600 	&input_poller_attribute_group,
1601 	NULL
1602 };
1603 
1604 static void input_dev_release(struct device *device)
1605 {
1606 	struct input_dev *dev = to_input_dev(device);
1607 
1608 	input_ff_destroy(dev);
1609 	input_mt_destroy_slots(dev);
1610 	kfree(dev->poller);
1611 	kfree(dev->absinfo);
1612 	kfree(dev->vals);
1613 	kfree(dev);
1614 
1615 	module_put(THIS_MODULE);
1616 }
1617 
1618 /*
1619  * Input uevent interface - loading event handlers based on
1620  * device bitfields.
1621  */
1622 static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1623 				   const char *name, unsigned long *bitmap, int max)
1624 {
1625 	int len;
1626 
1627 	if (add_uevent_var(env, "%s", name))
1628 		return -ENOMEM;
1629 
1630 	len = input_print_bitmap(&env->buf[env->buflen - 1],
1631 				 sizeof(env->buf) - env->buflen,
1632 				 bitmap, max, false);
1633 	if (len >= (sizeof(env->buf) - env->buflen))
1634 		return -ENOMEM;
1635 
1636 	env->buflen += len;
1637 	return 0;
1638 }
1639 
1640 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1641 					 struct input_dev *dev)
1642 {
1643 	int len;
1644 
1645 	if (add_uevent_var(env, "MODALIAS="))
1646 		return -ENOMEM;
1647 
1648 	len = input_print_modalias(&env->buf[env->buflen - 1],
1649 				   sizeof(env->buf) - env->buflen,
1650 				   dev, 0);
1651 	if (len >= (sizeof(env->buf) - env->buflen))
1652 		return -ENOMEM;
1653 
1654 	env->buflen += len;
1655 	return 0;
1656 }
1657 
1658 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
1659 	do {								\
1660 		int err = add_uevent_var(env, fmt, val);		\
1661 		if (err)						\
1662 			return err;					\
1663 	} while (0)
1664 
1665 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
1666 	do {								\
1667 		int err = input_add_uevent_bm_var(env, name, bm, max);	\
1668 		if (err)						\
1669 			return err;					\
1670 	} while (0)
1671 
1672 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
1673 	do {								\
1674 		int err = input_add_uevent_modalias_var(env, dev);	\
1675 		if (err)						\
1676 			return err;					\
1677 	} while (0)
1678 
1679 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1680 {
1681 	struct input_dev *dev = to_input_dev(device);
1682 
1683 	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1684 				dev->id.bustype, dev->id.vendor,
1685 				dev->id.product, dev->id.version);
1686 	if (dev->name)
1687 		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1688 	if (dev->phys)
1689 		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1690 	if (dev->uniq)
1691 		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1692 
1693 	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1694 
1695 	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1696 	if (test_bit(EV_KEY, dev->evbit))
1697 		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1698 	if (test_bit(EV_REL, dev->evbit))
1699 		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1700 	if (test_bit(EV_ABS, dev->evbit))
1701 		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1702 	if (test_bit(EV_MSC, dev->evbit))
1703 		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1704 	if (test_bit(EV_LED, dev->evbit))
1705 		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1706 	if (test_bit(EV_SND, dev->evbit))
1707 		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1708 	if (test_bit(EV_FF, dev->evbit))
1709 		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1710 	if (test_bit(EV_SW, dev->evbit))
1711 		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1712 
1713 	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1714 
1715 	return 0;
1716 }
1717 
1718 #define INPUT_DO_TOGGLE(dev, type, bits, on)				\
1719 	do {								\
1720 		int i;							\
1721 		bool active;						\
1722 									\
1723 		if (!test_bit(EV_##type, dev->evbit))			\
1724 			break;						\
1725 									\
1726 		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
1727 			active = test_bit(i, dev->bits);		\
1728 			if (!active && !on)				\
1729 				continue;				\
1730 									\
1731 			dev->event(dev, EV_##type, i, on ? active : 0);	\
1732 		}							\
1733 	} while (0)
1734 
1735 static void input_dev_toggle(struct input_dev *dev, bool activate)
1736 {
1737 	if (!dev->event)
1738 		return;
1739 
1740 	INPUT_DO_TOGGLE(dev, LED, led, activate);
1741 	INPUT_DO_TOGGLE(dev, SND, snd, activate);
1742 
1743 	if (activate && test_bit(EV_REP, dev->evbit)) {
1744 		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1745 		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1746 	}
1747 }
1748 
1749 /**
1750  * input_reset_device() - reset/restore the state of input device
1751  * @dev: input device whose state needs to be reset
1752  *
1753  * This function tries to reset the state of an opened input device and
1754  * bring internal state and the state of the hardware in sync with each other.
1755  * We mark all keys as released, restore LED state, repeat rate, etc.
1756  */
1757 void input_reset_device(struct input_dev *dev)
1758 {
1759 	unsigned long flags;
1760 
1761 	mutex_lock(&dev->mutex);
1762 	spin_lock_irqsave(&dev->event_lock, flags);
1763 
1764 	input_dev_toggle(dev, true);
1765 	if (input_dev_release_keys(dev))
1766 		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1767 
1768 	spin_unlock_irqrestore(&dev->event_lock, flags);
1769 	mutex_unlock(&dev->mutex);
1770 }
1771 EXPORT_SYMBOL(input_reset_device);
1772 
1773 static int input_inhibit_device(struct input_dev *dev)
1774 {
1775 	mutex_lock(&dev->mutex);
1776 
1777 	if (dev->inhibited)
1778 		goto out;
1779 
1780 	if (dev->users) {
1781 		if (dev->close)
1782 			dev->close(dev);
1783 		if (dev->poller)
1784 			input_dev_poller_stop(dev->poller);
1785 	}
1786 
1787 	spin_lock_irq(&dev->event_lock);
1788 	input_mt_release_slots(dev);
1789 	input_dev_release_keys(dev);
1790 	input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1791 	input_dev_toggle(dev, false);
1792 	spin_unlock_irq(&dev->event_lock);
1793 
1794 	dev->inhibited = true;
1795 
1796 out:
1797 	mutex_unlock(&dev->mutex);
1798 	return 0;
1799 }
1800 
1801 static int input_uninhibit_device(struct input_dev *dev)
1802 {
1803 	int ret = 0;
1804 
1805 	mutex_lock(&dev->mutex);
1806 
1807 	if (!dev->inhibited)
1808 		goto out;
1809 
1810 	if (dev->users) {
1811 		if (dev->open) {
1812 			ret = dev->open(dev);
1813 			if (ret)
1814 				goto out;
1815 		}
1816 		if (dev->poller)
1817 			input_dev_poller_start(dev->poller);
1818 	}
1819 
1820 	dev->inhibited = false;
1821 	spin_lock_irq(&dev->event_lock);
1822 	input_dev_toggle(dev, true);
1823 	spin_unlock_irq(&dev->event_lock);
1824 
1825 out:
1826 	mutex_unlock(&dev->mutex);
1827 	return ret;
1828 }
1829 
1830 #ifdef CONFIG_PM_SLEEP
1831 static int input_dev_suspend(struct device *dev)
1832 {
1833 	struct input_dev *input_dev = to_input_dev(dev);
1834 
1835 	spin_lock_irq(&input_dev->event_lock);
1836 
1837 	/*
1838 	 * Keys that are pressed now are unlikely to be
1839 	 * still pressed when we resume.
1840 	 */
1841 	if (input_dev_release_keys(input_dev))
1842 		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1843 
1844 	/* Turn off LEDs and sounds, if any are active. */
1845 	input_dev_toggle(input_dev, false);
1846 
1847 	spin_unlock_irq(&input_dev->event_lock);
1848 
1849 	return 0;
1850 }
1851 
1852 static int input_dev_resume(struct device *dev)
1853 {
1854 	struct input_dev *input_dev = to_input_dev(dev);
1855 
1856 	spin_lock_irq(&input_dev->event_lock);
1857 
1858 	/* Restore state of LEDs and sounds, if any were active. */
1859 	input_dev_toggle(input_dev, true);
1860 
1861 	spin_unlock_irq(&input_dev->event_lock);
1862 
1863 	return 0;
1864 }
1865 
1866 static int input_dev_freeze(struct device *dev)
1867 {
1868 	struct input_dev *input_dev = to_input_dev(dev);
1869 
1870 	spin_lock_irq(&input_dev->event_lock);
1871 
1872 	/*
1873 	 * Keys that are pressed now are unlikely to be
1874 	 * still pressed when we resume.
1875 	 */
1876 	if (input_dev_release_keys(input_dev))
1877 		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1878 
1879 	spin_unlock_irq(&input_dev->event_lock);
1880 
1881 	return 0;
1882 }
1883 
1884 static int input_dev_poweroff(struct device *dev)
1885 {
1886 	struct input_dev *input_dev = to_input_dev(dev);
1887 
1888 	spin_lock_irq(&input_dev->event_lock);
1889 
1890 	/* Turn off LEDs and sounds, if any are active. */
1891 	input_dev_toggle(input_dev, false);
1892 
1893 	spin_unlock_irq(&input_dev->event_lock);
1894 
1895 	return 0;
1896 }
1897 
1898 static const struct dev_pm_ops input_dev_pm_ops = {
1899 	.suspend	= input_dev_suspend,
1900 	.resume		= input_dev_resume,
1901 	.freeze		= input_dev_freeze,
1902 	.poweroff	= input_dev_poweroff,
1903 	.restore	= input_dev_resume,
1904 };
1905 #endif /* CONFIG_PM_SLEEP */
1906 
1907 static const struct device_type input_dev_type = {
1908 	.groups		= input_dev_attr_groups,
1909 	.release	= input_dev_release,
1910 	.uevent		= input_dev_uevent,
1911 #ifdef CONFIG_PM_SLEEP
1912 	.pm		= &input_dev_pm_ops,
1913 #endif
1914 };
1915 
1916 static char *input_devnode(struct device *dev, umode_t *mode)
1917 {
1918 	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1919 }
1920 
1921 struct class input_class = {
1922 	.name		= "input",
1923 	.devnode	= input_devnode,
1924 };
1925 EXPORT_SYMBOL_GPL(input_class);
1926 
1927 /**
1928  * input_allocate_device - allocate memory for new input device
1929  *
1930  * Returns prepared struct input_dev or %NULL.
1931  *
1932  * NOTE: Use input_free_device() to free devices that have not been
1933  * registered; input_unregister_device() should be used for already
1934  * registered devices.
1935  */
1936 struct input_dev *input_allocate_device(void)
1937 {
1938 	static atomic_t input_no = ATOMIC_INIT(-1);
1939 	struct input_dev *dev;
1940 
1941 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1942 	if (dev) {
1943 		dev->dev.type = &input_dev_type;
1944 		dev->dev.class = &input_class;
1945 		device_initialize(&dev->dev);
1946 		mutex_init(&dev->mutex);
1947 		spin_lock_init(&dev->event_lock);
1948 		timer_setup(&dev->timer, NULL, 0);
1949 		INIT_LIST_HEAD(&dev->h_list);
1950 		INIT_LIST_HEAD(&dev->node);
1951 
1952 		dev_set_name(&dev->dev, "input%lu",
1953 			     (unsigned long)atomic_inc_return(&input_no));
1954 
1955 		__module_get(THIS_MODULE);
1956 	}
1957 
1958 	return dev;
1959 }
1960 EXPORT_SYMBOL(input_allocate_device);
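
/*
 * Illustrative sketch of typical (non-managed) usage; the "Example"
 * name is made up. Until input_register_device() succeeds, the device
 * must be released with input_free_device(), not
 * input_unregister_device():
 *
 *	struct input_dev *input = input_allocate_device();
 *
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "Example Device";
 *	input->id.bustype = BUS_HOST;
 */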
1961 
1962 struct input_devres {
1963 	struct input_dev *input;
1964 };
1965 
1966 static int devm_input_device_match(struct device *dev, void *res, void *data)
1967 {
1968 	struct input_devres *devres = res;
1969 
1970 	return devres->input == data;
1971 }
1972 
1973 static void devm_input_device_release(struct device *dev, void *res)
1974 {
1975 	struct input_devres *devres = res;
1976 	struct input_dev *input = devres->input;
1977 
1978 	dev_dbg(dev, "%s: dropping reference to %s\n",
1979 		__func__, dev_name(&input->dev));
1980 	input_put_device(input);
1981 }
1982 
1983 /**
1984  * devm_input_allocate_device - allocate managed input device
1985  * @dev: device owning the input device being created
1986  *
1987  * Returns prepared struct input_dev or %NULL.
1988  *
1989  * Managed input devices do not need to be explicitly unregistered or
1990  * freed, as this is done automatically when the owner device unbinds
1991  * from its driver (or binding fails). Once a managed input device is
1992  * allocated, it can be set up and registered in the same fashion as a
1993  * regular input device. There are no special devm_input_device_[un]register()
1994  * variants; the regular ones work with both managed and unmanaged
1995  * devices, should you need them, but in most cases explicit
1996  * unregistration or freeing is unnecessary.
1997  *
1998  * NOTE: the owner device is set up as the parent of the input device
1999  * and users should not override it.
2000  */
2001 struct input_dev *devm_input_allocate_device(struct device *dev)
2002 {
2003 	struct input_dev *input;
2004 	struct input_devres *devres;
2005 
2006 	devres = devres_alloc(devm_input_device_release,
2007 			      sizeof(*devres), GFP_KERNEL);
2008 	if (!devres)
2009 		return NULL;
2010 
2011 	input = input_allocate_device();
2012 	if (!input) {
2013 		devres_free(devres);
2014 		return NULL;
2015 	}
2016 
2017 	input->dev.parent = dev;
2018 	input->devres_managed = true;
2019 
2020 	devres->input = input;
2021 	devres_add(dev, devres);
2022 
2023 	return input;
2024 }
2025 EXPORT_SYMBOL(devm_input_allocate_device);
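
/*
 * Illustrative sketch of a hypothetical platform driver probe(); the
 * example_* names are made up. With the managed allocator there is no
 * explicit input_unregister_device() or input_free_device() anywhere:
 * devres tears the device down when the owner unbinds.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "Example Key";
 *		input_set_capability(input, EV_KEY, KEY_WAKEUP);
 *
 *		return input_register_device(input);
 *	}
 */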
2026 
2027 /**
2028  * input_free_device - free memory occupied by input_dev structure
2029  * @dev: input device to free
2030  *
2031  * This function should only be used if input_register_device()
2032  * has not been called yet or if it failed. Once the device has been
2033  * registered, use input_unregister_device() instead; memory will be
2034  * freed once the last reference to the device is dropped.
2035  *
2036  * The device should have been allocated by input_allocate_device().
2037  *
2038  * NOTE: If there are outstanding references to the input device,
2039  * memory will not be freed until the last reference is dropped.
2040  */
2041 void input_free_device(struct input_dev *dev)
2042 {
2043 	if (dev) {
2044 		if (dev->devres_managed)
2045 			WARN_ON(devres_destroy(dev->dev.parent,
2046 						devm_input_device_release,
2047 						devm_input_device_match,
2048 						dev));
2049 		input_put_device(dev);
2050 	}
2051 }
2052 EXPORT_SYMBOL(input_free_device);
2053 
2054 /**
2055  * input_set_timestamp - set timestamp for input events
2056  * @dev: input device to set timestamp for
2057  * @timestamp: the time at which the event has occurred
2058  *   in CLOCK_MONOTONIC
2059  *
2060  * This function is intended to provide the input system with a more
2061  * accurate time of when an event actually occurred. The driver should
2062  * call this function as soon as a timestamp is acquired, ensuring that
2063  * the clock conversions in input_set_timestamp() are done correctly.
2064  *
2065  * The system entering a suspend state between timestamp acquisition and
2066  * the call to input_set_timestamp() can result in inaccurate conversions.
2067  */
2068 void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
2069 {
2070 	dev->timestamp[INPUT_CLK_MONO] = timestamp;
2071 	dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
2072 	dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
2073 							   TK_OFFS_BOOT);
2074 }
2075 EXPORT_SYMBOL(input_set_timestamp);
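
/*
 * Illustrative sketch of a hypothetical interrupt handler (all
 * example_* names are made up): the timestamp is captured as early as
 * possible and handed to the core before the event is reported, so the
 * MONO/REAL/BOOT conversions above reflect when the event occurred
 * rather than when it was processed.
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		input_set_timestamp(priv->input, ktime_get());
 *		input_report_key(priv->input, KEY_POWER, example_read_state(priv));
 *		input_sync(priv->input);
 *
 *		return IRQ_HANDLED;
 *	}
 */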
2076 
2077 /**
2078  * input_get_timestamp - get timestamp for input events
2079  * @dev: input device to get timestamp from
2080  *
2081  * A valid timestamp is a timestamp of non-zero value.
2082  */
2083 ktime_t *input_get_timestamp(struct input_dev *dev)
2084 {
2085 	const ktime_t invalid_timestamp = ktime_set(0, 0);
2086 
2087 	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
2088 		input_set_timestamp(dev, ktime_get());
2089 
2090 	return dev->timestamp;
2091 }
2092 EXPORT_SYMBOL(input_get_timestamp);
2093 
2094 /**
2095  * input_set_capability - mark device as capable of a certain event
2096  * @dev: device that is capable of emitting or accepting event
2097  * @type: type of the event (EV_KEY, EV_REL, etc...)
2098  * @code: event code
2099  *
2100  * In addition to setting the corresponding bit in the appropriate
2101  * capability bitmap, the function also adjusts dev->evbit.
2102  */
2103 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
2104 {
2105 	if (type < EV_CNT && input_max_code[type] &&
2106 	    code > input_max_code[type]) {
2107 		pr_err("%s: invalid code %u for type %u\n", __func__, code,
2108 		       type);
2109 		dump_stack();
2110 		return;
2111 	}
2112 
2113 	switch (type) {
2114 	case EV_KEY:
2115 		__set_bit(code, dev->keybit);
2116 		break;
2117 
2118 	case EV_REL:
2119 		__set_bit(code, dev->relbit);
2120 		break;
2121 
2122 	case EV_ABS:
2123 		input_alloc_absinfo(dev);
2124 		__set_bit(code, dev->absbit);
2125 		break;
2126 
2127 	case EV_MSC:
2128 		__set_bit(code, dev->mscbit);
2129 		break;
2130 
2131 	case EV_SW:
2132 		__set_bit(code, dev->swbit);
2133 		break;
2134 
2135 	case EV_LED:
2136 		__set_bit(code, dev->ledbit);
2137 		break;
2138 
2139 	case EV_SND:
2140 		__set_bit(code, dev->sndbit);
2141 		break;
2142 
2143 	case EV_FF:
2144 		__set_bit(code, dev->ffbit);
2145 		break;
2146 
2147 	case EV_PWR:
2148 		/* do nothing */
2149 		break;
2150 
2151 	default:
2152 		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
2153 		dump_stack();
2154 		return;
2155 	}
2156 
2157 	__set_bit(type, dev->evbit);
2158 }
2159 EXPORT_SYMBOL(input_set_capability);
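
/*
 * Illustrative sketch: declaring capabilities before registration. One
 * call per event code is enough for EV_KEY, EV_REL, EV_SW and friends;
 * for EV_ABS axes drivers normally follow up with (or simply use)
 * input_set_abs_params() to fill in the axis range, since
 * input_set_capability() only allocates dev->absinfo and sets the bits.
 *
 *	input_set_capability(input, EV_KEY, BTN_TOUCH);
 *	input_set_capability(input, EV_ABS, ABS_X);
 *	input_set_abs_params(input, ABS_X, 0, 1023, 0, 0);
 */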
2160 
2161 static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
2162 {
2163 	int mt_slots;
2164 	int i;
2165 	unsigned int events;
2166 
2167 	if (dev->mt) {
2168 		mt_slots = dev->mt->num_slots;
2169 	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
2170 		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
2171 			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
2172 		mt_slots = clamp(mt_slots, 2, 32);
2173 	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
2174 		mt_slots = 2;
2175 	} else {
2176 		mt_slots = 0;
2177 	}
2178 
2179 	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
2180 
2181 	if (test_bit(EV_ABS, dev->evbit))
2182 		for_each_set_bit(i, dev->absbit, ABS_CNT)
2183 			events += input_is_mt_axis(i) ? mt_slots : 1;
2184 
2185 	if (test_bit(EV_REL, dev->evbit))
2186 		events += bitmap_weight(dev->relbit, REL_CNT);
2187 
2188 	/* Make room for KEY and MSC events */
2189 	events += 7;
2190 
2191 	return events;
2192 }
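
/*
 * Rough worked example of the estimate above, assuming a hypothetical
 * 10-slot multi-touch device with ABS_MT_SLOT, ABS_MT_TRACKING_ID and
 * ABS_MT_POSITION_X/Y plus plain ABS_X/Y set:
 *
 *	(10 + 1)	mt_slots plus SYN_* headroom
 *	+ 4 * 10 + 2	four MT axes, two single-value axes
 *	+ 7		room for KEY and MSC events
 *	= 60 events per packet
 */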
2193 
2194 #define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
2195 	do {								\
2196 		if (!test_bit(EV_##type, dev->evbit))			\
2197 			memset(dev->bits##bit, 0,			\
2198 				sizeof(dev->bits##bit));		\
2199 	} while (0)
2200 
2201 static void input_cleanse_bitmasks(struct input_dev *dev)
2202 {
2203 	INPUT_CLEANSE_BITMASK(dev, KEY, key);
2204 	INPUT_CLEANSE_BITMASK(dev, REL, rel);
2205 	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
2206 	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
2207 	INPUT_CLEANSE_BITMASK(dev, LED, led);
2208 	INPUT_CLEANSE_BITMASK(dev, SND, snd);
2209 	INPUT_CLEANSE_BITMASK(dev, FF, ff);
2210 	INPUT_CLEANSE_BITMASK(dev, SW, sw);
2211 }
2212 
2213 static void __input_unregister_device(struct input_dev *dev)
2214 {
2215 	struct input_handle *handle, *next;
2216 
2217 	input_disconnect_device(dev);
2218 
2219 	mutex_lock(&input_mutex);
2220 
2221 	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
2222 		handle->handler->disconnect(handle);
2223 	WARN_ON(!list_empty(&dev->h_list));
2224 
2225 	del_timer_sync(&dev->timer);
2226 	list_del_init(&dev->node);
2227 
2228 	input_wakeup_procfs_readers();
2229 
2230 	mutex_unlock(&input_mutex);
2231 
2232 	device_del(&dev->dev);
2233 }
2234 
2235 static void devm_input_device_unregister(struct device *dev, void *res)
2236 {
2237 	struct input_devres *devres = res;
2238 	struct input_dev *input = devres->input;
2239 
2240 	dev_dbg(dev, "%s: unregistering device %s\n",
2241 		__func__, dev_name(&input->dev));
2242 	__input_unregister_device(input);
2243 }
2244 
2245 /*
2246  * Generate software autorepeat event. Note that we take
2247  * dev->event_lock here to avoid racing with input_event
2248  * which may cause keys to get "stuck".
2249  */
2250 static void input_repeat_key(struct timer_list *t)
2251 {
2252 	struct input_dev *dev = from_timer(dev, t, timer);
2253 	unsigned long flags;
2254 
2255 	spin_lock_irqsave(&dev->event_lock, flags);
2256 
2257 	if (!dev->inhibited &&
2258 	    test_bit(dev->repeat_key, dev->key) &&
2259 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
2260 
2261 		input_set_timestamp(dev, ktime_get());
2262 		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
2263 		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
2264 
2265 		if (dev->rep[REP_PERIOD])
2266 			mod_timer(&dev->timer, jiffies +
2267 					msecs_to_jiffies(dev->rep[REP_PERIOD]));
2268 	}
2269 
2270 	spin_unlock_irqrestore(&dev->event_lock, flags);
2271 }
2272 
2273 /**
2274  * input_enable_softrepeat - enable software autorepeat
2275  * @dev: input device
2276  * @delay: repeat delay
2277  * @period: repeat period
2278  *
2279  * Enable software autorepeat on the input device.
2280  */
2281 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2282 {
2283 	dev->timer.function = input_repeat_key;
2284 	dev->rep[REP_DELAY] = delay;
2285 	dev->rep[REP_PERIOD] = period;
2286 }
2287 EXPORT_SYMBOL(input_enable_softrepeat);
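
/*
 * Illustrative sketch: input_register_device() below installs a default
 * 250 ms delay / 33 ms period software autorepeat if the driver left
 * dev->rep[] at zero. A driver that wants autorepeat with different
 * timings can set the EV_REP capability and pick its own values, for
 * example before registration:
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 400, 50);
 */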
2288 
2289 bool input_device_enabled(struct input_dev *dev)
2290 {
2291 	lockdep_assert_held(&dev->mutex);
2292 
2293 	return !dev->inhibited && dev->users > 0;
2294 }
2295 EXPORT_SYMBOL_GPL(input_device_enabled);
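
/*
 * Illustrative sketch of how a driver might use this helper (the
 * example_* names are made up): a resume path only restarts the
 * hardware when the device is opened and not inhibited, with dev->mutex
 * held as the assertion above requires.
 *
 *	mutex_lock(&input->mutex);
 *	if (input_device_enabled(input))
 *		example_hw_start(priv);
 *	mutex_unlock(&input->mutex);
 */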
2296 
2297 /**
2298  * input_register_device - register device with input core
2299  * @dev: device to be registered
2300  *
2301  * This function registers the device with the input core. The device
2302  * must be allocated with input_allocate_device() and have all its
2303  * capabilities set up before registering.
2304  * If the function fails, the device must be freed with input_free_device().
2305  * Once the device has been successfully registered it can be unregistered
2306  * with input_unregister_device(); input_free_device() should not be
2307  * called in this case.
2308  *
2309  * Note that this function is also used to register managed input devices
2310  * (ones allocated with devm_input_allocate_device()). Such managed input
2311  * devices need not be explicitly unregistered or freed; their tear down
2312  * is controlled by the devres infrastructure. It is also worth noting
2313  * that tear down of managed input devices is internally a 2-step process:
2314  * a registered managed input device is first unregistered, but stays in
2315  * memory and can still handle input_event() calls (although events will
2316  * not be delivered anywhere). The managed input device is freed later,
2317  * when the devres stack is unwound to the point where the device
2318  * allocation was made.
2319  */
2320 int input_register_device(struct input_dev *dev)
2321 {
2322 	struct input_devres *devres = NULL;
2323 	struct input_handler *handler;
2324 	unsigned int packet_size;
2325 	const char *path;
2326 	int error;
2327 
2328 	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2329 		dev_err(&dev->dev,
2330 			"Absolute device without dev->absinfo, refusing to register\n");
2331 		return -EINVAL;
2332 	}
2333 
2334 	if (dev->devres_managed) {
2335 		devres = devres_alloc(devm_input_device_unregister,
2336 				      sizeof(*devres), GFP_KERNEL);
2337 		if (!devres)
2338 			return -ENOMEM;
2339 
2340 		devres->input = dev;
2341 	}
2342 
2343 	/* Every input device generates EV_SYN/SYN_REPORT events. */
2344 	__set_bit(EV_SYN, dev->evbit);
2345 
2346 	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
2347 	__clear_bit(KEY_RESERVED, dev->keybit);
2348 
2349 	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
2350 	input_cleanse_bitmasks(dev);
2351 
2352 	packet_size = input_estimate_events_per_packet(dev);
2353 	if (dev->hint_events_per_packet < packet_size)
2354 		dev->hint_events_per_packet = packet_size;
2355 
2356 	dev->max_vals = dev->hint_events_per_packet + 2;
2357 	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
2358 	if (!dev->vals) {
2359 		error = -ENOMEM;
2360 		goto err_devres_free;
2361 	}
2362 
2363 	/*
2364 	 * If delay and period are pre-set by the driver, then autorepeating
2365 	 * is handled by the driver itself and we don't do it in input.c.
2366 	 */
2367 	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
2368 		input_enable_softrepeat(dev, 250, 33);
2369 
2370 	if (!dev->getkeycode)
2371 		dev->getkeycode = input_default_getkeycode;
2372 
2373 	if (!dev->setkeycode)
2374 		dev->setkeycode = input_default_setkeycode;
2375 
2376 	if (dev->poller)
2377 		input_dev_poller_finalize(dev->poller);
2378 
2379 	error = device_add(&dev->dev);
2380 	if (error)
2381 		goto err_free_vals;
2382 
2383 	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
2384 	pr_info("%s as %s\n",
2385 		dev->name ? dev->name : "Unspecified device",
2386 		path ? path : "N/A");
2387 	kfree(path);
2388 
2389 	error = mutex_lock_interruptible(&input_mutex);
2390 	if (error)
2391 		goto err_device_del;
2392 
2393 	list_add_tail(&dev->node, &input_dev_list);
2394 
2395 	list_for_each_entry(handler, &input_handler_list, node)
2396 		input_attach_handler(dev, handler);
2397 
2398 	input_wakeup_procfs_readers();
2399 
2400 	mutex_unlock(&input_mutex);
2401 
2402 	if (dev->devres_managed) {
2403 		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
2404 			__func__, dev_name(&dev->dev));
2405 		devres_add(dev->dev.parent, devres);
2406 	}
2407 	return 0;
2408 
2409 err_device_del:
2410 	device_del(&dev->dev);
2411 err_free_vals:
2412 	kfree(dev->vals);
2413 	dev->vals = NULL;
2414 err_devres_free:
2415 	devres_free(devres);
2416 	return error;
2417 }
2418 EXPORT_SYMBOL(input_register_device);
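
/*
 * Illustrative sketch (non-managed device): once registration succeeds,
 * teardown switches from input_free_device() to
 * input_unregister_device().
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);	// never registered
 *		return error;
 *	}
 *
 *	// ... later, from the driver's remove path:
 *	input_unregister_device(input);		// no explicit free afterwards
 */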
2419 
2420 /**
2421  * input_unregister_device - unregister previously registered device
2422  * @dev: device to be unregistered
2423  *
2424  * This function unregisters an input device. Once the device is unregistered,
2425  * the caller should not try to access it, as it may be freed at any moment.
2426  */
2427 void input_unregister_device(struct input_dev *dev)
2428 {
2429 	if (dev->devres_managed) {
2430 		WARN_ON(devres_destroy(dev->dev.parent,
2431 					devm_input_device_unregister,
2432 					devm_input_device_match,
2433 					dev));
2434 		__input_unregister_device(dev);
2435 		/*
2436 		 * We do not call input_put_device() here because that will be
2437 		 * done when the second devres entry (devm_input_device_release) fires.
2438 		 */
2439 	} else {
2440 		__input_unregister_device(dev);
2441 		input_put_device(dev);
2442 	}
2443 }
2444 EXPORT_SYMBOL(input_unregister_device);
2445 
2446 /**
2447  * input_register_handler - register a new input handler
2448  * @handler: handler to be registered
2449  *
2450  * This function registers a new input handler (interface) for input
2451  * devices in the system and attaches it to all input devices that
2452  * are compatible with the handler.
2453  */
2454 int input_register_handler(struct input_handler *handler)
2455 {
2456 	struct input_dev *dev;
2457 	int error;
2458 
2459 	error = mutex_lock_interruptible(&input_mutex);
2460 	if (error)
2461 		return error;
2462 
2463 	INIT_LIST_HEAD(&handler->h_list);
2464 
2465 	list_add_tail(&handler->node, &input_handler_list);
2466 
2467 	list_for_each_entry(dev, &input_dev_list, node)
2468 		input_attach_handler(dev, handler);
2469 
2470 	input_wakeup_procfs_readers();
2471 
2472 	mutex_unlock(&input_mutex);
2473 	return 0;
2474 }
2475 EXPORT_SYMBOL(input_register_handler);
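
/*
 * Illustrative sketch of a minimal, hypothetical handler (the example_*
 * names are made up). ->connect()/->disconnect() create and destroy an
 * input_handle (see the sketch after input_register_handle() below);
 * ->event() runs in atomic context with dev->event_lock held.
 *
 *	static const struct input_device_id example_ids[] = {
 *		{ .driver_info = 1 },	// matches every device
 *		{ },			// terminating entry
 *	};
 *
 *	static struct input_handler example_handler = {
 *		.event		= example_event,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *		.name		= "example",
 *		.id_table	= example_ids,
 *	};
 *
 *	error = input_register_handler(&example_handler);
 */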
2476 
2477 /**
2478  * input_unregister_handler - unregisters an input handler
2479  * @handler: handler to be unregistered
2480  *
2481  * This function disconnects a handler from its input devices and
2482  * removes it from lists of known handlers.
2483  * removes it from the list of known handlers.
2484 void input_unregister_handler(struct input_handler *handler)
2485 {
2486 	struct input_handle *handle, *next;
2487 
2488 	mutex_lock(&input_mutex);
2489 
2490 	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2491 		handler->disconnect(handle);
2492 	WARN_ON(!list_empty(&handler->h_list));
2493 
2494 	list_del_init(&handler->node);
2495 
2496 	input_wakeup_procfs_readers();
2497 
2498 	mutex_unlock(&input_mutex);
2499 }
2500 EXPORT_SYMBOL(input_unregister_handler);
2501 
2502 /**
2503  * input_handler_for_each_handle - handle iterator
2504  * @handler: input handler to iterate
2505  * @data: data for the callback
2506  * @fn: function to be called for each handle
2507  *
2508  * Iterate over @handler's list of handles, and call @fn for each, passing
2509  * it @data; stop when @fn returns a non-zero value. The function uses
2510  * RCU to traverse the list and therefore may be used in atomic
2511  * contexts. The @fn callback is invoked from an RCU critical section and
2512  * thus must not sleep.
2513  */
2514 int input_handler_for_each_handle(struct input_handler *handler, void *data,
2515 				  int (*fn)(struct input_handle *, void *))
2516 {
2517 	struct input_handle *handle;
2518 	int retval = 0;
2519 
2520 	rcu_read_lock();
2521 
2522 	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2523 		retval = fn(handle, data);
2524 		if (retval)
2525 			break;
2526 	}
2527 
2528 	rcu_read_unlock();
2529 
2530 	return retval;
2531 }
2532 EXPORT_SYMBOL(input_handler_for_each_handle);
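
/*
 * Illustrative sketch of a callback usable with the iterator above (the
 * example_* names are made up). It runs under rcu_read_lock(), must not
 * sleep, and returning non-zero stops the iteration early.
 *
 *	static int example_set_led(struct input_handle *handle, void *data)
 *	{
 *		int *on = data;
 *
 *		input_inject_event(handle, EV_LED, LED_CAPSL, *on);
 *		return 0;	// keep iterating
 *	}
 *
 *	int on = 1;
 *
 *	input_handler_for_each_handle(&example_handler, &on, example_set_led);
 */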
2533 
2534 /**
2535  * input_register_handle - register a new input handle
2536  * @handle: handle to register
2537  *
2538  * This function puts a new input handle onto the device's
2539  * and handler's lists so that events can flow through
2540  * it once it is opened using input_open_device().
2541  *
2542  * This function is supposed to be called from handler's
2543  * connect() method.
2544  */
2545 int input_register_handle(struct input_handle *handle)
2546 {
2547 	struct input_handler *handler = handle->handler;
2548 	struct input_dev *dev = handle->dev;
2549 	int error;
2550 
2551 	/*
2552 	 * We take dev->mutex here to prevent race with
2553 	 * input_release_device().
2554 	 */
2555 	error = mutex_lock_interruptible(&dev->mutex);
2556 	if (error)
2557 		return error;
2558 
2559 	/*
2560 	 * Filters go to the head of the list, normal handlers
2561 	 * to the tail.
2562 	 */
2563 	if (handler->filter)
2564 		list_add_rcu(&handle->d_node, &dev->h_list);
2565 	else
2566 		list_add_tail_rcu(&handle->d_node, &dev->h_list);
2567 
2568 	mutex_unlock(&dev->mutex);
2569 
2570 	/*
2571 	 * Since we are supposed to be called from ->connect()
2572 	 * which is mutually exclusive with ->disconnect()
2573 	 * we can't be racing with input_unregister_handle()
2574 	 * and so separate lock is not needed here.
2575 	 */
2576 	list_add_tail_rcu(&handle->h_node, &handler->h_list);
2577 
2578 	if (handler->start)
2579 		handler->start(handle);
2580 
2581 	return 0;
2582 }
2583 EXPORT_SYMBOL(input_register_handle);
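
/*
 * Illustrative sketch of a minimal, hypothetical ->connect() method
 * (example_* names are made up), modeled on the simplest in-tree
 * handlers: allocate a handle, register it, then open the device. The
 * matching ->disconnect() closes, unregisters and frees the handle.
 *
 *	static int example_connect(struct input_handler *handler,
 *				   struct input_dev *dev,
 *				   const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "example";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	 err_unregister_handle:
 *		input_unregister_handle(handle);
 *	 err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 */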
2584 
2585 /**
2586  * input_unregister_handle - unregister an input handle
2587  * @handle: handle to unregister
2588  *
2589  * This function removes the input handle from the device's
2590  * and handler's lists.
2591  *
2592  * This function is supposed to be called from handler's
2593  * disconnect() method.
2594  */
2595 void input_unregister_handle(struct input_handle *handle)
2596 {
2597 	struct input_dev *dev = handle->dev;
2598 
2599 	list_del_rcu(&handle->h_node);
2600 
2601 	/*
2602 	 * Take dev->mutex to prevent race with input_release_device().
2603 	 */
2604 	mutex_lock(&dev->mutex);
2605 	list_del_rcu(&handle->d_node);
2606 	mutex_unlock(&dev->mutex);
2607 
2608 	synchronize_rcu();
2609 }
2610 EXPORT_SYMBOL(input_unregister_handle);
2611 
2612 /**
2613  * input_get_new_minor - allocates a new input minor number
2614  * @legacy_base: beginning of the legacy range to be searched
2615  * @legacy_num: size of legacy range
2616  * @allow_dynamic: whether we can also take ID from the dynamic range
2617  *
2618  * This function allocates a new device minor from the input major namespace.
2619  * The caller can request a legacy minor by specifying the @legacy_base and
2620  * @legacy_num parameters, and whether an ID may be allocated from the dynamic
2621  * range if there are no free IDs in the legacy range.
2622  */
2623 int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2624 			bool allow_dynamic)
2625 {
2626 	/*
2627 	 * This function should be called from input handler's ->connect()
2628 	 * methods, which are serialized with input_mutex, so no additional
2629 	 * locking is needed here.
2630 	 */
2631 	if (legacy_base >= 0) {
2632 		int minor = ida_simple_get(&input_ida,
2633 					   legacy_base,
2634 					   legacy_base + legacy_num,
2635 					   GFP_KERNEL);
2636 		if (minor >= 0 || !allow_dynamic)
2637 			return minor;
2638 	}
2639 
2640 	return ida_simple_get(&input_ida,
2641 			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2642 			      GFP_KERNEL);
2643 }
2644 EXPORT_SYMBOL(input_get_new_minor);
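
/*
 * Illustrative sketch (hypothetical handler; the EXAMPLE_* constants
 * are made up): ->connect() grabs a legacy minor if one is free,
 * otherwise falls back to the dynamic range, and ->disconnect()
 * releases it again with input_free_minor().
 *
 *	minor = input_get_new_minor(EXAMPLE_MINOR_BASE, EXAMPLE_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	input_free_minor(minor);
 */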
2645 
2646 /**
2647  * input_free_minor - release previously allocated minor
2648  * @minor: minor to be released
2649  *
2650  * This function releases previously allocated input minor so that it can be
2651  * reused later.
2652  */
2653 void input_free_minor(unsigned int minor)
2654 {
2655 	ida_simple_remove(&input_ida, minor);
2656 }
2657 EXPORT_SYMBOL(input_free_minor);
2658 
2659 static int __init input_init(void)
2660 {
2661 	int err;
2662 
2663 	err = class_register(&input_class);
2664 	if (err) {
2665 		pr_err("unable to register input_dev class\n");
2666 		return err;
2667 	}
2668 
2669 	err = input_proc_init();
2670 	if (err)
2671 		goto fail1;
2672 
2673 	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2674 				     INPUT_MAX_CHAR_DEVICES, "input");
2675 	if (err) {
2676 		pr_err("unable to register char major %d\n", INPUT_MAJOR);
2677 		goto fail2;
2678 	}
2679 
2680 	return 0;
2681 
2682  fail2:	input_proc_exit();
2683  fail1:	class_unregister(&input_class);
2684 	return err;
2685 }
2686 
2687 static void __exit input_exit(void)
2688 {
2689 	input_proc_exit();
2690 	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2691 				 INPUT_MAX_CHAR_DEVICES);
2692 	class_unregister(&input_class);
2693 }
2694 
2695 subsys_initcall(input_init);
2696 module_exit(input_exit);
2697