xref: /openbmc/linux/drivers/input/input.c (revision b593bce5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * The input core
4  *
5  * Copyright (c) 1999-2002 Vojtech Pavlik
6  */
7 
8 
9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
10 
11 #include <linux/init.h>
12 #include <linux/types.h>
13 #include <linux/idr.h>
14 #include <linux/input/mt.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/random.h>
18 #include <linux/major.h>
19 #include <linux/proc_fs.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/poll.h>
23 #include <linux/device.h>
24 #include <linux/mutex.h>
25 #include <linux/rcupdate.h>
26 #include "input-compat.h"
27 
28 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
29 MODULE_DESCRIPTION("Input core");
30 MODULE_LICENSE("GPL");
31 
32 #define INPUT_MAX_CHAR_DEVICES		1024
33 #define INPUT_FIRST_DYNAMIC_DEV		256
34 static DEFINE_IDA(input_ida);
35 
36 static LIST_HEAD(input_dev_list);
37 static LIST_HEAD(input_handler_list);
38 
39 /*
40  * input_mutex protects access to both input_dev_list and input_handler_list.
41  * This also causes input_[un]register_device and input_[un]register_handler
42  * to be mutually exclusive, which simplifies locking in drivers implementing
43  * input handlers.
44  */
45 static DEFINE_MUTEX(input_mutex);
46 
47 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
48 
49 static inline int is_event_supported(unsigned int code,
50 				     unsigned long *bm, unsigned int max)
51 {
52 	return code <= max && test_bit(code, bm);
53 }
54 
55 static int input_defuzz_abs_event(int value, int old_val, int fuzz)
56 {
57 	if (fuzz) {
58 		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
59 			return old_val;
60 
61 		if (value > old_val - fuzz && value < old_val + fuzz)
62 			return (old_val * 3 + value) / 4;
63 
64 		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
65 			return (old_val + value) / 2;
66 	}
67 
68 	return value;
69 }
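/*
 * A worked example of the defuzzing above (illustrative numbers only):
 * with fuzz = 8 and old_val = 100, a new value of 102 falls within
 * old_val +/- fuzz/2 and is discarded in favour of 100; a value of 106
 * (within +/- fuzz) is smoothed to (100 * 3 + 106) / 4 = 101; a value
 * of 112 (within +/- 2 * fuzz) is averaged to (100 + 112) / 2 = 106;
 * anything further away is passed through unchanged.
 */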
70 
71 static void input_start_autorepeat(struct input_dev *dev, int code)
72 {
73 	if (test_bit(EV_REP, dev->evbit) &&
74 	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
75 	    dev->timer.function) {
76 		dev->repeat_key = code;
77 		mod_timer(&dev->timer,
78 			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
79 	}
80 }
81 
82 static void input_stop_autorepeat(struct input_dev *dev)
83 {
84 	del_timer(&dev->timer);
85 }
86 
87 /*
88  * Pass event first through all filters and then, if event has not been
89  * filtered out, through all open handles. This function is called with
90  * dev->event_lock held and interrupts disabled.
91  */
92 static unsigned int input_to_handler(struct input_handle *handle,
93 			struct input_value *vals, unsigned int count)
94 {
95 	struct input_handler *handler = handle->handler;
96 	struct input_value *end = vals;
97 	struct input_value *v;
98 
99 	if (handler->filter) {
100 		for (v = vals; v != vals + count; v++) {
101 			if (handler->filter(handle, v->type, v->code, v->value))
102 				continue;
103 			if (end != v)
104 				*end = *v;
105 			end++;
106 		}
107 		count = end - vals;
108 	}
109 
110 	if (!count)
111 		return 0;
112 
113 	if (handler->events)
114 		handler->events(handle, vals, count);
115 	else if (handler->event)
116 		for (v = vals; v != vals + count; v++)
117 			handler->event(handle, v->type, v->code, v->value);
118 
119 	return count;
120 }
121 
122 /*
123  * Pass values first through all filters and then, if event has not been
124  * filtered out, through all open handles. This function is called with
125  * dev->event_lock held and interrupts disabled.
126  */
127 static void input_pass_values(struct input_dev *dev,
128 			      struct input_value *vals, unsigned int count)
129 {
130 	struct input_handle *handle;
131 	struct input_value *v;
132 
133 	if (!count)
134 		return;
135 
136 	rcu_read_lock();
137 
138 	handle = rcu_dereference(dev->grab);
139 	if (handle) {
140 		count = input_to_handler(handle, vals, count);
141 	} else {
142 		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
143 			if (handle->open) {
144 				count = input_to_handler(handle, vals, count);
145 				if (!count)
146 					break;
147 			}
148 	}
149 
150 	rcu_read_unlock();
151 
152 	/* trigger auto repeat for key events */
153 	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
154 		for (v = vals; v != vals + count; v++) {
155 			if (v->type == EV_KEY && v->value != 2) {
156 				if (v->value)
157 					input_start_autorepeat(dev, v->code);
158 				else
159 					input_stop_autorepeat(dev);
160 			}
161 		}
162 	}
163 }
164 
165 static void input_pass_event(struct input_dev *dev,
166 			     unsigned int type, unsigned int code, int value)
167 {
168 	struct input_value vals[] = { { type, code, value } };
169 
170 	input_pass_values(dev, vals, ARRAY_SIZE(vals));
171 }
172 
173 /*
174  * Generate software autorepeat event. Note that we take
175  * dev->event_lock here to avoid racing with input_event
176  * which may cause keys to get "stuck".
177  */
178 static void input_repeat_key(struct timer_list *t)
179 {
180 	struct input_dev *dev = from_timer(dev, t, timer);
181 	unsigned long flags;
182 
183 	spin_lock_irqsave(&dev->event_lock, flags);
184 
185 	if (test_bit(dev->repeat_key, dev->key) &&
186 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
187 		struct input_value vals[] =  {
188 			{ EV_KEY, dev->repeat_key, 2 },
189 			input_value_sync
190 		};
191 
192 		input_pass_values(dev, vals, ARRAY_SIZE(vals));
193 
194 		if (dev->rep[REP_PERIOD])
195 			mod_timer(&dev->timer, jiffies +
196 					msecs_to_jiffies(dev->rep[REP_PERIOD]));
197 	}
198 
199 	spin_unlock_irqrestore(&dev->event_lock, flags);
200 }
201 
202 #define INPUT_IGNORE_EVENT	0
203 #define INPUT_PASS_TO_HANDLERS	1
204 #define INPUT_PASS_TO_DEVICE	2
205 #define INPUT_SLOT		4
206 #define INPUT_FLUSH		8
207 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
208 
209 static int input_handle_abs_event(struct input_dev *dev,
210 				  unsigned int code, int *pval)
211 {
212 	struct input_mt *mt = dev->mt;
213 	bool is_mt_event;
214 	int *pold;
215 
216 	if (code == ABS_MT_SLOT) {
217 		/*
218 		 * "Stage" the event; we'll flush it later, when we
219 		 * get actual touch data.
220 		 */
221 		if (mt && *pval >= 0 && *pval < mt->num_slots)
222 			mt->slot = *pval;
223 
224 		return INPUT_IGNORE_EVENT;
225 	}
226 
227 	is_mt_event = input_is_mt_value(code);
228 
229 	if (!is_mt_event) {
230 		pold = &dev->absinfo[code].value;
231 	} else if (mt) {
232 		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
233 	} else {
234 		/*
235 		 * Bypass filtering for multi-touch events when
236 		 * not employing slots.
237 		 */
238 		pold = NULL;
239 	}
240 
241 	if (pold) {
242 		*pval = input_defuzz_abs_event(*pval, *pold,
243 						dev->absinfo[code].fuzz);
244 		if (*pold == *pval)
245 			return INPUT_IGNORE_EVENT;
246 
247 		*pold = *pval;
248 	}
249 
250 	/* Flush pending "slot" event */
251 	if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
252 		input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
253 		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
254 	}
255 
256 	return INPUT_PASS_TO_HANDLERS;
257 }
258 
259 static int input_get_disposition(struct input_dev *dev,
260 			  unsigned int type, unsigned int code, int *pval)
261 {
262 	int disposition = INPUT_IGNORE_EVENT;
263 	int value = *pval;
264 
265 	switch (type) {
266 
267 	case EV_SYN:
268 		switch (code) {
269 		case SYN_CONFIG:
270 			disposition = INPUT_PASS_TO_ALL;
271 			break;
272 
273 		case SYN_REPORT:
274 			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
275 			break;
276 		case SYN_MT_REPORT:
277 			disposition = INPUT_PASS_TO_HANDLERS;
278 			break;
279 		}
280 		break;
281 
282 	case EV_KEY:
283 		if (is_event_supported(code, dev->keybit, KEY_MAX)) {
284 
285 			/* auto-repeat bypasses state updates */
286 			if (value == 2) {
287 				disposition = INPUT_PASS_TO_HANDLERS;
288 				break;
289 			}
290 
291 			if (!!test_bit(code, dev->key) != !!value) {
292 
293 				__change_bit(code, dev->key);
294 				disposition = INPUT_PASS_TO_HANDLERS;
295 			}
296 		}
297 		break;
298 
299 	case EV_SW:
300 		if (is_event_supported(code, dev->swbit, SW_MAX) &&
301 		    !!test_bit(code, dev->sw) != !!value) {
302 
303 			__change_bit(code, dev->sw);
304 			disposition = INPUT_PASS_TO_HANDLERS;
305 		}
306 		break;
307 
308 	case EV_ABS:
309 		if (is_event_supported(code, dev->absbit, ABS_MAX))
310 			disposition = input_handle_abs_event(dev, code, &value);
311 
312 		break;
313 
314 	case EV_REL:
315 		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
316 			disposition = INPUT_PASS_TO_HANDLERS;
317 
318 		break;
319 
320 	case EV_MSC:
321 		if (is_event_supported(code, dev->mscbit, MSC_MAX))
322 			disposition = INPUT_PASS_TO_ALL;
323 
324 		break;
325 
326 	case EV_LED:
327 		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
328 		    !!test_bit(code, dev->led) != !!value) {
329 
330 			__change_bit(code, dev->led);
331 			disposition = INPUT_PASS_TO_ALL;
332 		}
333 		break;
334 
335 	case EV_SND:
336 		if (is_event_supported(code, dev->sndbit, SND_MAX)) {
337 
338 			if (!!test_bit(code, dev->snd) != !!value)
339 				__change_bit(code, dev->snd);
340 			disposition = INPUT_PASS_TO_ALL;
341 		}
342 		break;
343 
344 	case EV_REP:
345 		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
346 			dev->rep[code] = value;
347 			disposition = INPUT_PASS_TO_ALL;
348 		}
349 		break;
350 
351 	case EV_FF:
352 		if (value >= 0)
353 			disposition = INPUT_PASS_TO_ALL;
354 		break;
355 
356 	case EV_PWR:
357 		disposition = INPUT_PASS_TO_ALL;
358 		break;
359 	}
360 
361 	*pval = value;
362 	return disposition;
363 }
364 
365 static void input_handle_event(struct input_dev *dev,
366 			       unsigned int type, unsigned int code, int value)
367 {
368 	int disposition = input_get_disposition(dev, type, code, &value);
369 
370 	if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
371 		add_input_randomness(type, code, value);
372 
373 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
374 		dev->event(dev, type, code, value);
375 
376 	if (!dev->vals)
377 		return;
378 
379 	if (disposition & INPUT_PASS_TO_HANDLERS) {
380 		struct input_value *v;
381 
382 		if (disposition & INPUT_SLOT) {
383 			v = &dev->vals[dev->num_vals++];
384 			v->type = EV_ABS;
385 			v->code = ABS_MT_SLOT;
386 			v->value = dev->mt->slot;
387 		}
388 
389 		v = &dev->vals[dev->num_vals++];
390 		v->type = type;
391 		v->code = code;
392 		v->value = value;
393 	}
394 
395 	if (disposition & INPUT_FLUSH) {
396 		if (dev->num_vals >= 2)
397 			input_pass_values(dev, dev->vals, dev->num_vals);
398 		dev->num_vals = 0;
399 	} else if (dev->num_vals >= dev->max_vals - 2) {
400 		dev->vals[dev->num_vals++] = input_value_sync;
401 		input_pass_values(dev, dev->vals, dev->num_vals);
402 		dev->num_vals = 0;
403 	}
404 
405 }
406 
407 /**
408  * input_event() - report new input event
409  * @dev: device that generated the event
410  * @type: type of the event
411  * @code: event code
412  * @value: value of the event
413  *
414  * This function should be used by drivers implementing various input
415  * devices to report input events. See also input_inject_event().
416  *
417  * NOTE: input_event() may be safely used right after an input device was
418  * allocated with input_allocate_device(), even before it is registered
419  * with input_register_device(), but the event will not reach any of the
420  * input handlers. Such early invocation of input_event() may be used
421  * to 'seed' the initial state of a switch or the initial position of an
422  * absolute axis, etc.
423  */
424 void input_event(struct input_dev *dev,
425 		 unsigned int type, unsigned int code, int value)
426 {
427 	unsigned long flags;
428 
429 	if (is_event_supported(type, dev->evbit, EV_MAX)) {
430 
431 		spin_lock_irqsave(&dev->event_lock, flags);
432 		input_handle_event(dev, type, code, value);
433 		spin_unlock_irqrestore(&dev->event_lock, flags);
434 	}
435 }
436 EXPORT_SYMBOL(input_event);
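/*
 * Illustrative sketch (not part of this file): a hypothetical button
 * driver's interrupt handler could report a key press followed by a
 * SYN_REPORT marker like this; "struct my_button" and my_button_isr()
 * are made-up names.
 *
 *	static irqreturn_t my_button_isr(int irq, void *data)
 *	{
 *		struct my_button *btn = data;
 *
 *		input_event(btn->input, EV_KEY, KEY_POWER, 1);
 *		input_event(btn->input, EV_SYN, SYN_REPORT, 0);
 *		return IRQ_HANDLED;
 *	}
 *
 * Most drivers use the input_report_key()/input_sync() helpers, which are
 * thin wrappers around input_event().
 */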
437 
438 /**
439  * input_inject_event() - send input event from input handler
440  * @handle: input handle to send event through
441  * @type: type of the event
442  * @code: event code
443  * @value: value of the event
444  *
445  * Similar to input_event() but will ignore the event if the device is
446  * "grabbed" and the handle injecting the event is not the one that owns
447  * the device.
448  */
449 void input_inject_event(struct input_handle *handle,
450 			unsigned int type, unsigned int code, int value)
451 {
452 	struct input_dev *dev = handle->dev;
453 	struct input_handle *grab;
454 	unsigned long flags;
455 
456 	if (is_event_supported(type, dev->evbit, EV_MAX)) {
457 		spin_lock_irqsave(&dev->event_lock, flags);
458 
459 		rcu_read_lock();
460 		grab = rcu_dereference(dev->grab);
461 		if (!grab || grab == handle)
462 			input_handle_event(dev, type, code, value);
463 		rcu_read_unlock();
464 
465 		spin_unlock_irqrestore(&dev->event_lock, flags);
466 	}
467 }
468 EXPORT_SYMBOL(input_inject_event);
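/*
 * Illustrative sketch: an input handler (for example a console keyboard
 * handler) could mirror LED state back to the device through its handle;
 * the "on" variable below is hypothetical.
 *
 *	input_inject_event(handle, EV_LED, LED_CAPSL, on ? 1 : 0);
 *	input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
 *
 * If another handle has grabbed the device, these injected events are
 * silently dropped, as described above.
 */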
469 
470 /**
471  * input_alloc_absinfo - allocates array of input_absinfo structs
472  * @dev: the input device emitting absolute events
473  *
474  * If the absinfo struct the caller asked for is already allocated, this
475  * function will not do anything.
476  */
477 void input_alloc_absinfo(struct input_dev *dev)
478 {
479 	if (dev->absinfo)
480 		return;
481 
482 	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
483 	if (!dev->absinfo) {
484 		dev_err(dev->dev.parent ?: &dev->dev,
485 			"%s: unable to allocate memory\n", __func__);
486 		/*
487 		 * We will handle this allocation failure in
488 		 * input_register_device() when we refuse to register input
489 		 * device with ABS bits but without absinfo.
490 		 */
491 	}
492 }
493 EXPORT_SYMBOL(input_alloc_absinfo);
494 
495 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
496 			  int min, int max, int fuzz, int flat)
497 {
498 	struct input_absinfo *absinfo;
499 
500 	input_alloc_absinfo(dev);
501 	if (!dev->absinfo)
502 		return;
503 
504 	absinfo = &dev->absinfo[axis];
505 	absinfo->minimum = min;
506 	absinfo->maximum = max;
507 	absinfo->fuzz = fuzz;
508 	absinfo->flat = flat;
509 
510 	__set_bit(EV_ABS, dev->evbit);
511 	__set_bit(axis, dev->absbit);
512 }
513 EXPORT_SYMBOL(input_set_abs_params);
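/*
 * Illustrative sketch: a hypothetical touchscreen driver setting up its
 * absolute axes before registration (the ranges below are made up):
 *
 *	input_set_abs_params(dev, ABS_X, 0, 4095, 4, 0);
 *	input_set_abs_params(dev, ABS_Y, 0, 4095, 4, 0);
 *	input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
 *
 * Note that this sets EV_ABS and the per-axis bit for the driver, so it
 * does not have to touch dev->evbit or dev->absbit itself.
 */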
514 
515 
516 /**
517  * input_grab_device - grabs device for exclusive use
518  * @handle: input handle that wants to own the device
519  *
520  * When a device is grabbed by an input handle, all events generated by
521  * the device are delivered only to this handle. Also, events injected
522  * by other input handles are ignored while the device is grabbed.
523  */
524 int input_grab_device(struct input_handle *handle)
525 {
526 	struct input_dev *dev = handle->dev;
527 	int retval;
528 
529 	retval = mutex_lock_interruptible(&dev->mutex);
530 	if (retval)
531 		return retval;
532 
533 	if (dev->grab) {
534 		retval = -EBUSY;
535 		goto out;
536 	}
537 
538 	rcu_assign_pointer(dev->grab, handle);
539 
540  out:
541 	mutex_unlock(&dev->mutex);
542 	return retval;
543 }
544 EXPORT_SYMBOL(input_grab_device);
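/*
 * Illustrative sketch: a handler that wants exclusive access (as evdev
 * does for its grab ioctl) brackets the exclusive section with
 * input_grab_device() and input_release_device() on its handle:
 *
 *	error = input_grab_device(handle);
 *	if (error)
 *		return error;
 *	...
 *	input_release_device(handle);
 *
 * Only one handle can hold the grab at a time; a second grab attempt
 * fails with -EBUSY.
 */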
545 
546 static void __input_release_device(struct input_handle *handle)
547 {
548 	struct input_dev *dev = handle->dev;
549 	struct input_handle *grabber;
550 
551 	grabber = rcu_dereference_protected(dev->grab,
552 					    lockdep_is_held(&dev->mutex));
553 	if (grabber == handle) {
554 		rcu_assign_pointer(dev->grab, NULL);
555 		/* Make sure input_pass_event() notices that grab is gone */
556 		synchronize_rcu();
557 
558 		list_for_each_entry(handle, &dev->h_list, d_node)
559 			if (handle->open && handle->handler->start)
560 				handle->handler->start(handle);
561 	}
562 }
563 
564 /**
565  * input_release_device - release previously grabbed device
566  * @handle: input handle that owns the device
567  *
568  * Releases previously grabbed device so that other input handles can
569  * start receiving input events. Upon release all handlers attached
570  * to the device have their start() method called so they have a chance
571  * to synchronize device state with the rest of the system.
572  */
573 void input_release_device(struct input_handle *handle)
574 {
575 	struct input_dev *dev = handle->dev;
576 
577 	mutex_lock(&dev->mutex);
578 	__input_release_device(handle);
579 	mutex_unlock(&dev->mutex);
580 }
581 EXPORT_SYMBOL(input_release_device);
582 
583 /**
584  * input_open_device - open input device
585  * @handle: handle through which device is being accessed
586  *
587  * This function should be called by input handlers when they
588  * want to start receiving events from a given input device.
589  */
590 int input_open_device(struct input_handle *handle)
591 {
592 	struct input_dev *dev = handle->dev;
593 	int retval;
594 
595 	retval = mutex_lock_interruptible(&dev->mutex);
596 	if (retval)
597 		return retval;
598 
599 	if (dev->going_away) {
600 		retval = -ENODEV;
601 		goto out;
602 	}
603 
604 	handle->open++;
605 
606 	if (!dev->users++ && dev->open)
607 		retval = dev->open(dev);
608 
609 	if (retval) {
610 		dev->users--;
611 		if (!--handle->open) {
612 			/*
613 			 * Make sure we are not delivering any more events
614 			 * through this handle
615 			 */
616 			synchronize_rcu();
617 		}
618 	}
619 
620  out:
621 	mutex_unlock(&dev->mutex);
622 	return retval;
623 }
624 EXPORT_SYMBOL(input_open_device);
625 
626 int input_flush_device(struct input_handle *handle, struct file *file)
627 {
628 	struct input_dev *dev = handle->dev;
629 	int retval;
630 
631 	retval = mutex_lock_interruptible(&dev->mutex);
632 	if (retval)
633 		return retval;
634 
635 	if (dev->flush)
636 		retval = dev->flush(dev, file);
637 
638 	mutex_unlock(&dev->mutex);
639 	return retval;
640 }
641 EXPORT_SYMBOL(input_flush_device);
642 
643 /**
644  * input_close_device - close input device
645  * @handle: handle through which device is being accessed
646  *
647  * This function should be called by input handlers when they
648  * want to stop receiving events from a given input device.
649  */
650 void input_close_device(struct input_handle *handle)
651 {
652 	struct input_dev *dev = handle->dev;
653 
654 	mutex_lock(&dev->mutex);
655 
656 	__input_release_device(handle);
657 
658 	if (!--dev->users && dev->close)
659 		dev->close(dev);
660 
661 	if (!--handle->open) {
662 		/*
663 		 * synchronize_rcu() makes sure that input_pass_event()
664 		 * completed and that no more input events are delivered
665 		 * through this handle
666 		 */
667 		synchronize_rcu();
668 	}
669 
670 	mutex_unlock(&dev->mutex);
671 }
672 EXPORT_SYMBOL(input_close_device);
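/*
 * Illustrative sketch: a typical handler pairs these calls in its
 * open()/close() or connect()/disconnect() paths:
 *
 *	error = input_open_device(handle);
 *	if (error)
 *		return error;
 *	...
 *	input_close_device(handle);
 *
 * handle->open counts how many times this particular handle was opened,
 * while dev->users decides when the device's own open()/close() methods
 * are invoked.
 */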
673 
674 /*
675  * Simulate keyup events for all keys that are marked as pressed.
676  * The function must be called with dev->event_lock held.
677  */
678 static void input_dev_release_keys(struct input_dev *dev)
679 {
680 	bool need_sync = false;
681 	int code;
682 
683 	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
684 		for_each_set_bit(code, dev->key, KEY_CNT) {
685 			input_pass_event(dev, EV_KEY, code, 0);
686 			need_sync = true;
687 		}
688 
689 		if (need_sync)
690 			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
691 
692 		memset(dev->key, 0, sizeof(dev->key));
693 	}
694 }
695 
696 /*
697  * Prepare device for unregistering
698  */
699 static void input_disconnect_device(struct input_dev *dev)
700 {
701 	struct input_handle *handle;
702 
703 	/*
704 	 * Mark device as going away. Note that we take dev->mutex here
705 	 * not to protect access to dev->going_away but rather to ensure
706 	 * that there are no threads in the middle of input_open_device()
707 	 */
708 	mutex_lock(&dev->mutex);
709 	dev->going_away = true;
710 	mutex_unlock(&dev->mutex);
711 
712 	spin_lock_irq(&dev->event_lock);
713 
714 	/*
715 	 * Simulate keyup events for all pressed keys so that handlers
716  * are not left with "stuck" keys. The driver may continue to
717  * generate events even after we are done here, but they will not
718  * reach any handlers.
719 	 */
720 	input_dev_release_keys(dev);
721 
722 	list_for_each_entry(handle, &dev->h_list, d_node)
723 		handle->open = 0;
724 
725 	spin_unlock_irq(&dev->event_lock);
726 }
727 
728 /**
729  * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
730  * @ke: keymap entry containing scancode to be converted.
731  * @scancode: pointer to the location where converted scancode should
732  *	be stored.
733  *
734  * This function is used to convert a scancode stored in a &struct
735  * input_keymap_entry into scalar form understood by legacy keymap handling
736  * methods. These methods expect scancodes to be represented as 'unsigned int'.
737  */
738 int input_scancode_to_scalar(const struct input_keymap_entry *ke,
739 			     unsigned int *scancode)
740 {
741 	switch (ke->len) {
742 	case 1:
743 		*scancode = *((u8 *)ke->scancode);
744 		break;
745 
746 	case 2:
747 		*scancode = *((u16 *)ke->scancode);
748 		break;
749 
750 	case 4:
751 		*scancode = *((u32 *)ke->scancode);
752 		break;
753 
754 	default:
755 		return -EINVAL;
756 	}
757 
758 	return 0;
759 }
760 EXPORT_SYMBOL(input_scancode_to_scalar);
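/*
 * Illustrative sketch: converting a 2-byte scancode supplied by userspace
 * (the raw value below is made up):
 *
 *	struct input_keymap_entry ke = { .len = 2 };
 *	unsigned int scancode;
 *	u16 raw = 0x1c2b;
 *
 *	memcpy(ke.scancode, &raw, sizeof(raw));
 *	if (!input_scancode_to_scalar(&ke, &scancode))
 *		pr_debug("scancode %#x\n", scancode);
 *
 * Lengths other than 1, 2 or 4 bytes are rejected with -EINVAL.
 */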
761 
762 /*
763  * These routines handle the default case where no [gs]etkeycode() is
764  * defined. In this case, an array indexed by the scancode is used.
765  */
766 
767 static unsigned int input_fetch_keycode(struct input_dev *dev,
768 					unsigned int index)
769 {
770 	switch (dev->keycodesize) {
771 	case 1:
772 		return ((u8 *)dev->keycode)[index];
773 
774 	case 2:
775 		return ((u16 *)dev->keycode)[index];
776 
777 	default:
778 		return ((u32 *)dev->keycode)[index];
779 	}
780 }
781 
782 static int input_default_getkeycode(struct input_dev *dev,
783 				    struct input_keymap_entry *ke)
784 {
785 	unsigned int index;
786 	int error;
787 
788 	if (!dev->keycodesize)
789 		return -EINVAL;
790 
791 	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
792 		index = ke->index;
793 	else {
794 		error = input_scancode_to_scalar(ke, &index);
795 		if (error)
796 			return error;
797 	}
798 
799 	if (index >= dev->keycodemax)
800 		return -EINVAL;
801 
802 	ke->keycode = input_fetch_keycode(dev, index);
803 	ke->index = index;
804 	ke->len = sizeof(index);
805 	memcpy(ke->scancode, &index, sizeof(index));
806 
807 	return 0;
808 }
809 
810 static int input_default_setkeycode(struct input_dev *dev,
811 				    const struct input_keymap_entry *ke,
812 				    unsigned int *old_keycode)
813 {
814 	unsigned int index;
815 	int error;
816 	int i;
817 
818 	if (!dev->keycodesize)
819 		return -EINVAL;
820 
821 	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
822 		index = ke->index;
823 	} else {
824 		error = input_scancode_to_scalar(ke, &index);
825 		if (error)
826 			return error;
827 	}
828 
829 	if (index >= dev->keycodemax)
830 		return -EINVAL;
831 
832 	if (dev->keycodesize < sizeof(ke->keycode) &&
833 			(ke->keycode >> (dev->keycodesize * 8)))
834 		return -EINVAL;
835 
836 	switch (dev->keycodesize) {
837 		case 1: {
838 			u8 *k = (u8 *)dev->keycode;
839 			*old_keycode = k[index];
840 			k[index] = ke->keycode;
841 			break;
842 		}
843 		case 2: {
844 			u16 *k = (u16 *)dev->keycode;
845 			*old_keycode = k[index];
846 			k[index] = ke->keycode;
847 			break;
848 		}
849 		default: {
850 			u32 *k = (u32 *)dev->keycode;
851 			*old_keycode = k[index];
852 			k[index] = ke->keycode;
853 			break;
854 		}
855 	}
856 
857 	__clear_bit(*old_keycode, dev->keybit);
858 	__set_bit(ke->keycode, dev->keybit);
859 
860 	for (i = 0; i < dev->keycodemax; i++) {
861 		if (input_fetch_keycode(dev, i) == *old_keycode) {
862 			__set_bit(*old_keycode, dev->keybit);
863 			break; /* Setting the bit twice is useless, so break */
864 		}
865 	}
866 
867 	return 0;
868 }
869 
870 /**
871  * input_get_keycode - retrieve keycode currently mapped to a given scancode
872  * @dev: input device whose keymap is being queried
873  * @ke: keymap entry
874  *
875  * This function should be called by anyone interested in retrieving the
876  * current keymap. Presently evdev handlers use it.
877  */
878 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
879 {
880 	unsigned long flags;
881 	int retval;
882 
883 	spin_lock_irqsave(&dev->event_lock, flags);
884 	retval = dev->getkeycode(dev, ke);
885 	spin_unlock_irqrestore(&dev->event_lock, flags);
886 
887 	return retval;
888 }
889 EXPORT_SYMBOL(input_get_keycode);
890 
891 /**
892  * input_set_keycode - attribute a keycode to a given scancode
893  * @dev: input device whose keymap is being updated
894  * @ke: new keymap entry
895  *
896  * This function should be called by anyone needing to update the current
897  * keymap. Presently keyboard and evdev handlers use it.
898  */
899 int input_set_keycode(struct input_dev *dev,
900 		      const struct input_keymap_entry *ke)
901 {
902 	unsigned long flags;
903 	unsigned int old_keycode;
904 	int retval;
905 
906 	if (ke->keycode > KEY_MAX)
907 		return -EINVAL;
908 
909 	spin_lock_irqsave(&dev->event_lock, flags);
910 
911 	retval = dev->setkeycode(dev, ke, &old_keycode);
912 	if (retval)
913 		goto out;
914 
915 	/* Make sure KEY_RESERVED did not get enabled. */
916 	__clear_bit(KEY_RESERVED, dev->keybit);
917 
918 	/*
919 	 * Simulate keyup event if keycode is not present
920 	 * in the keymap anymore
921 	 */
922 	if (test_bit(EV_KEY, dev->evbit) &&
923 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
924 	    __test_and_clear_bit(old_keycode, dev->key)) {
925 		struct input_value vals[] =  {
926 			{ EV_KEY, old_keycode, 0 },
927 			input_value_sync
928 		};
929 
930 		input_pass_values(dev, vals, ARRAY_SIZE(vals));
931 	}
932 
933  out:
934 	spin_unlock_irqrestore(&dev->event_lock, flags);
935 
936 	return retval;
937 }
938 EXPORT_SYMBOL(input_set_keycode);
939 
940 bool input_match_device_id(const struct input_dev *dev,
941 			   const struct input_device_id *id)
942 {
943 	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
944 		if (id->bustype != dev->id.bustype)
945 			return false;
946 
947 	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
948 		if (id->vendor != dev->id.vendor)
949 			return false;
950 
951 	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
952 		if (id->product != dev->id.product)
953 			return false;
954 
955 	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
956 		if (id->version != dev->id.version)
957 			return false;
958 
959 	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
960 	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
961 	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
962 	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
963 	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
964 	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
965 	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
966 	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
967 	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
968 	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
969 		return false;
970 	}
971 
972 	return true;
973 }
974 EXPORT_SYMBOL(input_match_device_id);
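/*
 * Illustrative sketch: a handler's id_table entry that matches any device
 * advertising EV_KEY; the trailing empty entry terminates the table.
 *
 *	static const struct input_device_id my_handler_ids[] = {
 *		{
 *			.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *			.evbit = { BIT_MASK(EV_KEY) },
 *		},
 *		{ },
 *	};
 *
 * input_match_device() below walks such a table and additionally honours
 * the handler's optional match() callback.
 */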
975 
976 static const struct input_device_id *input_match_device(struct input_handler *handler,
977 							struct input_dev *dev)
978 {
979 	const struct input_device_id *id;
980 
981 	for (id = handler->id_table; id->flags || id->driver_info; id++) {
982 		if (input_match_device_id(dev, id) &&
983 		    (!handler->match || handler->match(handler, dev))) {
984 			return id;
985 		}
986 	}
987 
988 	return NULL;
989 }
990 
991 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
992 {
993 	const struct input_device_id *id;
994 	int error;
995 
996 	id = input_match_device(handler, dev);
997 	if (!id)
998 		return -ENODEV;
999 
1000 	error = handler->connect(handler, dev, id);
1001 	if (error && error != -ENODEV)
1002 		pr_err("failed to attach handler %s to device %s, error: %d\n",
1003 		       handler->name, kobject_name(&dev->dev.kobj), error);
1004 
1005 	return error;
1006 }
1007 
1008 #ifdef CONFIG_COMPAT
1009 
1010 static int input_bits_to_string(char *buf, int buf_size,
1011 				unsigned long bits, bool skip_empty)
1012 {
1013 	int len = 0;
1014 
1015 	if (in_compat_syscall()) {
1016 		u32 dword = bits >> 32;
1017 		if (dword || !skip_empty)
1018 			len += snprintf(buf, buf_size, "%x ", dword);
1019 
1020 		dword = bits & 0xffffffffUL;
1021 		if (dword || !skip_empty || len)
1022 			len += snprintf(buf + len, max(buf_size - len, 0),
1023 					"%x", dword);
1024 	} else {
1025 		if (bits || !skip_empty)
1026 			len += snprintf(buf, buf_size, "%lx", bits);
1027 	}
1028 
1029 	return len;
1030 }
1031 
1032 #else /* !CONFIG_COMPAT */
1033 
1034 static int input_bits_to_string(char *buf, int buf_size,
1035 				unsigned long bits, bool skip_empty)
1036 {
1037 	return bits || !skip_empty ?
1038 		snprintf(buf, buf_size, "%lx", bits) : 0;
1039 }
1040 
1041 #endif
1042 
1043 #ifdef CONFIG_PROC_FS
1044 
1045 static struct proc_dir_entry *proc_bus_input_dir;
1046 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1047 static int input_devices_state;
1048 
1049 static inline void input_wakeup_procfs_readers(void)
1050 {
1051 	input_devices_state++;
1052 	wake_up(&input_devices_poll_wait);
1053 }
1054 
1055 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1056 {
1057 	poll_wait(file, &input_devices_poll_wait, wait);
1058 	if (file->f_version != input_devices_state) {
1059 		file->f_version = input_devices_state;
1060 		return EPOLLIN | EPOLLRDNORM;
1061 	}
1062 
1063 	return 0;
1064 }
1065 
1066 union input_seq_state {
1067 	struct {
1068 		unsigned short pos;
1069 		bool mutex_acquired;
1070 	};
1071 	void *p;
1072 };
1073 
1074 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1075 {
1076 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1077 	int error;
1078 
1079 	/* We need to fit into seq->private pointer */
1080 	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1081 
1082 	error = mutex_lock_interruptible(&input_mutex);
1083 	if (error) {
1084 		state->mutex_acquired = false;
1085 		return ERR_PTR(error);
1086 	}
1087 
1088 	state->mutex_acquired = true;
1089 
1090 	return seq_list_start(&input_dev_list, *pos);
1091 }
1092 
1093 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1094 {
1095 	return seq_list_next(v, &input_dev_list, pos);
1096 }
1097 
1098 static void input_seq_stop(struct seq_file *seq, void *v)
1099 {
1100 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1101 
1102 	if (state->mutex_acquired)
1103 		mutex_unlock(&input_mutex);
1104 }
1105 
1106 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1107 				   unsigned long *bitmap, int max)
1108 {
1109 	int i;
1110 	bool skip_empty = true;
1111 	char buf[18];
1112 
1113 	seq_printf(seq, "B: %s=", name);
1114 
1115 	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1116 		if (input_bits_to_string(buf, sizeof(buf),
1117 					 bitmap[i], skip_empty)) {
1118 			skip_empty = false;
1119 			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1120 		}
1121 	}
1122 
1123 	/*
1124 	 * If no output was produced print a single 0.
1125 	 */
1126 	if (skip_empty)
1127 		seq_putc(seq, '0');
1128 
1129 	seq_putc(seq, '\n');
1130 }
1131 
1132 static int input_devices_seq_show(struct seq_file *seq, void *v)
1133 {
1134 	struct input_dev *dev = container_of(v, struct input_dev, node);
1135 	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1136 	struct input_handle *handle;
1137 
1138 	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1139 		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1140 
1141 	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1142 	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1143 	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1144 	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1145 	seq_puts(seq, "H: Handlers=");
1146 
1147 	list_for_each_entry(handle, &dev->h_list, d_node)
1148 		seq_printf(seq, "%s ", handle->name);
1149 	seq_putc(seq, '\n');
1150 
1151 	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1152 
1153 	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1154 	if (test_bit(EV_KEY, dev->evbit))
1155 		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1156 	if (test_bit(EV_REL, dev->evbit))
1157 		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1158 	if (test_bit(EV_ABS, dev->evbit))
1159 		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1160 	if (test_bit(EV_MSC, dev->evbit))
1161 		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1162 	if (test_bit(EV_LED, dev->evbit))
1163 		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1164 	if (test_bit(EV_SND, dev->evbit))
1165 		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1166 	if (test_bit(EV_FF, dev->evbit))
1167 		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1168 	if (test_bit(EV_SW, dev->evbit))
1169 		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1170 
1171 	seq_putc(seq, '\n');
1172 
1173 	kfree(path);
1174 	return 0;
1175 }
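/*
 * For reference, a record produced by the function above in
 * /proc/bus/input/devices looks roughly like this (values are examples):
 *
 *	I: Bus=0003 Vendor=1234 Product=5678 Version=0111
 *	N: Name="Example USB Keyboard"
 *	P: Phys=usb-0000:00:14.0-2/input0
 *	S: Sysfs=/devices/pci0000:00/.../input/input5
 *	U: Uniq=
 *	H: Handlers=sysrq kbd event5
 *	B: PROP=0
 *	B: EV=120013
 *
 * followed by one "B:" bitmap line per event type the device advertises.
 */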
1176 
1177 static const struct seq_operations input_devices_seq_ops = {
1178 	.start	= input_devices_seq_start,
1179 	.next	= input_devices_seq_next,
1180 	.stop	= input_seq_stop,
1181 	.show	= input_devices_seq_show,
1182 };
1183 
1184 static int input_proc_devices_open(struct inode *inode, struct file *file)
1185 {
1186 	return seq_open(file, &input_devices_seq_ops);
1187 }
1188 
1189 static const struct file_operations input_devices_fileops = {
1190 	.owner		= THIS_MODULE,
1191 	.open		= input_proc_devices_open,
1192 	.poll		= input_proc_devices_poll,
1193 	.read		= seq_read,
1194 	.llseek		= seq_lseek,
1195 	.release	= seq_release,
1196 };
1197 
1198 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1199 {
1200 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1201 	int error;
1202 
1203 	/* We need to fit into seq->private pointer */
1204 	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1205 
1206 	error = mutex_lock_interruptible(&input_mutex);
1207 	if (error) {
1208 		state->mutex_acquired = false;
1209 		return ERR_PTR(error);
1210 	}
1211 
1212 	state->mutex_acquired = true;
1213 	state->pos = *pos;
1214 
1215 	return seq_list_start(&input_handler_list, *pos);
1216 }
1217 
1218 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1219 {
1220 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1221 
1222 	state->pos = *pos + 1;
1223 	return seq_list_next(v, &input_handler_list, pos);
1224 }
1225 
1226 static int input_handlers_seq_show(struct seq_file *seq, void *v)
1227 {
1228 	struct input_handler *handler = container_of(v, struct input_handler, node);
1229 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1230 
1231 	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1232 	if (handler->filter)
1233 		seq_puts(seq, " (filter)");
1234 	if (handler->legacy_minors)
1235 		seq_printf(seq, " Minor=%d", handler->minor);
1236 	seq_putc(seq, '\n');
1237 
1238 	return 0;
1239 }
1240 
1241 static const struct seq_operations input_handlers_seq_ops = {
1242 	.start	= input_handlers_seq_start,
1243 	.next	= input_handlers_seq_next,
1244 	.stop	= input_seq_stop,
1245 	.show	= input_handlers_seq_show,
1246 };
1247 
1248 static int input_proc_handlers_open(struct inode *inode, struct file *file)
1249 {
1250 	return seq_open(file, &input_handlers_seq_ops);
1251 }
1252 
1253 static const struct file_operations input_handlers_fileops = {
1254 	.owner		= THIS_MODULE,
1255 	.open		= input_proc_handlers_open,
1256 	.read		= seq_read,
1257 	.llseek		= seq_lseek,
1258 	.release	= seq_release,
1259 };
1260 
1261 static int __init input_proc_init(void)
1262 {
1263 	struct proc_dir_entry *entry;
1264 
1265 	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1266 	if (!proc_bus_input_dir)
1267 		return -ENOMEM;
1268 
1269 	entry = proc_create("devices", 0, proc_bus_input_dir,
1270 			    &input_devices_fileops);
1271 	if (!entry)
1272 		goto fail1;
1273 
1274 	entry = proc_create("handlers", 0, proc_bus_input_dir,
1275 			    &input_handlers_fileops);
1276 	if (!entry)
1277 		goto fail2;
1278 
1279 	return 0;
1280 
1281  fail2:	remove_proc_entry("devices", proc_bus_input_dir);
1282  fail1: remove_proc_entry("bus/input", NULL);
1283 	return -ENOMEM;
1284 }
1285 
1286 static void input_proc_exit(void)
1287 {
1288 	remove_proc_entry("devices", proc_bus_input_dir);
1289 	remove_proc_entry("handlers", proc_bus_input_dir);
1290 	remove_proc_entry("bus/input", NULL);
1291 }
1292 
1293 #else /* !CONFIG_PROC_FS */
1294 static inline void input_wakeup_procfs_readers(void) { }
1295 static inline int input_proc_init(void) { return 0; }
1296 static inline void input_proc_exit(void) { }
1297 #endif
1298 
1299 #define INPUT_DEV_STRING_ATTR_SHOW(name)				\
1300 static ssize_t input_dev_show_##name(struct device *dev,		\
1301 				     struct device_attribute *attr,	\
1302 				     char *buf)				\
1303 {									\
1304 	struct input_dev *input_dev = to_input_dev(dev);		\
1305 									\
1306 	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
1307 			 input_dev->name ? input_dev->name : "");	\
1308 }									\
1309 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1310 
1311 INPUT_DEV_STRING_ATTR_SHOW(name);
1312 INPUT_DEV_STRING_ATTR_SHOW(phys);
1313 INPUT_DEV_STRING_ATTR_SHOW(uniq);
1314 
1315 static int input_print_modalias_bits(char *buf, int size,
1316 				     char name, unsigned long *bm,
1317 				     unsigned int min_bit, unsigned int max_bit)
1318 {
1319 	int len = 0, i;
1320 
1321 	len += snprintf(buf, max(size, 0), "%c", name);
1322 	for (i = min_bit; i < max_bit; i++)
1323 		if (bm[BIT_WORD(i)] & BIT_MASK(i))
1324 			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
1325 	return len;
1326 }
1327 
1328 static int input_print_modalias(char *buf, int size, struct input_dev *id,
1329 				int add_cr)
1330 {
1331 	int len;
1332 
1333 	len = snprintf(buf, max(size, 0),
1334 		       "input:b%04Xv%04Xp%04Xe%04X-",
1335 		       id->id.bustype, id->id.vendor,
1336 		       id->id.product, id->id.version);
1337 
1338 	len += input_print_modalias_bits(buf + len, size - len,
1339 				'e', id->evbit, 0, EV_MAX);
1340 	len += input_print_modalias_bits(buf + len, size - len,
1341 				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1342 	len += input_print_modalias_bits(buf + len, size - len,
1343 				'r', id->relbit, 0, REL_MAX);
1344 	len += input_print_modalias_bits(buf + len, size - len,
1345 				'a', id->absbit, 0, ABS_MAX);
1346 	len += input_print_modalias_bits(buf + len, size - len,
1347 				'm', id->mscbit, 0, MSC_MAX);
1348 	len += input_print_modalias_bits(buf + len, size - len,
1349 				'l', id->ledbit, 0, LED_MAX);
1350 	len += input_print_modalias_bits(buf + len, size - len,
1351 				's', id->sndbit, 0, SND_MAX);
1352 	len += input_print_modalias_bits(buf + len, size - len,
1353 				'f', id->ffbit, 0, FF_MAX);
1354 	len += input_print_modalias_bits(buf + len, size - len,
1355 				'w', id->swbit, 0, SW_MAX);
1356 
1357 	if (add_cr)
1358 		len += snprintf(buf + len, max(size - len, 0), "\n");
1359 
1360 	return len;
1361 }
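/*
 * For reference, the modalias string built above looks like
 * "input:b0019v0000p0001e0000-e0,1,k74,ramlsfw" for a hypothetical
 * BUS_HOST power button: bus/vendor/product/version in hex, followed by
 * one letter per bitmap (e = EV, k = KEY, r = REL, a = ABS, m = MSC,
 * l = LED, s = SND, f = FF, w = SW) and the set bit numbers, if any.
 */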
1362 
1363 static ssize_t input_dev_show_modalias(struct device *dev,
1364 				       struct device_attribute *attr,
1365 				       char *buf)
1366 {
1367 	struct input_dev *id = to_input_dev(dev);
1368 	ssize_t len;
1369 
1370 	len = input_print_modalias(buf, PAGE_SIZE, id, 1);
1371 
1372 	return min_t(int, len, PAGE_SIZE);
1373 }
1374 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1375 
1376 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1377 			      int max, int add_cr);
1378 
1379 static ssize_t input_dev_show_properties(struct device *dev,
1380 					 struct device_attribute *attr,
1381 					 char *buf)
1382 {
1383 	struct input_dev *input_dev = to_input_dev(dev);
1384 	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1385 				     INPUT_PROP_MAX, true);
1386 	return min_t(int, len, PAGE_SIZE);
1387 }
1388 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1389 
1390 static struct attribute *input_dev_attrs[] = {
1391 	&dev_attr_name.attr,
1392 	&dev_attr_phys.attr,
1393 	&dev_attr_uniq.attr,
1394 	&dev_attr_modalias.attr,
1395 	&dev_attr_properties.attr,
1396 	NULL
1397 };
1398 
1399 static const struct attribute_group input_dev_attr_group = {
1400 	.attrs	= input_dev_attrs,
1401 };
1402 
1403 #define INPUT_DEV_ID_ATTR(name)						\
1404 static ssize_t input_dev_show_id_##name(struct device *dev,		\
1405 					struct device_attribute *attr,	\
1406 					char *buf)			\
1407 {									\
1408 	struct input_dev *input_dev = to_input_dev(dev);		\
1409 	return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name);	\
1410 }									\
1411 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1412 
1413 INPUT_DEV_ID_ATTR(bustype);
1414 INPUT_DEV_ID_ATTR(vendor);
1415 INPUT_DEV_ID_ATTR(product);
1416 INPUT_DEV_ID_ATTR(version);
1417 
1418 static struct attribute *input_dev_id_attrs[] = {
1419 	&dev_attr_bustype.attr,
1420 	&dev_attr_vendor.attr,
1421 	&dev_attr_product.attr,
1422 	&dev_attr_version.attr,
1423 	NULL
1424 };
1425 
1426 static const struct attribute_group input_dev_id_attr_group = {
1427 	.name	= "id",
1428 	.attrs	= input_dev_id_attrs,
1429 };
1430 
1431 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1432 			      int max, int add_cr)
1433 {
1434 	int i;
1435 	int len = 0;
1436 	bool skip_empty = true;
1437 
1438 	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1439 		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1440 					    bitmap[i], skip_empty);
1441 		if (len) {
1442 			skip_empty = false;
1443 			if (i > 0)
1444 				len += snprintf(buf + len, max(buf_size - len, 0), " ");
1445 		}
1446 	}
1447 
1448 	/*
1449 	 * If no output was produced print a single 0.
1450 	 */
1451 	if (len == 0)
1452 		len = snprintf(buf, buf_size, "%d", 0);
1453 
1454 	if (add_cr)
1455 		len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1456 
1457 	return len;
1458 }
1459 
1460 #define INPUT_DEV_CAP_ATTR(ev, bm)					\
1461 static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
1462 				       struct device_attribute *attr,	\
1463 				       char *buf)			\
1464 {									\
1465 	struct input_dev *input_dev = to_input_dev(dev);		\
1466 	int len = input_print_bitmap(buf, PAGE_SIZE,			\
1467 				     input_dev->bm##bit, ev##_MAX,	\
1468 				     true);				\
1469 	return min_t(int, len, PAGE_SIZE);				\
1470 }									\
1471 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1472 
1473 INPUT_DEV_CAP_ATTR(EV, ev);
1474 INPUT_DEV_CAP_ATTR(KEY, key);
1475 INPUT_DEV_CAP_ATTR(REL, rel);
1476 INPUT_DEV_CAP_ATTR(ABS, abs);
1477 INPUT_DEV_CAP_ATTR(MSC, msc);
1478 INPUT_DEV_CAP_ATTR(LED, led);
1479 INPUT_DEV_CAP_ATTR(SND, snd);
1480 INPUT_DEV_CAP_ATTR(FF, ff);
1481 INPUT_DEV_CAP_ATTR(SW, sw);
1482 
1483 static struct attribute *input_dev_caps_attrs[] = {
1484 	&dev_attr_ev.attr,
1485 	&dev_attr_key.attr,
1486 	&dev_attr_rel.attr,
1487 	&dev_attr_abs.attr,
1488 	&dev_attr_msc.attr,
1489 	&dev_attr_led.attr,
1490 	&dev_attr_snd.attr,
1491 	&dev_attr_ff.attr,
1492 	&dev_attr_sw.attr,
1493 	NULL
1494 };
1495 
1496 static const struct attribute_group input_dev_caps_attr_group = {
1497 	.name	= "capabilities",
1498 	.attrs	= input_dev_caps_attrs,
1499 };
1500 
1501 static const struct attribute_group *input_dev_attr_groups[] = {
1502 	&input_dev_attr_group,
1503 	&input_dev_id_attr_group,
1504 	&input_dev_caps_attr_group,
1505 	NULL
1506 };
1507 
1508 static void input_dev_release(struct device *device)
1509 {
1510 	struct input_dev *dev = to_input_dev(device);
1511 
1512 	input_ff_destroy(dev);
1513 	input_mt_destroy_slots(dev);
1514 	kfree(dev->absinfo);
1515 	kfree(dev->vals);
1516 	kfree(dev);
1517 
1518 	module_put(THIS_MODULE);
1519 }
1520 
1521 /*
1522  * Input uevent interface - loading event handlers based on
1523  * device bitfields.
1524  */
1525 static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1526 				   const char *name, unsigned long *bitmap, int max)
1527 {
1528 	int len;
1529 
1530 	if (add_uevent_var(env, "%s", name))
1531 		return -ENOMEM;
1532 
1533 	len = input_print_bitmap(&env->buf[env->buflen - 1],
1534 				 sizeof(env->buf) - env->buflen,
1535 				 bitmap, max, false);
1536 	if (len >= (sizeof(env->buf) - env->buflen))
1537 		return -ENOMEM;
1538 
1539 	env->buflen += len;
1540 	return 0;
1541 }
1542 
1543 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1544 					 struct input_dev *dev)
1545 {
1546 	int len;
1547 
1548 	if (add_uevent_var(env, "MODALIAS="))
1549 		return -ENOMEM;
1550 
1551 	len = input_print_modalias(&env->buf[env->buflen - 1],
1552 				   sizeof(env->buf) - env->buflen,
1553 				   dev, 0);
1554 	if (len >= (sizeof(env->buf) - env->buflen))
1555 		return -ENOMEM;
1556 
1557 	env->buflen += len;
1558 	return 0;
1559 }
1560 
1561 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
1562 	do {								\
1563 		int err = add_uevent_var(env, fmt, val);		\
1564 		if (err)						\
1565 			return err;					\
1566 	} while (0)
1567 
1568 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
1569 	do {								\
1570 		int err = input_add_uevent_bm_var(env, name, bm, max);	\
1571 		if (err)						\
1572 			return err;					\
1573 	} while (0)
1574 
1575 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
1576 	do {								\
1577 		int err = input_add_uevent_modalias_var(env, dev);	\
1578 		if (err)						\
1579 			return err;					\
1580 	} while (0)
1581 
1582 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1583 {
1584 	struct input_dev *dev = to_input_dev(device);
1585 
1586 	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1587 				dev->id.bustype, dev->id.vendor,
1588 				dev->id.product, dev->id.version);
1589 	if (dev->name)
1590 		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1591 	if (dev->phys)
1592 		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1593 	if (dev->uniq)
1594 		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1595 
1596 	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1597 
1598 	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1599 	if (test_bit(EV_KEY, dev->evbit))
1600 		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1601 	if (test_bit(EV_REL, dev->evbit))
1602 		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1603 	if (test_bit(EV_ABS, dev->evbit))
1604 		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1605 	if (test_bit(EV_MSC, dev->evbit))
1606 		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1607 	if (test_bit(EV_LED, dev->evbit))
1608 		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1609 	if (test_bit(EV_SND, dev->evbit))
1610 		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1611 	if (test_bit(EV_FF, dev->evbit))
1612 		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1613 	if (test_bit(EV_SW, dev->evbit))
1614 		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1615 
1616 	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1617 
1618 	return 0;
1619 }
1620 
1621 #define INPUT_DO_TOGGLE(dev, type, bits, on)				\
1622 	do {								\
1623 		int i;							\
1624 		bool active;						\
1625 									\
1626 		if (!test_bit(EV_##type, dev->evbit))			\
1627 			break;						\
1628 									\
1629 		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
1630 			active = test_bit(i, dev->bits);		\
1631 			if (!active && !on)				\
1632 				continue;				\
1633 									\
1634 			dev->event(dev, EV_##type, i, on ? active : 0);	\
1635 		}							\
1636 	} while (0)
1637 
1638 static void input_dev_toggle(struct input_dev *dev, bool activate)
1639 {
1640 	if (!dev->event)
1641 		return;
1642 
1643 	INPUT_DO_TOGGLE(dev, LED, led, activate);
1644 	INPUT_DO_TOGGLE(dev, SND, snd, activate);
1645 
1646 	if (activate && test_bit(EV_REP, dev->evbit)) {
1647 		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1648 		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1649 	}
1650 }
1651 
1652 /**
1653  * input_reset_device() - reset/restore the state of an input device
1654  * @dev: input device whose state needs to be reset
1655  *
1656  * This function tries to reset the state of an opened input device and
1657  * bring internal state and the state of the hardware in sync with each other.
1658  * We mark all keys as released, restore LED state, repeat rate, etc.
1659  */
1660 void input_reset_device(struct input_dev *dev)
1661 {
1662 	unsigned long flags;
1663 
1664 	mutex_lock(&dev->mutex);
1665 	spin_lock_irqsave(&dev->event_lock, flags);
1666 
1667 	input_dev_toggle(dev, true);
1668 	input_dev_release_keys(dev);
1669 
1670 	spin_unlock_irqrestore(&dev->event_lock, flags);
1671 	mutex_unlock(&dev->mutex);
1672 }
1673 EXPORT_SYMBOL(input_reset_device);
1674 
1675 #ifdef CONFIG_PM_SLEEP
1676 static int input_dev_suspend(struct device *dev)
1677 {
1678 	struct input_dev *input_dev = to_input_dev(dev);
1679 
1680 	spin_lock_irq(&input_dev->event_lock);
1681 
1682 	/*
1683 	 * Keys that are pressed now are unlikely to be
1684 	 * still pressed when we resume.
1685 	 */
1686 	input_dev_release_keys(input_dev);
1687 
1688 	/* Turn off LEDs and sounds, if any are active. */
1689 	input_dev_toggle(input_dev, false);
1690 
1691 	spin_unlock_irq(&input_dev->event_lock);
1692 
1693 	return 0;
1694 }
1695 
1696 static int input_dev_resume(struct device *dev)
1697 {
1698 	struct input_dev *input_dev = to_input_dev(dev);
1699 
1700 	spin_lock_irq(&input_dev->event_lock);
1701 
1702 	/* Restore state of LEDs and sounds, if any were active. */
1703 	input_dev_toggle(input_dev, true);
1704 
1705 	spin_unlock_irq(&input_dev->event_lock);
1706 
1707 	return 0;
1708 }
1709 
1710 static int input_dev_freeze(struct device *dev)
1711 {
1712 	struct input_dev *input_dev = to_input_dev(dev);
1713 
1714 	spin_lock_irq(&input_dev->event_lock);
1715 
1716 	/*
1717 	 * Keys that are pressed now are unlikely to be
1718 	 * still pressed when we resume.
1719 	 */
1720 	input_dev_release_keys(input_dev);
1721 
1722 	spin_unlock_irq(&input_dev->event_lock);
1723 
1724 	return 0;
1725 }
1726 
1727 static int input_dev_poweroff(struct device *dev)
1728 {
1729 	struct input_dev *input_dev = to_input_dev(dev);
1730 
1731 	spin_lock_irq(&input_dev->event_lock);
1732 
1733 	/* Turn off LEDs and sounds, if any are active. */
1734 	input_dev_toggle(input_dev, false);
1735 
1736 	spin_unlock_irq(&input_dev->event_lock);
1737 
1738 	return 0;
1739 }
1740 
1741 static const struct dev_pm_ops input_dev_pm_ops = {
1742 	.suspend	= input_dev_suspend,
1743 	.resume		= input_dev_resume,
1744 	.freeze		= input_dev_freeze,
1745 	.poweroff	= input_dev_poweroff,
1746 	.restore	= input_dev_resume,
1747 };
1748 #endif /* CONFIG_PM_SLEEP */
1749 
1750 static const struct device_type input_dev_type = {
1751 	.groups		= input_dev_attr_groups,
1752 	.release	= input_dev_release,
1753 	.uevent		= input_dev_uevent,
1754 #ifdef CONFIG_PM_SLEEP
1755 	.pm		= &input_dev_pm_ops,
1756 #endif
1757 };
1758 
1759 static char *input_devnode(struct device *dev, umode_t *mode)
1760 {
1761 	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1762 }
1763 
1764 struct class input_class = {
1765 	.name		= "input",
1766 	.devnode	= input_devnode,
1767 };
1768 EXPORT_SYMBOL_GPL(input_class);
1769 
1770 /**
1771  * input_allocate_device - allocate memory for new input device
1772  *
1773  * Returns prepared struct input_dev or %NULL.
1774  *
1775  * NOTE: Use input_free_device() to free devices that have not been
1776  * registered; input_unregister_device() should be used for already
1777  * registered devices.
1778  */
1779 struct input_dev *input_allocate_device(void)
1780 {
1781 	static atomic_t input_no = ATOMIC_INIT(-1);
1782 	struct input_dev *dev;
1783 
1784 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1785 	if (dev) {
1786 		dev->dev.type = &input_dev_type;
1787 		dev->dev.class = &input_class;
1788 		device_initialize(&dev->dev);
1789 		mutex_init(&dev->mutex);
1790 		spin_lock_init(&dev->event_lock);
1791 		timer_setup(&dev->timer, NULL, 0);
1792 		INIT_LIST_HEAD(&dev->h_list);
1793 		INIT_LIST_HEAD(&dev->node);
1794 
1795 		dev_set_name(&dev->dev, "input%lu",
1796 			     (unsigned long)atomic_inc_return(&input_no));
1797 
1798 		__module_get(THIS_MODULE);
1799 	}
1800 
1801 	return dev;
1802 }
1803 EXPORT_SYMBOL(input_allocate_device);
1804 
1805 struct input_devres {
1806 	struct input_dev *input;
1807 };
1808 
1809 static int devm_input_device_match(struct device *dev, void *res, void *data)
1810 {
1811 	struct input_devres *devres = res;
1812 
1813 	return devres->input == data;
1814 }
1815 
1816 static void devm_input_device_release(struct device *dev, void *res)
1817 {
1818 	struct input_devres *devres = res;
1819 	struct input_dev *input = devres->input;
1820 
1821 	dev_dbg(dev, "%s: dropping reference to %s\n",
1822 		__func__, dev_name(&input->dev));
1823 	input_put_device(input);
1824 }
1825 
1826 /**
1827  * devm_input_allocate_device - allocate managed input device
1828  * @dev: device owning the input device being created
1829  *
1830  * Returns prepared struct input_dev or %NULL.
1831  *
1832  * Managed input devices do not need to be explicitly unregistered or
1833  * freed as this is done automatically when the owner device unbinds from
1834  * its driver (or binding fails). Once a managed input device is allocated,
1835  * it is ready to be set up and registered in the same fashion as a regular
1836  * input device. There are no special devm_input_device_[un]register()
1837  * variants; the regular ones work with both managed and unmanaged devices,
1838  * should you need them. In most cases, however, a managed input device need
1839  * not be explicitly unregistered or freed.
1840  *
1841  * NOTE: the owner device is set up as parent of input device and users
1842  * should not override it.
1843  */
1844 struct input_dev *devm_input_allocate_device(struct device *dev)
1845 {
1846 	struct input_dev *input;
1847 	struct input_devres *devres;
1848 
1849 	devres = devres_alloc(devm_input_device_release,
1850 			      sizeof(*devres), GFP_KERNEL);
1851 	if (!devres)
1852 		return NULL;
1853 
1854 	input = input_allocate_device();
1855 	if (!input) {
1856 		devres_free(devres);
1857 		return NULL;
1858 	}
1859 
1860 	input->dev.parent = dev;
1861 	input->devres_managed = true;
1862 
1863 	devres->input = input;
1864 	devres_add(dev, devres);
1865 
1866 	return input;
1867 }
1868 EXPORT_SYMBOL(devm_input_allocate_device);
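/*
 * Illustrative sketch of a probe() path using the managed allocator; the
 * device name and the platform_device pointer "pdev" are made up, and
 * input_register_device() is defined later in this file.
 *
 *	input = devm_input_allocate_device(&pdev->dev);
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "example-keys";
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *	error = input_register_device(input);
 *	if (error)
 *		return error;
 *
 * No explicit unregister/free is needed on the error or unbind paths.
 */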
1869 
1870 /**
1871  * input_free_device - free memory occupied by input_dev structure
1872  * @dev: input device to free
1873  *
1874  * This function should only be used if input_register_device()
1875  * was not called yet or if it failed. Once the device has been registered,
1876  * use input_unregister_device(); memory will be freed once the last
1877  * reference to the device is dropped.
1878  *
1879  * Device should be allocated by input_allocate_device().
1880  *
1881  * NOTE: If there are references to the input device then memory
1882  * will not be freed until the last reference is dropped.
1883  */
1884 void input_free_device(struct input_dev *dev)
1885 {
1886 	if (dev) {
1887 		if (dev->devres_managed)
1888 			WARN_ON(devres_destroy(dev->dev.parent,
1889 						devm_input_device_release,
1890 						devm_input_device_match,
1891 						dev));
1892 		input_put_device(dev);
1893 	}
1894 }
1895 EXPORT_SYMBOL(input_free_device);
1896 
1897 /**
1898  * input_set_capability - mark device as capable of a certain event
1899  * @dev: device that is capable of emitting or accepting event
1900  * @type: type of the event (EV_KEY, EV_REL, etc...)
1901  * @code: event code
1902  *
1903  * In addition to setting the corresponding bit in the appropriate capability
1904  * bitmap, the function also adjusts dev->evbit.
1905  */
1906 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
1907 {
1908 	switch (type) {
1909 	case EV_KEY:
1910 		__set_bit(code, dev->keybit);
1911 		break;
1912 
1913 	case EV_REL:
1914 		__set_bit(code, dev->relbit);
1915 		break;
1916 
1917 	case EV_ABS:
1918 		input_alloc_absinfo(dev);
1919 		if (!dev->absinfo)
1920 			return;
1921 
1922 		__set_bit(code, dev->absbit);
1923 		break;
1924 
1925 	case EV_MSC:
1926 		__set_bit(code, dev->mscbit);
1927 		break;
1928 
1929 	case EV_SW:
1930 		__set_bit(code, dev->swbit);
1931 		break;
1932 
1933 	case EV_LED:
1934 		__set_bit(code, dev->ledbit);
1935 		break;
1936 
1937 	case EV_SND:
1938 		__set_bit(code, dev->sndbit);
1939 		break;
1940 
1941 	case EV_FF:
1942 		__set_bit(code, dev->ffbit);
1943 		break;
1944 
1945 	case EV_PWR:
1946 		/* do nothing */
1947 		break;
1948 
1949 	default:
1950 		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
1951 		dump_stack();
1952 		return;
1953 	}
1954 
1955 	__set_bit(type, dev->evbit);
1956 }
1957 EXPORT_SYMBOL(input_set_capability);
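
/*
 * Example (illustrative sketch): declaring that a device reports a power
 * key and a lid switch; the codes chosen are placeholders. For absolute
 * axes, drivers usually call input_set_abs_params() instead, which both
 * declares the EV_ABS capability and sets the axis range in one go.
 *
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *	input_set_capability(input, EV_SW, SW_LID);
 */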
1958 
1959 static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1960 {
1961 	int mt_slots;
1962 	int i;
1963 	unsigned int events;
1964 
1965 	if (dev->mt) {
1966 		mt_slots = dev->mt->num_slots;
1967 	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
1968 		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
1969 			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
1970 		mt_slots = clamp(mt_slots, 2, 32);
1971 	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1972 		mt_slots = 2;
1973 	} else {
1974 		mt_slots = 0;
1975 	}
1976 
1977 	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
1978 
1979 	if (test_bit(EV_ABS, dev->evbit))
1980 		for_each_set_bit(i, dev->absbit, ABS_CNT)
1981 			events += input_is_mt_axis(i) ? mt_slots : 1;
1982 
1983 	if (test_bit(EV_REL, dev->evbit))
1984 		events += bitmap_weight(dev->relbit, REL_CNT);
1985 
1986 	/* Make room for KEY and MSC events */
1987 	events += 7;
1988 
1989 	return events;
1990 }
1991 
1992 #define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
1993 	do {								\
1994 		if (!test_bit(EV_##type, dev->evbit))			\
1995 			memset(dev->bits##bit, 0,			\
1996 				sizeof(dev->bits##bit));		\
1997 	} while (0)
1998 
1999 static void input_cleanse_bitmasks(struct input_dev *dev)
2000 {
2001 	INPUT_CLEANSE_BITMASK(dev, KEY, key);
2002 	INPUT_CLEANSE_BITMASK(dev, REL, rel);
2003 	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
2004 	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
2005 	INPUT_CLEANSE_BITMASK(dev, LED, led);
2006 	INPUT_CLEANSE_BITMASK(dev, SND, snd);
2007 	INPUT_CLEANSE_BITMASK(dev, FF, ff);
2008 	INPUT_CLEANSE_BITMASK(dev, SW, sw);
2009 }
2010 
2011 static void __input_unregister_device(struct input_dev *dev)
2012 {
2013 	struct input_handle *handle, *next;
2014 
2015 	input_disconnect_device(dev);
2016 
2017 	mutex_lock(&input_mutex);
2018 
2019 	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
2020 		handle->handler->disconnect(handle);
2021 	WARN_ON(!list_empty(&dev->h_list));
2022 
2023 	del_timer_sync(&dev->timer);
2024 	list_del_init(&dev->node);
2025 
2026 	input_wakeup_procfs_readers();
2027 
2028 	mutex_unlock(&input_mutex);
2029 
2030 	device_del(&dev->dev);
2031 }
2032 
2033 static void devm_input_device_unregister(struct device *dev, void *res)
2034 {
2035 	struct input_devres *devres = res;
2036 	struct input_dev *input = devres->input;
2037 
2038 	dev_dbg(dev, "%s: unregistering device %s\n",
2039 		__func__, dev_name(&input->dev));
2040 	__input_unregister_device(input);
2041 }
2042 
2043 /**
2044  * input_enable_softrepeat - enable software autorepeat
2045  * @dev: input device
2046  * @delay: repeat delay, in milliseconds
2047  * @period: repeat period, in milliseconds
2048  *
2049  * Enable software autorepeat on the input device.
2050  */
2051 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2052 {
2053 	dev->timer.function = input_repeat_key;
2054 	dev->rep[REP_DELAY] = delay;
2055 	dev->rep[REP_PERIOD] = period;
2056 }
2057 EXPORT_SYMBOL(input_enable_softrepeat);
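
/*
 * Example (illustrative sketch): a driver that wants software autorepeat
 * faster than the 250 ms delay / 33 ms period default installed by
 * input_register_device() can set its own values; the numbers below are
 * arbitrary. Note that EV_REP must be declared for autorepeat to run.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 150, 50);
 */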
2058 
2059 /**
2060  * input_register_device - register device with input core
2061  * @dev: device to be registered
2062  *
2063  * This function registers the device with the input core. The device
2064  * must be allocated with input_allocate_device() and all of its
2065  * capabilities set up before registering.
2066  * If the function fails, the device must be freed with input_free_device().
2067  * Once the device has been successfully registered it can be unregistered
2068  * with input_unregister_device(); input_free_device() should not be
2069  * called in this case.
2070  *
2071  * Note that this function is also used to register managed input devices
2072  * (ones allocated with devm_input_allocate_device()). Such managed input
2073  * devices need not be explicitly unregistered or freed; their teardown
2074  * is controlled by the devres infrastructure. It is also worth noting
2075  * that teardown of managed input devices is internally a 2-step process:
2076  * a registered managed input device is first unregistered, but stays in
2077  * memory and can still handle input_event() calls (although events will
2078  * not be delivered anywhere). The freeing of the managed input device
2079  * happens later, when the devres stack is unwound to the point where
2080  * the device allocation was made.
2081  */
2082 int input_register_device(struct input_dev *dev)
2083 {
2084 	struct input_devres *devres = NULL;
2085 	struct input_handler *handler;
2086 	unsigned int packet_size;
2087 	const char *path;
2088 	int error;
2089 
2090 	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2091 		dev_err(&dev->dev,
2092 			"Absolute device without dev->absinfo, refusing to register\n");
2093 		return -EINVAL;
2094 	}
2095 
2096 	if (dev->devres_managed) {
2097 		devres = devres_alloc(devm_input_device_unregister,
2098 				      sizeof(*devres), GFP_KERNEL);
2099 		if (!devres)
2100 			return -ENOMEM;
2101 
2102 		devres->input = dev;
2103 	}
2104 
2105 	/* Every input device generates EV_SYN/SYN_REPORT events. */
2106 	__set_bit(EV_SYN, dev->evbit);
2107 
2108 	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
2109 	__clear_bit(KEY_RESERVED, dev->keybit);
2110 
2111 	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
2112 	input_cleanse_bitmasks(dev);
2113 
2114 	packet_size = input_estimate_events_per_packet(dev);
2115 	if (dev->hint_events_per_packet < packet_size)
2116 		dev->hint_events_per_packet = packet_size;
2117 
2118 	dev->max_vals = dev->hint_events_per_packet + 2;
2119 	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
2120 	if (!dev->vals) {
2121 		error = -ENOMEM;
2122 		goto err_devres_free;
2123 	}
2124 
2125 	/*
2126 	 * If delay and period are pre-set by the driver, then autorepeating
2127 	 * is handled by the driver itself and we don't do it in input.c.
2128 	 */
2129 	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
2130 		input_enable_softrepeat(dev, 250, 33);
2131 
2132 	if (!dev->getkeycode)
2133 		dev->getkeycode = input_default_getkeycode;
2134 
2135 	if (!dev->setkeycode)
2136 		dev->setkeycode = input_default_setkeycode;
2137 
2138 	error = device_add(&dev->dev);
2139 	if (error)
2140 		goto err_free_vals;
2141 
2142 	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
2143 	pr_info("%s as %s\n",
2144 		dev->name ? dev->name : "Unspecified device",
2145 		path ? path : "N/A");
2146 	kfree(path);
2147 
2148 	error = mutex_lock_interruptible(&input_mutex);
2149 	if (error)
2150 		goto err_device_del;
2151 
2152 	list_add_tail(&dev->node, &input_dev_list);
2153 
2154 	list_for_each_entry(handler, &input_handler_list, node)
2155 		input_attach_handler(dev, handler);
2156 
2157 	input_wakeup_procfs_readers();
2158 
2159 	mutex_unlock(&input_mutex);
2160 
2161 	if (dev->devres_managed) {
2162 		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
2163 			__func__, dev_name(&dev->dev));
2164 		devres_add(dev->dev.parent, devres);
2165 	}
2166 	return 0;
2167 
2168 err_device_del:
2169 	device_del(&dev->dev);
2170 err_free_vals:
2171 	kfree(dev->vals);
2172 	dev->vals = NULL;
2173 err_devres_free:
2174 	devres_free(devres);
2175 	return error;
2176 }
2177 EXPORT_SYMBOL(input_register_device);
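
/*
 * Example (illustrative sketch): a multi-touch driver hinting how many
 * events one packet may carry before registering, so the core sizes
 * dev->vals generously enough. The slot count and per-packet event count
 * are placeholders; input_mt_init_slots() and input_set_events_per_packet()
 * are helpers from the MT and input headers.
 *
 *	error = input_mt_init_slots(input, 10, INPUT_MT_DIRECT);
 *	if (error)
 *		return error;
 *
 *	input_set_events_per_packet(input, 60);
 *
 *	error = input_register_device(input);
 *	if (error)
 *		return error;
 */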
2178 
2179 /**
2180  * input_unregister_device - unregister previously registered device
2181  * @dev: device to be unregistered
2182  *
2183  * This function unregisters an input device. Once the device is unregistered
2184  * the caller should not try to access it, as it may be freed at any moment.
2185  */
2186 void input_unregister_device(struct input_dev *dev)
2187 {
2188 	if (dev->devres_managed) {
2189 		WARN_ON(devres_destroy(dev->dev.parent,
2190 					devm_input_device_unregister,
2191 					devm_input_device_match,
2192 					dev));
2193 		__input_unregister_device(dev);
2194 		/*
2195 		 * We do not do input_put_device() here because it will be done
2196 		 * when the second (devm_input_allocate_device) devres entry fires.
2197 		 */
2198 	} else {
2199 		__input_unregister_device(dev);
2200 		input_put_device(dev);
2201 	}
2202 }
2203 EXPORT_SYMBOL(input_unregister_device);
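
/*
 * Example (illustrative sketch): tearing down a non-managed input device
 * from a hypothetical driver's remove() path; struct foo_priv and the
 * foo_remove() name are assumptions for illustration.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = platform_get_drvdata(pdev);
 *
 *		input_unregister_device(priv->input);
 *		return 0;
 *	}
 *
 * After input_unregister_device() returns, priv->input must not be
 * dereferenced again.
 */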
2204 
2205 /**
2206  * input_register_handler - register a new input handler
2207  * @handler: handler to be registered
2208  *
2209  * This function registers a new input handler (interface) for input
2210  * devices in the system and attaches it to all input devices that
2211  * are compatible with the handler.
2212  */
2213 int input_register_handler(struct input_handler *handler)
2214 {
2215 	struct input_dev *dev;
2216 	int error;
2217 
2218 	error = mutex_lock_interruptible(&input_mutex);
2219 	if (error)
2220 		return error;
2221 
2222 	INIT_LIST_HEAD(&handler->h_list);
2223 
2224 	list_add_tail(&handler->node, &input_handler_list);
2225 
2226 	list_for_each_entry(dev, &input_dev_list, node)
2227 		input_attach_handler(dev, handler);
2228 
2229 	input_wakeup_procfs_readers();
2230 
2231 	mutex_unlock(&input_mutex);
2232 	return 0;
2233 }
2234 EXPORT_SYMBOL(input_register_handler);
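
/*
 * Example (illustrative sketch): the skeleton of a handler, modelled on the
 * simplest in-tree handlers. foo_event(), foo_connect() and foo_disconnect()
 * are hypothetical; an id_table entry with only .driver_info set matches
 * every input device.
 *
 *	static const struct input_device_id foo_ids[] = {
 *		{ .driver_info = 1 },
 *		{ },
 *	};
 *
 *	static struct input_handler foo_handler = {
 *		.event		= foo_event,
 *		.connect	= foo_connect,
 *		.disconnect	= foo_disconnect,
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return input_register_handler(&foo_handler);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		input_unregister_handler(&foo_handler);
 *	}
 */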
2235 
2236 /**
2237  * input_unregister_handler - unregisters an input handler
2238  * @handler: handler to be unregistered
2239  *
2240  * This function disconnects a handler from its input devices and
2241  * removes it from the list of known handlers.
2242  */
2243 void input_unregister_handler(struct input_handler *handler)
2244 {
2245 	struct input_handle *handle, *next;
2246 
2247 	mutex_lock(&input_mutex);
2248 
2249 	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2250 		handler->disconnect(handle);
2251 	WARN_ON(!list_empty(&handler->h_list));
2252 
2253 	list_del_init(&handler->node);
2254 
2255 	input_wakeup_procfs_readers();
2256 
2257 	mutex_unlock(&input_mutex);
2258 }
2259 EXPORT_SYMBOL(input_unregister_handler);
2260 
2261 /**
2262  * input_handler_for_each_handle - handle iterator
2263  * @handler: input handler to iterate
2264  * @data: data for the callback
2265  * @fn: function to be called for each handle
2266  *
2267  * Iterate over @handler's list of handles and call @fn for each, passing
2268  * it @data, and stop when @fn returns a non-zero value. The function uses
2269  * RCU to traverse the list and therefore may be used in atomic
2270  * contexts. The @fn callback is invoked from an RCU critical section and
2271  * thus must not sleep.
2272  */
2273 int input_handler_for_each_handle(struct input_handler *handler, void *data,
2274 				  int (*fn)(struct input_handle *, void *))
2275 {
2276 	struct input_handle *handle;
2277 	int retval = 0;
2278 
2279 	rcu_read_lock();
2280 
2281 	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2282 		retval = fn(handle, data);
2283 		if (retval)
2284 			break;
2285 	}
2286 
2287 	rcu_read_unlock();
2288 
2289 	return retval;
2290 }
2291 EXPORT_SYMBOL(input_handler_for_each_handle);
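
/*
 * Example (illustrative sketch): a callback usable with this iterator. It
 * runs under rcu_read_lock() and therefore only inspects the handle without
 * sleeping; foo_handle_is_open() and foo_handler are hypothetical names.
 *
 *	static int foo_handle_is_open(struct input_handle *handle, void *data)
 *	{
 *		return handle->open > 0;
 *	}
 *
 *	if (input_handler_for_each_handle(&foo_handler, NULL,
 *					  foo_handle_is_open))
 *		pr_debug("foo: at least one handle is open\n");
 */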
2292 
2293 /**
2294  * input_register_handle - register a new input handle
2295  * @handle: handle to register
2296  *
2297  * This function puts a new input handle onto the device's
2298  * and handler's lists so that events can flow through
2299  * it once it is opened using input_open_device().
2300  *
2301  * This function is supposed to be called from handler's
2302  * connect() method.
2303  */
2304 int input_register_handle(struct input_handle *handle)
2305 {
2306 	struct input_handler *handler = handle->handler;
2307 	struct input_dev *dev = handle->dev;
2308 	int error;
2309 
2310 	/*
2311 	 * We take dev->mutex here to prevent a race with
2312 	 * input_release_device().
2313 	 */
2314 	error = mutex_lock_interruptible(&dev->mutex);
2315 	if (error)
2316 		return error;
2317 
2318 	/*
2319 	 * Filters go to the head of the list, normal handlers
2320 	 * to the tail.
2321 	 */
2322 	if (handler->filter)
2323 		list_add_rcu(&handle->d_node, &dev->h_list);
2324 	else
2325 		list_add_tail_rcu(&handle->d_node, &dev->h_list);
2326 
2327 	mutex_unlock(&dev->mutex);
2328 
2329 	/*
2330 	 * Since we are supposed to be called from ->connect(),
2331 	 * which is mutually exclusive with ->disconnect(),
2332 	 * we can't be racing with input_unregister_handle()
2333 	 * and so a separate lock is not needed here.
2334 	 */
2335 	list_add_tail_rcu(&handle->h_node, &handler->h_list);
2336 
2337 	if (handler->start)
2338 		handler->start(handle);
2339 
2340 	return 0;
2341 }
2342 EXPORT_SYMBOL(input_register_handle);
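
/*
 * Example (illustrative sketch): the usual shape of a handler's connect()
 * method. The error unwinding mirrors the registration order; the foo_*
 * names are hypothetical.
 *
 *	static int foo_connect(struct input_handler *handler,
 *			       struct input_dev *dev,
 *			       const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "foo";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	 err_unregister_handle:
 *		input_unregister_handle(handle);
 *	 err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 */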
2343 
2344 /**
2345  * input_unregister_handle - unregister an input handle
2346  * @handle: handle to unregister
2347  *
2348  * This function removes the input handle from the device's
2349  * and handler's lists.
2350  *
2351  * This function is supposed to be called from handler's
2352  * disconnect() method.
2353  */
2354 void input_unregister_handle(struct input_handle *handle)
2355 {
2356 	struct input_dev *dev = handle->dev;
2357 
2358 	list_del_rcu(&handle->h_node);
2359 
2360 	/*
2361 	 * Take dev->mutex to prevent a race with input_release_device().
2362 	 */
2363 	mutex_lock(&dev->mutex);
2364 	list_del_rcu(&handle->d_node);
2365 	mutex_unlock(&dev->mutex);
2366 
2367 	synchronize_rcu();
2368 }
2369 EXPORT_SYMBOL(input_unregister_handle);
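
/*
 * Example (illustrative sketch): the disconnect() counterpart to the
 * connect() sketch above, again with hypothetical names. The steps run in
 * the reverse order of connect(): close, unregister, then free.
 *
 *	static void foo_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */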
2370 
2371 /**
2372  * input_get_new_minor - allocates a new input minor number
2373  * @legacy_base: beginning of the legacy range to be searched
2374  * @legacy_num: size of the legacy range
2375  * @allow_dynamic: whether we can also take an ID from the dynamic range
2376  *
2377  * This function allocates a new device minor from the input major namespace.
2378  * The caller can request a legacy minor by specifying the @legacy_base and
2379  * @legacy_num parameters, and whether an ID may be allocated from the dynamic
2380  * range if there are no free IDs in the legacy range.
2381  */
2382 int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2383 			bool allow_dynamic)
2384 {
2385 	/*
2386 	 * This function should be called from input handler's ->connect()
2387 	 * methods, which are serialized with input_mutex, so no additional
2388 	 * locking is needed here.
2389 	 */
2390 	if (legacy_base >= 0) {
2391 		int minor = ida_simple_get(&input_ida,
2392 					   legacy_base,
2393 					   legacy_base + legacy_num,
2394 					   GFP_KERNEL);
2395 		if (minor >= 0 || !allow_dynamic)
2396 			return minor;
2397 	}
2398 
2399 	return ida_simple_get(&input_ida,
2400 			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2401 			      GFP_KERNEL);
2402 }
2403 EXPORT_SYMBOL(input_get_new_minor);
2404 
2405 /**
2406  * input_free_minor - release previously allocated minor
2407  * @minor: minor to be released
2408  *
2409  * This function releases a previously allocated input minor so that it can be
2410  * reused later.
2411  */
2412 void input_free_minor(unsigned int minor)
2413 {
2414 	ida_simple_remove(&input_ida, minor);
2415 }
2416 EXPORT_SYMBOL(input_free_minor);
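
/*
 * Example (illustrative sketch): how a character-device handler pairs
 * input_get_new_minor() and input_free_minor() in its connect() and
 * disconnect() paths. FOO_MINOR_BASE and FOO_MINORS are hypothetical
 * constants describing the handler's legacy minor range.
 *
 *	minor = input_get_new_minor(FOO_MINOR_BASE, FOO_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 *
 * and on the error path, or in disconnect():
 *
 *	input_free_minor(minor);
 */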
2417 
2418 static int __init input_init(void)
2419 {
2420 	int err;
2421 
2422 	err = class_register(&input_class);
2423 	if (err) {
2424 		pr_err("unable to register input_dev class\n");
2425 		return err;
2426 	}
2427 
2428 	err = input_proc_init();
2429 	if (err)
2430 		goto fail1;
2431 
2432 	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2433 				     INPUT_MAX_CHAR_DEVICES, "input");
2434 	if (err) {
2435 		pr_err("unable to register char major %d\n", INPUT_MAJOR);
2436 		goto fail2;
2437 	}
2438 
2439 	return 0;
2440 
2441  fail2:	input_proc_exit();
2442  fail1:	class_unregister(&input_class);
2443 	return err;
2444 }
2445 
2446 static void __exit input_exit(void)
2447 {
2448 	input_proc_exit();
2449 	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2450 				 INPUT_MAX_CHAR_DEVICES);
2451 	class_unregister(&input_class);
2452 }
2453 
2454 subsys_initcall(input_init);
2455 module_exit(input_exit);
2456