// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;

	/* This flag tracks whether the HID device is usable for commands from
	 * userspace. The flag is already set before hid_add_device(), which
	 * runs in workqueue context, to allow hid_add_device() to communicate
	 * with userspace.
	 * However, if hid_add_device() fails, the flag is cleared without
	 * holding devlock.
	 * We guarantee that if @running changes from true to false while you're
	 * holding @devlock, it's still fine to access @hid.
	 */
	bool running;

	__u8 *rd_data;
	uint rd_size;

	/* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

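/*
 * Register the HID device with the HID core from workqueue context. On
 * failure we only clear ->running and wake any blocked report waiters;
 * the hid_device itself is torn down later by uhid_dev_destroy().
 */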
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		/* We used to call hid_destroy_device() here, but that's really
		 * messy to get right because we have to coordinate with
		 * concurrent writes from userspace that might be in the middle
		 * of using uhid->hid.
		 * Just leave uhid->hid as-is for now, and clean it up when
		 * userspace tries to close or reinitialize the uhid instance.
		 *
		 * However, we do have to clear the ->running flag and do a
		 * wakeup to make sure userspace knows that the device is gone.
		 */
		WRITE_ONCE(uhid->running, false);
		wake_up_interruptible(&uhid->report_wait);
	}
}

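/*
 * Append @ev to the output queue and wake up readers. Must be called with
 * qlock held; takes ownership of @ev and frees it if the queue is full.
 */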
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

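/* Allocate an empty event of the given type and queue it for userspace. */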
static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

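/*
 * hid_ll_driver .start callback: tell userspace the device is starting and
 * pass along which report types carry numbered reports.
 */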
static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !READ_ONCE(uhid->running),
				5 * HZ);
	if (!ret || !READ_ONCE(uhid->running) || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

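/*
 * Complete a pending GET_REPORT/SET_REPORT request: if the reply matches the
 * outstanding request id and type, stash it in report_buf and wake the
 * waiter. Stale or unexpected replies are dropped silently.
 */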
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

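/*
 * Forward a GET_REPORT request to userspace and block (up to 5 seconds) for
 * the matching UHID_GET_REPORT_REPLY, then copy the reply data into @buf.
 */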
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!READ_ONCE(uhid->running))
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

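/*
 * Forward a SET_REPORT request to userspace and block for the matching
 * UHID_SET_REPORT_REPLY; returns the number of bytes written on success.
 */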
static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

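/*
 * hid_ll_driver .raw_request callback: translate HID report and request types
 * into their UHID counterparts and dispatch to the GET/SET_REPORT helpers.
 */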
static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

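/*
 * Queue an UHID_OUTPUT event for userspace. This is fire-and-forget: no reply
 * is awaited, and the full @count is reported as written.
 */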
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

static const struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
	.max_buffer_size = UHID_DATA_MAX,
};

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

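/*
 * Copy a uhid_event from userspace. For 32-bit callers, legacy UHID_CREATE
 * requests carry a compat pointer and a packed layout, so they are converted
 * into the native struct uhid_create_req; everything else copies straight
 * through.
 */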
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

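/*
 * Handle UHID_CREATE2: duplicate the report descriptor, allocate and fill a
 * hid_device, and defer hid_add_device() to the worker so userspace can
 * answer feature requests issued during probe.
 */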
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->hid)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	/* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/* Adding of a HID device is done through a worker, to allow HID
	 * drivers which use feature requests during .probe to work; otherwise
	 * they would block on devlock, which is held by uhid_char_write.
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

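/*
 * Handle the legacy UHID_CREATE request: copy the report descriptor in from
 * the userspace pointer, repack the fields as a UHID_CREATE2 event and hand
 * it to uhid_dev_create2().
 */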
static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

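/*
 * Tear down the HID device: mark it not running, wake any blocked report
 * waiters, flush the add-device worker and destroy the hid_device along with
 * the copied report descriptor.
 */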
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->hid)
		return -EINVAL;

	WRITE_ONCE(uhid->running, false);
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	uhid->hid = NULL;
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	stream_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

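/*
 * Read one queued uhid_event. Blocks until an event is available unless
 * O_NONBLOCK is set; copies at most sizeof(struct uhid_event) bytes and
 * advances the queue tail under qlock.
 */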
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
						uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

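/*
 * Write one uhid_event from userspace and dispatch it by type under devlock.
 * UHID_CREATE is refused when the caller's credentials differ from those the
 * file was opened with, since it dereferences a user pointer.
 */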
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);