1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * inode.c -- user mode filesystem api for usb gadget controllers
4 *
5 * Copyright (C) 2003-2004 David Brownell
6 * Copyright (C) 2003 Agilent Technologies
7 */
8
9
10 /* #define VERBOSE_DEBUG */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/fs_context.h>
16 #include <linux/pagemap.h>
17 #include <linux/uts.h>
18 #include <linux/wait.h>
19 #include <linux/compiler.h>
20 #include <linux/uaccess.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/poll.h>
24 #include <linux/kthread.h>
25 #include <linux/aio.h>
26 #include <linux/uio.h>
27 #include <linux/refcount.h>
28 #include <linux/delay.h>
29 #include <linux/device.h>
30 #include <linux/moduleparam.h>
31
32 #include <linux/usb/gadgetfs.h>
33 #include <linux/usb/gadget.h>
34
35
36 /*
37 * The gadgetfs API maps each endpoint to a file descriptor so that you
38 * can use standard synchronous read/write calls for I/O. There's some
39 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
40 * drivers show how this works in practice. You can also use AIO to
41 * eliminate I/O gaps between requests, to help when streaming data.
42 *
43 * Key parts that must be USB-specific are protocols defining how the
44 * read/write operations relate to the hardware state machines. There
45 * are two types of files. One type is for the device, implementing ep0.
46 * The other type is for each IN or OUT endpoint. In both cases, the
47 * user mode driver must configure the hardware before using it.
48 *
49 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
50 * (by writing configuration and device descriptors). Afterwards it
51 * may serve as a source of device events, used to handle all control
52 * requests other than basic enumeration.
53 *
54 * - Then, after a SET_CONFIGURATION control request, ep_config() is
55 * called when each /dev/gadget/ep* file is configured (by writing
56 * endpoint descriptors). Afterwards these files are used to write()
57 * IN data or to read() OUT data. To halt the endpoint, a "wrong
58 * direction" request is issued (like reading an IN endpoint).
59 *
60 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
61 * not possible on all hardware. For example, precise fault handling with
62 * respect to data left in endpoint fifos after aborted operations; or
63 * selective clearing of endpoint halts, to implement SET_INTERFACE.
64 */
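/* Illustrative only: a minimal user-mode sketch of that flow. The mount
 * point, the "net2280" controller name, the endpoint file name, and the
 * descriptor buffers are assumptions for this example, not part of the
 * driver.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int ep0 = open("/dev/gadget/net2280", O_RDWR);
 *	write(ep0, dev_desc_buf, dev_desc_len);	// tag 0 + config(s) + device
 *	// ... then read(ep0, ...) to collect usb_gadgetfs_event reports ...
 *
 *	int in = open("/dev/gadget/ep1in", O_RDWR);
 *	write(in, ep_desc_buf, ep_desc_len);	// tag 1 + endpoint descriptor(s)
 *	write(in, data, data_len);		// IN transfer toward the host
 */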
65
66 #define DRIVER_DESC "USB Gadget filesystem"
67 #define DRIVER_VERSION "24 Aug 2004"
68
69 static const char driver_desc [] = DRIVER_DESC;
70 static const char shortname [] = "gadgetfs";
71
72 MODULE_DESCRIPTION (DRIVER_DESC);
73 MODULE_AUTHOR ("David Brownell");
74 MODULE_LICENSE ("GPL");
75
76 static int ep_open(struct inode *, struct file *);
77
78
79 /*----------------------------------------------------------------------*/
80
81 #define GADGETFS_MAGIC 0xaee71ee7
82
83 /* /dev/gadget/$CHIP represents ep0 and the whole device */
84 enum ep0_state {
85 /* DISABLED is the initial state. */
86 STATE_DEV_DISABLED = 0,
87
88 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
89 * ep0/device i/o modes and binding to the controller. Driver
90 * must always write descriptors to initialize the device, then
91 * the device becomes UNCONNECTED until enumeration.
92 */
93 STATE_DEV_OPENED,
94
95 /* From then on, ep0 fd is in either of two basic modes:
96 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
97 * - SETUP: read/write will transfer control data and succeed;
98 * or if "wrong direction", performs protocol stall
99 */
100 STATE_DEV_UNCONNECTED,
101 STATE_DEV_CONNECTED,
102 STATE_DEV_SETUP,
103
104 /* UNBOUND means the driver closed ep0, so the device won't be
105 * accessible again (DEV_DISABLED) until all fds are closed.
106 */
107 STATE_DEV_UNBOUND,
108 };
109
110 /* enough for the whole queue: most events invalidate others */
111 #define N_EVENT 5
112
113 #define RBUF_SIZE 256
114
115 struct dev_data {
116 spinlock_t lock;
117 refcount_t count;
118 int udc_usage;
119 enum ep0_state state; /* P: lock */
120 struct usb_gadgetfs_event event [N_EVENT];
121 unsigned ev_next;
122 struct fasync_struct *fasync;
123 u8 current_config;
124
125 /* drivers reading ep0 MUST handle control requests (SETUP)
126 * reported that way; else the host will time out.
127 */
128 unsigned usermode_setup : 1,
129 setup_in : 1,
130 setup_can_stall : 1,
131 setup_out_ready : 1,
132 setup_out_error : 1,
133 setup_abort : 1,
134 gadget_registered : 1;
135 unsigned setup_wLength;
136
137 /* the rest is basically write-once */
138 struct usb_config_descriptor *config, *hs_config;
139 struct usb_device_descriptor *dev;
140 struct usb_request *req;
141 struct usb_gadget *gadget;
142 struct list_head epfiles;
143 void *buf;
144 wait_queue_head_t wait;
145 struct super_block *sb;
146 struct dentry *dentry;
147
148 /* except this scratch i/o buffer for ep0 */
149 u8 rbuf[RBUF_SIZE];
150 };
151
152 static inline void get_dev (struct dev_data *data)
153 {
154 refcount_inc (&data->count);
155 }
156
157 static void put_dev (struct dev_data *data)
158 {
159 if (likely (!refcount_dec_and_test (&data->count)))
160 return;
161 /* needs no more cleanup */
162 BUG_ON (waitqueue_active (&data->wait));
163 kfree (data);
164 }
165
166 static struct dev_data *dev_new (void)
167 {
168 struct dev_data *dev;
169
170 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
171 if (!dev)
172 return NULL;
173 dev->state = STATE_DEV_DISABLED;
174 refcount_set (&dev->count, 1);
175 spin_lock_init (&dev->lock);
176 INIT_LIST_HEAD (&dev->epfiles);
177 init_waitqueue_head (&dev->wait);
178 return dev;
179 }
180
181 /*----------------------------------------------------------------------*/
182
183 /* other /dev/gadget/$ENDPOINT files represent endpoints */
184 enum ep_state {
185 STATE_EP_DISABLED = 0,
186 STATE_EP_READY,
187 STATE_EP_ENABLED,
188 STATE_EP_UNBOUND,
189 };
190
191 struct ep_data {
192 struct mutex lock;
193 enum ep_state state;
194 refcount_t count;
195 struct dev_data *dev;
196 /* must hold dev->lock before accessing ep or req */
197 struct usb_ep *ep;
198 struct usb_request *req;
199 ssize_t status;
200 char name [16];
201 struct usb_endpoint_descriptor desc, hs_desc;
202 struct list_head epfiles;
203 wait_queue_head_t wait;
204 struct dentry *dentry;
205 };
206
207 static inline void get_ep (struct ep_data *data)
208 {
209 refcount_inc (&data->count);
210 }
211
212 static void put_ep (struct ep_data *data)
213 {
214 if (likely (!refcount_dec_and_test (&data->count)))
215 return;
216 put_dev (data->dev);
217 /* needs no more cleanup */
218 BUG_ON (!list_empty (&data->epfiles));
219 BUG_ON (waitqueue_active (&data->wait));
220 kfree (data);
221 }
222
223 /*----------------------------------------------------------------------*/
224
225 /* most "how to use the hardware" policy choices are in userspace:
226 * mapping endpoint roles (which the driver needs) to the capabilities
227 * which the usb controller has. most of those capabilities are exposed
228 * implicitly, starting with the driver name and then endpoint names.
229 */
230
231 static const char *CHIP;
232 static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */
233
234 /*----------------------------------------------------------------------*/
235
236 /* NOTE: don't use dev_printk calls before binding to the gadget
237 * at the end of ep0 configuration, or after unbind.
238 */
239
240 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
241 #define xprintk(d,level,fmt,args...) \
242 printk(level "%s: " fmt , shortname , ## args)
243
244 #ifdef DEBUG
245 #define DBG(dev,fmt,args...) \
246 xprintk(dev , KERN_DEBUG , fmt , ## args)
247 #else
248 #define DBG(dev,fmt,args...) \
249 do { } while (0)
250 #endif /* DEBUG */
251
252 #ifdef VERBOSE_DEBUG
253 #define VDEBUG DBG
254 #else
255 #define VDEBUG(dev,fmt,args...) \
256 do { } while (0)
257 #endif /* DEBUG */
258
259 #define ERROR(dev,fmt,args...) \
260 xprintk(dev , KERN_ERR , fmt , ## args)
261 #define INFO(dev,fmt,args...) \
262 xprintk(dev , KERN_INFO , fmt , ## args)
263
264
265 /*----------------------------------------------------------------------*/
266
267 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
268 *
269 * After opening, configure non-control endpoints. Then use normal
270 * stream read() and write() requests; and maybe ioctl() to get more
271 * precise FIFO status when recovering from cancellation.
272 */
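/* A hedged sketch of that usage from user space; "ep_fd" is an already
 * configured endpoint file and the buffer names are assumptions:
 *
 *	ssize_t n = read(ep_fd, rx_buf, sizeof rx_buf);	// OUT endpoint
 *	// or: write(ep_fd, tx_buf, tx_len);		// IN endpoint
 *	int pending = ioctl(ep_fd, GADGETFS_FIFO_STATUS);
 *	if (pending < 0)
 *		perror("GADGETFS_FIFO_STATUS");
 */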
273
274 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
275 {
276 struct ep_data *epdata = ep->driver_data;
277
278 if (!req->context)
279 return;
280 if (req->status)
281 epdata->status = req->status;
282 else
283 epdata->status = req->actual;
284 complete ((struct completion *)req->context);
285 }
286
287 /* tasklock endpoint, returning when it's connected.
288 * still need dev->lock to use epdata->ep.
289 */
290 static int
291 get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
292 {
293 int val;
294
295 if (f_flags & O_NONBLOCK) {
296 if (!mutex_trylock(&epdata->lock))
297 goto nonblock;
298 if (epdata->state != STATE_EP_ENABLED &&
299 (!is_write || epdata->state != STATE_EP_READY)) {
300 mutex_unlock(&epdata->lock);
301 nonblock:
302 val = -EAGAIN;
303 } else
304 val = 0;
305 return val;
306 }
307
308 val = mutex_lock_interruptible(&epdata->lock);
309 if (val < 0)
310 return val;
311
312 switch (epdata->state) {
313 case STATE_EP_ENABLED:
314 return 0;
315 case STATE_EP_READY: /* not configured yet */
316 if (is_write)
317 return 0;
318 fallthrough;
319 case STATE_EP_UNBOUND: /* clean disconnect */
320 break;
321 // case STATE_EP_DISABLED: /* "can't happen" */
322 default: /* error! */
323 pr_debug ("%s: ep %p not available, state %d\n",
324 shortname, epdata, epdata->state);
325 }
326 mutex_unlock(&epdata->lock);
327 return -ENODEV;
328 }
329
330 static ssize_t
331 ep_io (struct ep_data *epdata, void *buf, unsigned len)
332 {
333 DECLARE_COMPLETION_ONSTACK (done);
334 int value;
335
336 spin_lock_irq (&epdata->dev->lock);
337 if (likely (epdata->ep != NULL)) {
338 struct usb_request *req = epdata->req;
339
340 req->context = &done;
341 req->complete = epio_complete;
342 req->buf = buf;
343 req->length = len;
344 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
345 } else
346 value = -ENODEV;
347 spin_unlock_irq (&epdata->dev->lock);
348
349 if (likely (value == 0)) {
350 value = wait_for_completion_interruptible(&done);
351 if (value != 0) {
352 spin_lock_irq (&epdata->dev->lock);
353 if (likely (epdata->ep != NULL)) {
354 DBG (epdata->dev, "%s i/o interrupted\n",
355 epdata->name);
356 usb_ep_dequeue (epdata->ep, epdata->req);
357 spin_unlock_irq (&epdata->dev->lock);
358
359 wait_for_completion(&done);
360 if (epdata->status == -ECONNRESET)
361 epdata->status = -EINTR;
362 } else {
363 spin_unlock_irq (&epdata->dev->lock);
364
365 DBG (epdata->dev, "endpoint gone\n");
366 wait_for_completion(&done);
367 epdata->status = -ENODEV;
368 }
369 }
370 return epdata->status;
371 }
372 return value;
373 }
374
375 static int
376 ep_release (struct inode *inode, struct file *fd)
377 {
378 struct ep_data *data = fd->private_data;
379 int value;
380
381 value = mutex_lock_interruptible(&data->lock);
382 if (value < 0)
383 return value;
384
385 /* clean up if this can be reopened */
386 if (data->state != STATE_EP_UNBOUND) {
387 data->state = STATE_EP_DISABLED;
388 data->desc.bDescriptorType = 0;
389 data->hs_desc.bDescriptorType = 0;
390 usb_ep_disable(data->ep);
391 }
392 mutex_unlock(&data->lock);
393 put_ep (data);
394 return 0;
395 }
396
397 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
398 {
399 struct ep_data *data = fd->private_data;
400 int status;
401
402 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
403 return status;
404
405 spin_lock_irq (&data->dev->lock);
406 if (likely (data->ep != NULL)) {
407 switch (code) {
408 case GADGETFS_FIFO_STATUS:
409 status = usb_ep_fifo_status (data->ep);
410 break;
411 case GADGETFS_FIFO_FLUSH:
412 usb_ep_fifo_flush (data->ep);
413 break;
414 case GADGETFS_CLEAR_HALT:
415 status = usb_ep_clear_halt (data->ep);
416 break;
417 default:
418 status = -ENOTTY;
419 }
420 } else
421 status = -ENODEV;
422 spin_unlock_irq (&data->dev->lock);
423 mutex_unlock(&data->lock);
424 return status;
425 }
426
427 /*----------------------------------------------------------------------*/
428
429 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
430
431 struct kiocb_priv {
432 struct usb_request *req;
433 struct ep_data *epdata;
434 struct kiocb *iocb;
435 struct mm_struct *mm;
436 struct work_struct work;
437 void *buf;
438 struct iov_iter to;
439 const void *to_free;
440 unsigned actual;
441 };
442
443 static int ep_aio_cancel(struct kiocb *iocb)
444 {
445 struct kiocb_priv *priv = iocb->private;
446 struct ep_data *epdata;
447 int value;
448
449 local_irq_disable();
450 epdata = priv->epdata;
451 // spin_lock(&epdata->dev->lock);
452 if (likely(epdata && epdata->ep && priv->req))
453 value = usb_ep_dequeue (epdata->ep, priv->req);
454 else
455 value = -EINVAL;
456 // spin_unlock(&epdata->dev->lock);
457 local_irq_enable();
458
459 return value;
460 }
461
462 static void ep_user_copy_worker(struct work_struct *work)
463 {
464 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
465 struct mm_struct *mm = priv->mm;
466 struct kiocb *iocb = priv->iocb;
467 size_t ret;
468
469 kthread_use_mm(mm);
470 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
471 kthread_unuse_mm(mm);
472 if (!ret)
473 ret = -EFAULT;
474
475 /* completing the iocb can drop the ctx and mm, don't touch mm after */
476 iocb->ki_complete(iocb, ret);
477
478 kfree(priv->buf);
479 kfree(priv->to_free);
480 kfree(priv);
481 }
482
483 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
484 {
485 struct kiocb *iocb = req->context;
486 struct kiocb_priv *priv = iocb->private;
487 struct ep_data *epdata = priv->epdata;
488
489 /* lock against disconnect (and ideally, cancel) */
490 spin_lock(&epdata->dev->lock);
491 priv->req = NULL;
492 priv->epdata = NULL;
493
494 /* if this was a write or a read returning no data then we
495 * don't need to copy anything to userspace, so we can
496 * complete the aio request immediately.
497 */
498 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
499 kfree(req->buf);
500 kfree(priv->to_free);
501 kfree(priv);
502 iocb->private = NULL;
503 iocb->ki_complete(iocb,
504 req->actual ? req->actual : (long)req->status);
505 } else {
506 /* ep_copy_to_user() won't report both; we hide some faults */
507 if (unlikely(0 != req->status))
508 DBG(epdata->dev, "%s fault %d len %d\n",
509 ep->name, req->status, req->actual);
510
511 priv->buf = req->buf;
512 priv->actual = req->actual;
513 INIT_WORK(&priv->work, ep_user_copy_worker);
514 schedule_work(&priv->work);
515 }
516
517 usb_ep_free_request(ep, req);
518 spin_unlock(&epdata->dev->lock);
519 put_ep(epdata);
520 }
521
522 static ssize_t ep_aio(struct kiocb *iocb,
523 struct kiocb_priv *priv,
524 struct ep_data *epdata,
525 char *buf,
526 size_t len)
527 {
528 struct usb_request *req;
529 ssize_t value;
530
531 iocb->private = priv;
532 priv->iocb = iocb;
533
534 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
535 get_ep(epdata);
536 priv->epdata = epdata;
537 priv->actual = 0;
538 priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
539
540 /* each kiocb is coupled to one usb_request, but we can't
541 * allocate or submit those if the host disconnected.
542 */
543 spin_lock_irq(&epdata->dev->lock);
544 value = -ENODEV;
545 if (unlikely(epdata->ep == NULL))
546 goto fail;
547
548 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
549 value = -ENOMEM;
550 if (unlikely(!req))
551 goto fail;
552
553 priv->req = req;
554 req->buf = buf;
555 req->length = len;
556 req->complete = ep_aio_complete;
557 req->context = iocb;
558 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
559 if (unlikely(0 != value)) {
560 usb_ep_free_request(epdata->ep, req);
561 goto fail;
562 }
563 spin_unlock_irq(&epdata->dev->lock);
564 return -EIOCBQUEUED;
565
566 fail:
567 spin_unlock_irq(&epdata->dev->lock);
568 kfree(priv->to_free);
569 kfree(priv);
570 put_ep(epdata);
571 return value;
572 }
573
574 static ssize_t
575 ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
576 {
577 struct file *file = iocb->ki_filp;
578 struct ep_data *epdata = file->private_data;
579 size_t len = iov_iter_count(to);
580 ssize_t value;
581 char *buf;
582
583 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
584 return value;
585
586 /* halt any endpoint by doing a "wrong direction" i/o call */
587 if (usb_endpoint_dir_in(&epdata->desc)) {
588 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
589 !is_sync_kiocb(iocb)) {
590 mutex_unlock(&epdata->lock);
591 return -EINVAL;
592 }
593 DBG (epdata->dev, "%s halt\n", epdata->name);
594 spin_lock_irq(&epdata->dev->lock);
595 if (likely(epdata->ep != NULL))
596 usb_ep_set_halt(epdata->ep);
597 spin_unlock_irq(&epdata->dev->lock);
598 mutex_unlock(&epdata->lock);
599 return -EBADMSG;
600 }
601
602 buf = kmalloc(len, GFP_KERNEL);
603 if (unlikely(!buf)) {
604 mutex_unlock(&epdata->lock);
605 return -ENOMEM;
606 }
607 if (is_sync_kiocb(iocb)) {
608 value = ep_io(epdata, buf, len);
609 if (value >= 0 && (copy_to_iter(buf, value, to) != value))
610 value = -EFAULT;
611 } else {
612 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
613 value = -ENOMEM;
614 if (!priv)
615 goto fail;
616 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
617 if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
618 kfree(priv);
619 goto fail;
620 }
621 value = ep_aio(iocb, priv, epdata, buf, len);
622 if (value == -EIOCBQUEUED)
623 buf = NULL;
624 }
625 fail:
626 kfree(buf);
627 mutex_unlock(&epdata->lock);
628 return value;
629 }
630
631 static ssize_t ep_config(struct ep_data *, const char *, size_t);
632
633 static ssize_t
634 ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
635 {
636 struct file *file = iocb->ki_filp;
637 struct ep_data *epdata = file->private_data;
638 size_t len = iov_iter_count(from);
639 bool configured;
640 ssize_t value;
641 char *buf;
642
643 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
644 return value;
645
646 configured = epdata->state == STATE_EP_ENABLED;
647
648 /* halt any endpoint by doing a "wrong direction" i/o call */
649 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
650 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
651 !is_sync_kiocb(iocb)) {
652 mutex_unlock(&epdata->lock);
653 return -EINVAL;
654 }
655 DBG (epdata->dev, "%s halt\n", epdata->name);
656 spin_lock_irq(&epdata->dev->lock);
657 if (likely(epdata->ep != NULL))
658 usb_ep_set_halt(epdata->ep);
659 spin_unlock_irq(&epdata->dev->lock);
660 mutex_unlock(&epdata->lock);
661 return -EBADMSG;
662 }
663
664 buf = kmalloc(len, GFP_KERNEL);
665 if (unlikely(!buf)) {
666 mutex_unlock(&epdata->lock);
667 return -ENOMEM;
668 }
669
670 if (unlikely(!copy_from_iter_full(buf, len, from))) {
671 value = -EFAULT;
672 goto out;
673 }
674
675 if (unlikely(!configured)) {
676 value = ep_config(epdata, buf, len);
677 } else if (is_sync_kiocb(iocb)) {
678 value = ep_io(epdata, buf, len);
679 } else {
680 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
681 value = -ENOMEM;
682 if (priv) {
683 value = ep_aio(iocb, priv, epdata, buf, len);
684 if (value == -EIOCBQUEUED)
685 buf = NULL;
686 }
687 }
688 out:
689 kfree(buf);
690 mutex_unlock(&epdata->lock);
691 return value;
692 }
693
694 /*----------------------------------------------------------------------*/
695
696 /* used after endpoint configuration */
697 static const struct file_operations ep_io_operations = {
698 .owner = THIS_MODULE,
699
700 .open = ep_open,
701 .release = ep_release,
702 .llseek = no_llseek,
703 .unlocked_ioctl = ep_ioctl,
704 .read_iter = ep_read_iter,
705 .write_iter = ep_write_iter,
706 };
707
708 /* ENDPOINT INITIALIZATION
709 *
710 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
711 * status = write (fd, descriptors, sizeof descriptors)
712 *
713 * That write establishes the endpoint configuration, configuring
714 * the controller to process bulk, interrupt, or isochronous transfers
715 * at the right maxpacket size, and so on.
716 *
717 * The descriptors are message type 1, identified by a host order u32
718 * at the beginning of what's written. Descriptor order is: full/low
719 * speed descriptor, then optional high speed descriptor.
720 */
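/* Sketch of that write from user space (assumptions: a bulk IN endpoint,
 * 64 byte full speed maxpacket, and the high speed descriptor omitted
 * because the hardware is full speed only):
 *
 *	#include <linux/usb/ch9.h>
 *
 *	struct usb_endpoint_descriptor fs_ep = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= 64,	// assumes little-endian host
 *	};
 *	__u32 tag = 1;				// host order, per above
 *	char buf[4 + USB_DT_ENDPOINT_SIZE];
 *
 *	memcpy(buf, &tag, 4);
 *	memcpy(buf + 4, &fs_ep, USB_DT_ENDPOINT_SIZE);
 *	write(ep_fd, buf, sizeof buf);
 */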
721 static ssize_t
722 ep_config (struct ep_data *data, const char *buf, size_t len)
723 {
724 struct usb_ep *ep;
725 u32 tag;
726 int value, length = len;
727
728 if (data->state != STATE_EP_READY) {
729 value = -EL2HLT;
730 goto fail;
731 }
732
733 value = len;
734 if (len < USB_DT_ENDPOINT_SIZE + 4)
735 goto fail0;
736
737 /* we might need to change message format someday */
738 memcpy(&tag, buf, 4);
739 if (tag != 1) {
740 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
741 goto fail0;
742 }
743 buf += 4;
744 len -= 4;
745
746 /* NOTE: audio endpoint extensions not accepted here;
747 * just don't include the extra bytes.
748 */
749
750 /* full/low speed descriptor, then high speed */
751 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
752 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
753 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
754 goto fail0;
755 if (len != USB_DT_ENDPOINT_SIZE) {
756 if (len != 2 * USB_DT_ENDPOINT_SIZE)
757 goto fail0;
758 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
759 USB_DT_ENDPOINT_SIZE);
760 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
761 || data->hs_desc.bDescriptorType
762 != USB_DT_ENDPOINT) {
763 DBG(data->dev, "config %s, bad hs length or type\n",
764 data->name);
765 goto fail0;
766 }
767 }
768
769 spin_lock_irq (&data->dev->lock);
770 if (data->dev->state == STATE_DEV_UNBOUND) {
771 value = -ENOENT;
772 goto gone;
773 } else {
774 ep = data->ep;
775 if (ep == NULL) {
776 value = -ENODEV;
777 goto gone;
778 }
779 }
780 switch (data->dev->gadget->speed) {
781 case USB_SPEED_LOW:
782 case USB_SPEED_FULL:
783 ep->desc = &data->desc;
784 break;
785 case USB_SPEED_HIGH:
786 /* fails if caller didn't provide that descriptor... */
787 ep->desc = &data->hs_desc;
788 break;
789 default:
790 DBG(data->dev, "unconnected, %s init abandoned\n",
791 data->name);
792 value = -EINVAL;
793 goto gone;
794 }
795 value = usb_ep_enable(ep);
796 if (value == 0) {
797 data->state = STATE_EP_ENABLED;
798 value = length;
799 }
800 gone:
801 spin_unlock_irq (&data->dev->lock);
802 if (value < 0) {
803 fail:
804 data->desc.bDescriptorType = 0;
805 data->hs_desc.bDescriptorType = 0;
806 }
807 return value;
808 fail0:
809 value = -EINVAL;
810 goto fail;
811 }
812
813 static int
814 ep_open (struct inode *inode, struct file *fd)
815 {
816 struct ep_data *data = inode->i_private;
817 int value = -EBUSY;
818
819 if (mutex_lock_interruptible(&data->lock) != 0)
820 return -EINTR;
821 spin_lock_irq (&data->dev->lock);
822 if (data->dev->state == STATE_DEV_UNBOUND)
823 value = -ENOENT;
824 else if (data->state == STATE_EP_DISABLED) {
825 value = 0;
826 data->state = STATE_EP_READY;
827 get_ep (data);
828 fd->private_data = data;
829 VDEBUG (data->dev, "%s ready\n", data->name);
830 } else
831 DBG (data->dev, "%s state %d\n",
832 data->name, data->state);
833 spin_unlock_irq (&data->dev->lock);
834 mutex_unlock(&data->lock);
835 return value;
836 }
837
838 /*----------------------------------------------------------------------*/
839
840 /* EP0 IMPLEMENTATION can be partly in userspace.
841 *
842 * Drivers that use this facility receive various events, including
843 * control requests the kernel doesn't handle. Drivers that don't
844 * use this facility may be too simple-minded for real applications.
845 */
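/* Sketch of the user-mode side of this, assuming "ep0_fd" is the open
 * /dev/gadget/$CHIP file after configuration succeeded:
 *
 *	struct usb_gadgetfs_event event;
 *
 *	while (read(ep0_fd, &event, sizeof event) == sizeof event) {
 *		switch (event.type) {
 *		case GADGETFS_SETUP:
 *			// answer event.u.setup: write() for IN data/status,
 *			// read() to collect OUT data; a "wrong direction"
 *			// call reports the request should be stalled
 *			break;
 *		case GADGETFS_CONNECT:
 *		case GADGETFS_DISCONNECT:
 *		case GADGETFS_SUSPEND:
 *		default:
 *			break;
 *		}
 *	}
 */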
846
847 static inline void ep0_readable (struct dev_data *dev)
848 {
849 wake_up (&dev->wait);
850 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
851 }
852
853 static void clean_req (struct usb_ep *ep, struct usb_request *req)
854 {
855 struct dev_data *dev = ep->driver_data;
856
857 if (req->buf != dev->rbuf) {
858 kfree(req->buf);
859 req->buf = dev->rbuf;
860 }
861 req->complete = epio_complete;
862 dev->setup_out_ready = 0;
863 }
864
865 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
866 {
867 struct dev_data *dev = ep->driver_data;
868 unsigned long flags;
869 int free = 1;
870
871 /* for control OUT, data must still get to userspace */
872 spin_lock_irqsave(&dev->lock, flags);
873 if (!dev->setup_in) {
874 dev->setup_out_error = (req->status != 0);
875 if (!dev->setup_out_error)
876 free = 0;
877 dev->setup_out_ready = 1;
878 ep0_readable (dev);
879 }
880
881 /* clean up as appropriate */
882 if (free && req->buf != &dev->rbuf)
883 clean_req (ep, req);
884 req->complete = epio_complete;
885 spin_unlock_irqrestore(&dev->lock, flags);
886 }
887
888 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
889 {
890 struct dev_data *dev = ep->driver_data;
891
892 if (dev->setup_out_ready) {
893 DBG (dev, "ep0 request busy!\n");
894 return -EBUSY;
895 }
896 if (len > sizeof (dev->rbuf))
897 req->buf = kmalloc(len, GFP_ATOMIC);
898 if (req->buf == NULL) {
899 req->buf = dev->rbuf;
900 return -ENOMEM;
901 }
902 req->complete = ep0_complete;
903 req->length = len;
904 req->zero = 0;
905 return 0;
906 }
907
908 static ssize_t
909 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
910 {
911 struct dev_data *dev = fd->private_data;
912 ssize_t retval;
913 enum ep0_state state;
914
915 spin_lock_irq (&dev->lock);
916 if (dev->state <= STATE_DEV_OPENED) {
917 retval = -EINVAL;
918 goto done;
919 }
920
921 /* report fd mode change before acting on it */
922 if (dev->setup_abort) {
923 dev->setup_abort = 0;
924 retval = -EIDRM;
925 goto done;
926 }
927
928 /* control DATA stage */
929 if ((state = dev->state) == STATE_DEV_SETUP) {
930
931 if (dev->setup_in) { /* stall IN */
932 VDEBUG(dev, "ep0in stall\n");
933 (void) usb_ep_set_halt (dev->gadget->ep0);
934 retval = -EL2HLT;
935 dev->state = STATE_DEV_CONNECTED;
936
937 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
938 struct usb_ep *ep = dev->gadget->ep0;
939 struct usb_request *req = dev->req;
940
941 if ((retval = setup_req (ep, req, 0)) == 0) {
942 ++dev->udc_usage;
943 spin_unlock_irq (&dev->lock);
944 retval = usb_ep_queue (ep, req, GFP_KERNEL);
945 spin_lock_irq (&dev->lock);
946 --dev->udc_usage;
947 }
948 dev->state = STATE_DEV_CONNECTED;
949
950 /* assume that was SET_CONFIGURATION */
951 if (dev->current_config) {
952 unsigned power;
953
954 if (gadget_is_dualspeed(dev->gadget)
955 && (dev->gadget->speed
956 == USB_SPEED_HIGH))
957 power = dev->hs_config->bMaxPower;
958 else
959 power = dev->config->bMaxPower;
960 usb_gadget_vbus_draw(dev->gadget, 2 * power);
961 }
962
963 } else { /* collect OUT data */
964 if ((fd->f_flags & O_NONBLOCK) != 0
965 && !dev->setup_out_ready) {
966 retval = -EAGAIN;
967 goto done;
968 }
969 spin_unlock_irq (&dev->lock);
970 retval = wait_event_interruptible (dev->wait,
971 dev->setup_out_ready != 0);
972
973 /* FIXME state could change from under us */
974 spin_lock_irq (&dev->lock);
975 if (retval)
976 goto done;
977
978 if (dev->state != STATE_DEV_SETUP) {
979 retval = -ECANCELED;
980 goto done;
981 }
982 dev->state = STATE_DEV_CONNECTED;
983
984 if (dev->setup_out_error)
985 retval = -EIO;
986 else {
987 len = min (len, (size_t)dev->req->actual);
988 ++dev->udc_usage;
989 spin_unlock_irq(&dev->lock);
990 if (copy_to_user (buf, dev->req->buf, len))
991 retval = -EFAULT;
992 else
993 retval = len;
994 spin_lock_irq(&dev->lock);
995 --dev->udc_usage;
996 clean_req (dev->gadget->ep0, dev->req);
997 /* NOTE userspace can't yet choose to stall */
998 }
999 }
1000 goto done;
1001 }
1002
1003 /* else normal: return event data */
1004 if (len < sizeof dev->event [0]) {
1005 retval = -EINVAL;
1006 goto done;
1007 }
1008 len -= len % sizeof (struct usb_gadgetfs_event);
1009 dev->usermode_setup = 1;
1010
1011 scan:
1012 /* return queued events right away */
1013 if (dev->ev_next != 0) {
1014 unsigned i, n;
1015
1016 n = len / sizeof (struct usb_gadgetfs_event);
1017 if (dev->ev_next < n)
1018 n = dev->ev_next;
1019
1020 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1021 for (i = 0; i < n; i++) {
1022 if (dev->event [i].type == GADGETFS_SETUP) {
1023 dev->state = STATE_DEV_SETUP;
1024 n = i + 1;
1025 break;
1026 }
1027 }
1028 spin_unlock_irq (&dev->lock);
1029 len = n * sizeof (struct usb_gadgetfs_event);
1030 if (copy_to_user (buf, &dev->event, len))
1031 retval = -EFAULT;
1032 else
1033 retval = len;
1034 if (len > 0) {
1035 /* NOTE this doesn't guard against broken drivers;
1036 * concurrent ep0 readers may lose events.
1037 */
1038 spin_lock_irq (&dev->lock);
1039 if (dev->ev_next > n) {
1040 memmove(&dev->event[0], &dev->event[n],
1041 sizeof (struct usb_gadgetfs_event)
1042 * (dev->ev_next - n));
1043 }
1044 dev->ev_next -= n;
1045 spin_unlock_irq (&dev->lock);
1046 }
1047 return retval;
1048 }
1049 if (fd->f_flags & O_NONBLOCK) {
1050 retval = -EAGAIN;
1051 goto done;
1052 }
1053
1054 switch (state) {
1055 default:
1056 DBG (dev, "fail %s, state %d\n", __func__, state);
1057 retval = -ESRCH;
1058 break;
1059 case STATE_DEV_UNCONNECTED:
1060 case STATE_DEV_CONNECTED:
1061 spin_unlock_irq (&dev->lock);
1062 DBG (dev, "%s wait\n", __func__);
1063
1064 /* wait for events */
1065 retval = wait_event_interruptible (dev->wait,
1066 dev->ev_next != 0);
1067 if (retval < 0)
1068 return retval;
1069 spin_lock_irq (&dev->lock);
1070 goto scan;
1071 }
1072
1073 done:
1074 spin_unlock_irq (&dev->lock);
1075 return retval;
1076 }
1077
1078 static struct usb_gadgetfs_event *
1079 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1080 {
1081 struct usb_gadgetfs_event *event;
1082 unsigned i;
1083
1084 switch (type) {
1085 /* these events purge the queue */
1086 case GADGETFS_DISCONNECT:
1087 if (dev->state == STATE_DEV_SETUP)
1088 dev->setup_abort = 1;
1089 fallthrough;
1090 case GADGETFS_CONNECT:
1091 dev->ev_next = 0;
1092 break;
1093 case GADGETFS_SETUP: /* previous request timed out */
1094 case GADGETFS_SUSPEND: /* same effect */
1095 /* these events can't be repeated */
1096 for (i = 0; i != dev->ev_next; i++) {
1097 if (dev->event [i].type != type)
1098 continue;
1099 DBG(dev, "discard old event[%d] %d\n", i, type);
1100 dev->ev_next--;
1101 if (i == dev->ev_next)
1102 break;
1103 /* indices start at zero, for simplicity */
1104 memmove (&dev->event [i], &dev->event [i + 1],
1105 sizeof (struct usb_gadgetfs_event)
1106 * (dev->ev_next - i));
1107 }
1108 break;
1109 default:
1110 BUG ();
1111 }
1112 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1113 event = &dev->event [dev->ev_next++];
1114 BUG_ON (dev->ev_next > N_EVENT);
1115 memset (event, 0, sizeof *event);
1116 event->type = type;
1117 return event;
1118 }
1119
1120 static ssize_t
1121 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1122 {
1123 struct dev_data *dev = fd->private_data;
1124 ssize_t retval = -ESRCH;
1125
1126 /* report fd mode change before acting on it */
1127 if (dev->setup_abort) {
1128 dev->setup_abort = 0;
1129 retval = -EIDRM;
1130
1131 /* data and/or status stage for control request */
1132 } else if (dev->state == STATE_DEV_SETUP) {
1133
1134 len = min_t(size_t, len, dev->setup_wLength);
1135 if (dev->setup_in) {
1136 retval = setup_req (dev->gadget->ep0, dev->req, len);
1137 if (retval == 0) {
1138 dev->state = STATE_DEV_CONNECTED;
1139 ++dev->udc_usage;
1140 spin_unlock_irq (&dev->lock);
1141 if (copy_from_user (dev->req->buf, buf, len))
1142 retval = -EFAULT;
1143 else {
1144 if (len < dev->setup_wLength)
1145 dev->req->zero = 1;
1146 retval = usb_ep_queue (
1147 dev->gadget->ep0, dev->req,
1148 GFP_KERNEL);
1149 }
1150 spin_lock_irq(&dev->lock);
1151 --dev->udc_usage;
1152 if (retval < 0) {
1153 clean_req (dev->gadget->ep0, dev->req);
1154 } else
1155 retval = len;
1156
1157 return retval;
1158 }
1159
1160 /* can stall some OUT transfers */
1161 } else if (dev->setup_can_stall) {
1162 VDEBUG(dev, "ep0out stall\n");
1163 (void) usb_ep_set_halt (dev->gadget->ep0);
1164 retval = -EL2HLT;
1165 dev->state = STATE_DEV_CONNECTED;
1166 } else {
1167 DBG(dev, "bogus ep0out stall!\n");
1168 }
1169 } else
1170 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1171
1172 return retval;
1173 }
1174
1175 static int
1176 ep0_fasync (int f, struct file *fd, int on)
1177 {
1178 struct dev_data *dev = fd->private_data;
1179 // caller must F_SETOWN before signal delivery happens
1180 VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1181 return fasync_helper (f, fd, on, &dev->fasync);
1182 }
1183
1184 static struct usb_gadget_driver gadgetfs_driver;
1185
1186 static int
1187 dev_release (struct inode *inode, struct file *fd)
1188 {
1189 struct dev_data *dev = fd->private_data;
1190
1191 /* closing ep0 === shutdown all */
1192
1193 if (dev->gadget_registered) {
1194 usb_gadget_unregister_driver (&gadgetfs_driver);
1195 dev->gadget_registered = false;
1196 }
1197
1198 /* at this point "good" hardware has disconnected the
1199 * device from USB; the host won't see it any more.
1200 * alternatively, all host requests will time out.
1201 */
1202
1203 kfree (dev->buf);
1204 dev->buf = NULL;
1205
1206 /* other endpoints were all decoupled from this device */
1207 spin_lock_irq(&dev->lock);
1208 dev->state = STATE_DEV_DISABLED;
1209 spin_unlock_irq(&dev->lock);
1210
1211 put_dev (dev);
1212 return 0;
1213 }
1214
1215 static __poll_t
1216 ep0_poll (struct file *fd, poll_table *wait)
1217 {
1218 struct dev_data *dev = fd->private_data;
1219 __poll_t mask = 0;
1220
1221 if (dev->state <= STATE_DEV_OPENED)
1222 return DEFAULT_POLLMASK;
1223
1224 poll_wait(fd, &dev->wait, wait);
1225
1226 spin_lock_irq(&dev->lock);
1227
1228 /* report fd mode change before acting on it */
1229 if (dev->setup_abort) {
1230 dev->setup_abort = 0;
1231 mask = EPOLLHUP;
1232 goto out;
1233 }
1234
1235 if (dev->state == STATE_DEV_SETUP) {
1236 if (dev->setup_in || dev->setup_can_stall)
1237 mask = EPOLLOUT;
1238 } else {
1239 if (dev->ev_next != 0)
1240 mask = EPOLLIN;
1241 }
1242 out:
1243 spin_unlock_irq(&dev->lock);
1244 return mask;
1245 }
1246
1247 static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1248 {
1249 struct dev_data *dev = fd->private_data;
1250 struct usb_gadget *gadget = dev->gadget;
1251 long ret = -ENOTTY;
1252
1253 spin_lock_irq(&dev->lock);
1254 if (dev->state == STATE_DEV_OPENED ||
1255 dev->state == STATE_DEV_UNBOUND) {
1256 /* Not bound to a UDC */
1257 } else if (gadget->ops->ioctl) {
1258 ++dev->udc_usage;
1259 spin_unlock_irq(&dev->lock);
1260
1261 ret = gadget->ops->ioctl (gadget, code, value);
1262
1263 spin_lock_irq(&dev->lock);
1264 --dev->udc_usage;
1265 }
1266 spin_unlock_irq(&dev->lock);
1267
1268 return ret;
1269 }
1270
1271 /*----------------------------------------------------------------------*/
1272
1273 /* The in-kernel gadget driver handles most ep0 issues, in particular
1274 * enumerating the single configuration (as provided from user space).
1275 *
1276 * Unrecognized ep0 requests may be handled in user space.
1277 */
1278
1279 static void make_qualifier (struct dev_data *dev)
1280 {
1281 struct usb_qualifier_descriptor qual;
1282 struct usb_device_descriptor *desc;
1283
1284 qual.bLength = sizeof qual;
1285 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1286 qual.bcdUSB = cpu_to_le16 (0x0200);
1287
1288 desc = dev->dev;
1289 qual.bDeviceClass = desc->bDeviceClass;
1290 qual.bDeviceSubClass = desc->bDeviceSubClass;
1291 qual.bDeviceProtocol = desc->bDeviceProtocol;
1292
1293 /* assumes ep0 uses the same value for both speeds ... */
1294 qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1295
1296 qual.bNumConfigurations = 1;
1297 qual.bRESERVED = 0;
1298
1299 memcpy (dev->rbuf, &qual, sizeof qual);
1300 }
1301
1302 static int
1303 config_buf (struct dev_data *dev, u8 type, unsigned index)
1304 {
1305 int len;
1306 int hs = 0;
1307
1308 /* only one configuration */
1309 if (index > 0)
1310 return -EINVAL;
1311
1312 if (gadget_is_dualspeed(dev->gadget)) {
1313 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1314 if (type == USB_DT_OTHER_SPEED_CONFIG)
1315 hs = !hs;
1316 }
1317 if (hs) {
1318 dev->req->buf = dev->hs_config;
1319 len = le16_to_cpu(dev->hs_config->wTotalLength);
1320 } else {
1321 dev->req->buf = dev->config;
1322 len = le16_to_cpu(dev->config->wTotalLength);
1323 }
1324 ((u8 *)dev->req->buf) [1] = type;
1325 return len;
1326 }
1327
1328 static int
1329 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1330 {
1331 struct dev_data *dev = get_gadget_data (gadget);
1332 struct usb_request *req = dev->req;
1333 int value = -EOPNOTSUPP;
1334 struct usb_gadgetfs_event *event;
1335 u16 w_value = le16_to_cpu(ctrl->wValue);
1336 u16 w_length = le16_to_cpu(ctrl->wLength);
1337
1338 if (w_length > RBUF_SIZE) {
1339 if (ctrl->bRequestType & USB_DIR_IN) {
1340 /* Cast away the const, we are going to overwrite on purpose. */
1341 __le16 *temp = (__le16 *)&ctrl->wLength;
1342
1343 *temp = cpu_to_le16(RBUF_SIZE);
1344 w_length = RBUF_SIZE;
1345 } else {
1346 return value;
1347 }
1348 }
1349
1350 spin_lock (&dev->lock);
1351 dev->setup_abort = 0;
1352 if (dev->state == STATE_DEV_UNCONNECTED) {
1353 if (gadget_is_dualspeed(gadget)
1354 && gadget->speed == USB_SPEED_HIGH
1355 && dev->hs_config == NULL) {
1356 spin_unlock(&dev->lock);
1357 ERROR (dev, "no high speed config??\n");
1358 return -EINVAL;
1359 }
1360
1361 dev->state = STATE_DEV_CONNECTED;
1362
1363 INFO (dev, "connected\n");
1364 event = next_event (dev, GADGETFS_CONNECT);
1365 event->u.speed = gadget->speed;
1366 ep0_readable (dev);
1367
1368 /* host may have given up waiting for response. we can miss control
1369 * requests handled lower down (device/endpoint status and features);
1370 * then ep0_{read,write} will report the wrong status. controller
1371 * driver will have aborted pending i/o.
1372 */
1373 } else if (dev->state == STATE_DEV_SETUP)
1374 dev->setup_abort = 1;
1375
1376 req->buf = dev->rbuf;
1377 req->context = NULL;
1378 switch (ctrl->bRequest) {
1379
1380 case USB_REQ_GET_DESCRIPTOR:
1381 if (ctrl->bRequestType != USB_DIR_IN)
1382 goto unrecognized;
1383 switch (w_value >> 8) {
1384
1385 case USB_DT_DEVICE:
1386 value = min (w_length, (u16) sizeof *dev->dev);
1387 dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1388 req->buf = dev->dev;
1389 break;
1390 case USB_DT_DEVICE_QUALIFIER:
1391 if (!dev->hs_config)
1392 break;
1393 value = min (w_length, (u16)
1394 sizeof (struct usb_qualifier_descriptor));
1395 make_qualifier (dev);
1396 break;
1397 case USB_DT_OTHER_SPEED_CONFIG:
1398 case USB_DT_CONFIG:
1399 value = config_buf (dev,
1400 w_value >> 8,
1401 w_value & 0xff);
1402 if (value >= 0)
1403 value = min (w_length, (u16) value);
1404 break;
1405 case USB_DT_STRING:
1406 goto unrecognized;
1407
1408 default: // all others are errors
1409 break;
1410 }
1411 break;
1412
1413 /* currently one config, two speeds */
1414 case USB_REQ_SET_CONFIGURATION:
1415 if (ctrl->bRequestType != 0)
1416 goto unrecognized;
1417 if (0 == (u8) w_value) {
1418 value = 0;
1419 dev->current_config = 0;
1420 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1421 // user mode expected to disable endpoints
1422 } else {
1423 u8 config, power;
1424
1425 if (gadget_is_dualspeed(gadget)
1426 && gadget->speed == USB_SPEED_HIGH) {
1427 config = dev->hs_config->bConfigurationValue;
1428 power = dev->hs_config->bMaxPower;
1429 } else {
1430 config = dev->config->bConfigurationValue;
1431 power = dev->config->bMaxPower;
1432 }
1433
1434 if (config == (u8) w_value) {
1435 value = 0;
1436 dev->current_config = config;
1437 usb_gadget_vbus_draw(gadget, 2 * power);
1438 }
1439 }
1440
1441 /* report SET_CONFIGURATION like any other control request,
1442 * except that usermode may not stall this. the next
1443 * request mustn't be allowed to start until this finishes:
1444 * endpoints and threads set up, etc.
1445 *
1446 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1447 * has bad/racy automagic that prevents synchronizing here.
1448 * even kernel mode drivers often miss them.
1449 */
1450 if (value == 0) {
1451 INFO (dev, "configuration #%d\n", dev->current_config);
1452 usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1453 if (dev->usermode_setup) {
1454 dev->setup_can_stall = 0;
1455 goto delegate;
1456 }
1457 }
1458 break;
1459
1460 #ifndef CONFIG_USB_PXA25X
1461 /* PXA automagically handles this request too */
1462 case USB_REQ_GET_CONFIGURATION:
1463 if (ctrl->bRequestType != 0x80)
1464 goto unrecognized;
1465 *(u8 *)req->buf = dev->current_config;
1466 value = min (w_length, (u16) 1);
1467 break;
1468 #endif
1469
1470 default:
1471 unrecognized:
1472 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1473 dev->usermode_setup ? "delegate" : "fail",
1474 ctrl->bRequestType, ctrl->bRequest,
1475 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1476
1477 /* if there's an ep0 reader, don't stall */
1478 if (dev->usermode_setup) {
1479 dev->setup_can_stall = 1;
1480 delegate:
1481 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1482 ? 1 : 0;
1483 dev->setup_wLength = w_length;
1484 dev->setup_out_ready = 0;
1485 dev->setup_out_error = 0;
1486
1487 /* read DATA stage for OUT right away */
1488 if (unlikely (!dev->setup_in && w_length)) {
1489 value = setup_req (gadget->ep0, dev->req,
1490 w_length);
1491 if (value < 0)
1492 break;
1493
1494 ++dev->udc_usage;
1495 spin_unlock (&dev->lock);
1496 value = usb_ep_queue (gadget->ep0, dev->req,
1497 GFP_KERNEL);
1498 spin_lock (&dev->lock);
1499 --dev->udc_usage;
1500 if (value < 0) {
1501 clean_req (gadget->ep0, dev->req);
1502 break;
1503 }
1504
1505 /* we can't currently stall these */
1506 dev->setup_can_stall = 0;
1507 }
1508
1509 /* state changes when reader collects event */
1510 event = next_event (dev, GADGETFS_SETUP);
1511 event->u.setup = *ctrl;
1512 ep0_readable (dev);
1513 spin_unlock (&dev->lock);
1514 return 0;
1515 }
1516 }
1517
1518 /* proceed with data transfer and status phases? */
1519 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1520 req->length = value;
1521 req->zero = value < w_length;
1522
1523 ++dev->udc_usage;
1524 spin_unlock (&dev->lock);
1525 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1526 spin_lock(&dev->lock);
1527 --dev->udc_usage;
1528 spin_unlock(&dev->lock);
1529 if (value < 0) {
1530 DBG (dev, "ep_queue --> %d\n", value);
1531 req->status = 0;
1532 }
1533 return value;
1534 }
1535
1536 /* device stalls when value < 0 */
1537 spin_unlock (&dev->lock);
1538 return value;
1539 }
1540
1541 static void destroy_ep_files (struct dev_data *dev)
1542 {
1543 DBG (dev, "%s %d\n", __func__, dev->state);
1544
1545 /* dev->state must prevent interference */
1546 spin_lock_irq (&dev->lock);
1547 while (!list_empty(&dev->epfiles)) {
1548 struct ep_data *ep;
1549 struct inode *parent;
1550 struct dentry *dentry;
1551
1552 /* break link to FS */
1553 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1554 list_del_init (&ep->epfiles);
1555 spin_unlock_irq (&dev->lock);
1556
1557 dentry = ep->dentry;
1558 ep->dentry = NULL;
1559 parent = d_inode(dentry->d_parent);
1560
1561 /* break link to controller */
1562 mutex_lock(&ep->lock);
1563 if (ep->state == STATE_EP_ENABLED)
1564 (void) usb_ep_disable (ep->ep);
1565 ep->state = STATE_EP_UNBOUND;
1566 usb_ep_free_request (ep->ep, ep->req);
1567 ep->ep = NULL;
1568 mutex_unlock(&ep->lock);
1569
1570 wake_up (&ep->wait);
1571 put_ep (ep);
1572
1573 /* break link to dcache */
1574 inode_lock(parent);
1575 d_delete (dentry);
1576 dput (dentry);
1577 inode_unlock(parent);
1578
1579 spin_lock_irq (&dev->lock);
1580 }
1581 spin_unlock_irq (&dev->lock);
1582 }
1583
1584
1585 static struct dentry *
1586 gadgetfs_create_file (struct super_block *sb, char const *name,
1587 void *data, const struct file_operations *fops);
1588
1589 static int activate_ep_files (struct dev_data *dev)
1590 {
1591 struct usb_ep *ep;
1592 struct ep_data *data;
1593
1594 gadget_for_each_ep (ep, dev->gadget) {
1595
1596 data = kzalloc(sizeof(*data), GFP_KERNEL);
1597 if (!data)
1598 goto enomem0;
1599 data->state = STATE_EP_DISABLED;
1600 mutex_init(&data->lock);
1601 init_waitqueue_head (&data->wait);
1602
1603 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1604 refcount_set (&data->count, 1);
1605 data->dev = dev;
1606 get_dev (dev);
1607
1608 data->ep = ep;
1609 ep->driver_data = data;
1610
1611 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1612 if (!data->req)
1613 goto enomem1;
1614
1615 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1616 data, &ep_io_operations);
1617 if (!data->dentry)
1618 goto enomem2;
1619 list_add_tail (&data->epfiles, &dev->epfiles);
1620 }
1621 return 0;
1622
1623 enomem2:
1624 usb_ep_free_request (ep, data->req);
1625 enomem1:
1626 put_dev (dev);
1627 kfree (data);
1628 enomem0:
1629 DBG (dev, "%s enomem\n", __func__);
1630 destroy_ep_files (dev);
1631 return -ENOMEM;
1632 }
1633
1634 static void
1635 gadgetfs_unbind (struct usb_gadget *gadget)
1636 {
1637 struct dev_data *dev = get_gadget_data (gadget);
1638
1639 DBG (dev, "%s\n", __func__);
1640
1641 spin_lock_irq (&dev->lock);
1642 dev->state = STATE_DEV_UNBOUND;
1643 while (dev->udc_usage > 0) {
1644 spin_unlock_irq(&dev->lock);
1645 usleep_range(1000, 2000);
1646 spin_lock_irq(&dev->lock);
1647 }
1648 spin_unlock_irq (&dev->lock);
1649
1650 destroy_ep_files (dev);
1651 gadget->ep0->driver_data = NULL;
1652 set_gadget_data (gadget, NULL);
1653
1654 /* we've already been disconnected ... no i/o is active */
1655 if (dev->req)
1656 usb_ep_free_request (gadget->ep0, dev->req);
1657 DBG (dev, "%s done\n", __func__);
1658 put_dev (dev);
1659 }
1660
1661 static struct dev_data *the_device;
1662
1663 static int gadgetfs_bind(struct usb_gadget *gadget,
1664 struct usb_gadget_driver *driver)
1665 {
1666 struct dev_data *dev = the_device;
1667
1668 if (!dev)
1669 return -ESRCH;
1670 if (0 != strcmp (CHIP, gadget->name)) {
1671 pr_err("%s expected %s controller not %s\n",
1672 shortname, CHIP, gadget->name);
1673 return -ENODEV;
1674 }
1675
1676 set_gadget_data (gadget, dev);
1677 dev->gadget = gadget;
1678 gadget->ep0->driver_data = dev;
1679
1680 /* preallocate control response and buffer */
1681 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1682 if (!dev->req)
1683 goto enomem;
1684 dev->req->context = NULL;
1685 dev->req->complete = epio_complete;
1686
1687 if (activate_ep_files (dev) < 0)
1688 goto enomem;
1689
1690 INFO (dev, "bound to %s driver\n", gadget->name);
1691 spin_lock_irq(&dev->lock);
1692 dev->state = STATE_DEV_UNCONNECTED;
1693 spin_unlock_irq(&dev->lock);
1694 get_dev (dev);
1695 return 0;
1696
1697 enomem:
1698 gadgetfs_unbind (gadget);
1699 return -ENOMEM;
1700 }
1701
1702 static void
1703 gadgetfs_disconnect (struct usb_gadget *gadget)
1704 {
1705 struct dev_data *dev = get_gadget_data (gadget);
1706 unsigned long flags;
1707
1708 spin_lock_irqsave (&dev->lock, flags);
1709 if (dev->state == STATE_DEV_UNCONNECTED)
1710 goto exit;
1711 dev->state = STATE_DEV_UNCONNECTED;
1712
1713 INFO (dev, "disconnected\n");
1714 next_event (dev, GADGETFS_DISCONNECT);
1715 ep0_readable (dev);
1716 exit:
1717 spin_unlock_irqrestore (&dev->lock, flags);
1718 }
1719
1720 static void
1721 gadgetfs_suspend (struct usb_gadget *gadget)
1722 {
1723 struct dev_data *dev = get_gadget_data (gadget);
1724 unsigned long flags;
1725
1726 INFO (dev, "suspended from state %d\n", dev->state);
1727 spin_lock_irqsave(&dev->lock, flags);
1728 switch (dev->state) {
1729 case STATE_DEV_SETUP: // VERY odd... host died??
1730 case STATE_DEV_CONNECTED:
1731 case STATE_DEV_UNCONNECTED:
1732 next_event (dev, GADGETFS_SUSPEND);
1733 ep0_readable (dev);
1734 fallthrough;
1735 default:
1736 break;
1737 }
1738 spin_unlock_irqrestore(&dev->lock, flags);
1739 }
1740
1741 static struct usb_gadget_driver gadgetfs_driver = {
1742 .function = (char *) driver_desc,
1743 .bind = gadgetfs_bind,
1744 .unbind = gadgetfs_unbind,
1745 .setup = gadgetfs_setup,
1746 .reset = gadgetfs_disconnect,
1747 .disconnect = gadgetfs_disconnect,
1748 .suspend = gadgetfs_suspend,
1749
1750 .driver = {
1751 .name = shortname,
1752 },
1753 };
1754
1755 /*----------------------------------------------------------------------*/
1756 /* DEVICE INITIALIZATION
1757 *
1758 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1759 * status = write (fd, descriptors, sizeof descriptors)
1760 *
1761 * That write establishes the device configuration, so the kernel can
1762 * bind to the controller ... guaranteeing it can handle enumeration
1763 * at all necessary speeds. Descriptor order is:
1764 *
1765 * . message tag (u32, host order) ... for now, must be zero; it
1766 * would change to support features like multi-config devices
1767 * . full/low speed config ... all wTotalLength bytes (with interface,
1768 * class, altsetting, endpoint, and other descriptors)
1769 * . high speed config ... all descriptors, for high speed operation;
1770 * this one's optional except for high-speed hardware
1771 * . device descriptor
1772 *
1773 * Endpoints are not yet enabled. Drivers must wait until device
1774 * configuration and interface altsetting changes create
1775 * the need to configure (or unconfigure) them.
1776 *
1777 * After initialization, the device stays active for as long as that
1778 * $CHIP file is open. Events must then be read from that descriptor,
1779 * such as configuration notifications.
1780 */
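/* A sketch of that first write; "fs_config_blob" and "device_desc" are
 * placeholders for real descriptor data, and the optional high speed
 * config is omitted here:
 *
 *	__u32 tag = 0;				// host order, must be zero
 *	size_t off = 0;
 *	char buf[4096];
 *
 *	memcpy(buf + off, &tag, 4);
 *	off += 4;
 *	memcpy(buf + off, fs_config_blob, fs_config_len);
 *	off += fs_config_len;
 *	memcpy(buf + off, &device_desc, USB_DT_DEVICE_SIZE);
 *	off += USB_DT_DEVICE_SIZE;
 *	write(ep0_fd, buf, off);		// triggers dev_config() below
 */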
1781
1782 static int is_valid_config(struct usb_config_descriptor *config,
1783 unsigned int total)
1784 {
1785 return config->bDescriptorType == USB_DT_CONFIG
1786 && config->bLength == USB_DT_CONFIG_SIZE
1787 && total >= USB_DT_CONFIG_SIZE
1788 && config->bConfigurationValue != 0
1789 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1790 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1791 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1792 /* FIXME check lengths: walk to end */
1793 }
1794
1795 static ssize_t
1796 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1797 {
1798 struct dev_data *dev = fd->private_data;
1799 ssize_t value, length = len;
1800 unsigned total;
1801 u32 tag;
1802 char *kbuf;
1803
1804 spin_lock_irq(&dev->lock);
1805 if (dev->state > STATE_DEV_OPENED) {
1806 value = ep0_write(fd, buf, len, ptr);
1807 spin_unlock_irq(&dev->lock);
1808 return value;
1809 }
1810 spin_unlock_irq(&dev->lock);
1811
1812 if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1813 (len > PAGE_SIZE * 4))
1814 return -EINVAL;
1815
1816 /* we might need to change message format someday */
1817 if (copy_from_user (&tag, buf, 4))
1818 return -EFAULT;
1819 if (tag != 0)
1820 return -EINVAL;
1821 buf += 4;
1822 length -= 4;
1823
1824 kbuf = memdup_user(buf, length);
1825 if (IS_ERR(kbuf))
1826 return PTR_ERR(kbuf);
1827
1828 spin_lock_irq (&dev->lock);
1829 value = -EINVAL;
1830 if (dev->buf) {
1831 spin_unlock_irq(&dev->lock);
1832 kfree(kbuf);
1833 return value;
1834 }
1835 dev->buf = kbuf;
1836
1837 /* full or low speed config */
1838 dev->config = (void *) kbuf;
1839 total = le16_to_cpu(dev->config->wTotalLength);
1840 if (!is_valid_config(dev->config, total) ||
1841 total > length - USB_DT_DEVICE_SIZE)
1842 goto fail;
1843 kbuf += total;
1844 length -= total;
1845
1846 /* optional high speed config */
1847 if (kbuf [1] == USB_DT_CONFIG) {
1848 dev->hs_config = (void *) kbuf;
1849 total = le16_to_cpu(dev->hs_config->wTotalLength);
1850 if (!is_valid_config(dev->hs_config, total) ||
1851 total > length - USB_DT_DEVICE_SIZE)
1852 goto fail;
1853 kbuf += total;
1854 length -= total;
1855 } else {
1856 dev->hs_config = NULL;
1857 }
1858
1859 /* could support multiple configs, using another encoding! */
1860
1861 /* device descriptor (tweaked for paranoia) */
1862 if (length != USB_DT_DEVICE_SIZE)
1863 goto fail;
1864 dev->dev = (void *)kbuf;
1865 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1866 || dev->dev->bDescriptorType != USB_DT_DEVICE
1867 || dev->dev->bNumConfigurations != 1)
1868 goto fail;
1869 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1870
1871 /* triggers gadgetfs_bind(); then we can enumerate. */
1872 spin_unlock_irq (&dev->lock);
1873 if (dev->hs_config)
1874 gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1875 else
1876 gadgetfs_driver.max_speed = USB_SPEED_FULL;
1877
1878 value = usb_gadget_register_driver(&gadgetfs_driver);
1879 if (value != 0) {
1880 spin_lock_irq(&dev->lock);
1881 goto fail;
1882 } else {
1883 /* at this point "good" hardware has for the first time
1884 * let the host see us over USB. alternatively, if users
1885 * unplug/replug that will clear all the error state.
1886 *
1887 * note: everything running before here was guaranteed
1888 * to choke driver model style diagnostics. from here
1889 * on, they can work ... except in cleanup paths that
1890 * kick in after the ep0 descriptor is closed.
1891 */
1892 value = len;
1893 dev->gadget_registered = true;
1894 }
1895 return value;
1896
1897 fail:
1898 dev->config = NULL;
1899 dev->hs_config = NULL;
1900 dev->dev = NULL;
1901 spin_unlock_irq (&dev->lock);
1902 pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1903 kfree (dev->buf);
1904 dev->buf = NULL;
1905 return value;
1906 }
1907
1908 static int
1909 gadget_dev_open (struct inode *inode, struct file *fd)
1910 {
1911 struct dev_data *dev = inode->i_private;
1912 int value = -EBUSY;
1913
1914 spin_lock_irq(&dev->lock);
1915 if (dev->state == STATE_DEV_DISABLED) {
1916 dev->ev_next = 0;
1917 dev->state = STATE_DEV_OPENED;
1918 fd->private_data = dev;
1919 get_dev (dev);
1920 value = 0;
1921 }
1922 spin_unlock_irq(&dev->lock);
1923 return value;
1924 }
1925
1926 static const struct file_operations ep0_operations = {
1927 .llseek = no_llseek,
1928
1929 .open = gadget_dev_open,
1930 .read = ep0_read,
1931 .write = dev_config,
1932 .fasync = ep0_fasync,
1933 .poll = ep0_poll,
1934 .unlocked_ioctl = gadget_dev_ioctl,
1935 .release = dev_release,
1936 };
1937
1938 /*----------------------------------------------------------------------*/
1939
1940 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1941 *
1942 * Mounting the filesystem creates a controller file, used first for
1943 * device configuration then later for event monitoring.
1944 */
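/* Sketch only: getting the filesystem mounted from user space, assuming
 * the conventional /dev/gadget mount point:
 *
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *
 *	mkdir("/dev/gadget", 0700);
 *	mount("none", "/dev/gadget", "gadgetfs", 0, NULL);
 */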
1945
1946
1947 /* FIXME PAM etc could set this security policy without mount options
1948 * if epfiles inherited ownership and permissions from ep0 ...
1949 */
1950
1951 static unsigned default_uid;
1952 static unsigned default_gid;
1953 static unsigned default_perm = S_IRUSR | S_IWUSR;
1954
1955 module_param (default_uid, uint, 0644);
1956 module_param (default_gid, uint, 0644);
1957 module_param (default_perm, uint, 0644);
1958
1959
1960 static struct inode *
1961 gadgetfs_make_inode (struct super_block *sb,
1962 void *data, const struct file_operations *fops,
1963 int mode)
1964 {
1965 struct inode *inode = new_inode (sb);
1966
1967 if (inode) {
1968 inode->i_ino = get_next_ino();
1969 inode->i_mode = mode;
1970 inode->i_uid = make_kuid(&init_user_ns, default_uid);
1971 inode->i_gid = make_kgid(&init_user_ns, default_gid);
1972 inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
1973 inode->i_private = data;
1974 inode->i_fop = fops;
1975 }
1976 return inode;
1977 }
1978
1979 /* creates in fs root directory, so non-renamable and non-linkable.
1980 * so inode and dentry are paired, until device reconfig.
1981 */
1982 static struct dentry *
1983 gadgetfs_create_file (struct super_block *sb, char const *name,
1984 void *data, const struct file_operations *fops)
1985 {
1986 struct dentry *dentry;
1987 struct inode *inode;
1988
1989 dentry = d_alloc_name(sb->s_root, name);
1990 if (!dentry)
1991 return NULL;
1992
1993 inode = gadgetfs_make_inode (sb, data, fops,
1994 S_IFREG | (default_perm & S_IRWXUGO));
1995 if (!inode) {
1996 dput(dentry);
1997 return NULL;
1998 }
1999 d_add (dentry, inode);
2000 return dentry;
2001 }
2002
2003 static const struct super_operations gadget_fs_operations = {
2004 .statfs = simple_statfs,
2005 .drop_inode = generic_delete_inode,
2006 };
2007
2008 static int
2009 gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2010 {
2011 struct inode *inode;
2012 struct dev_data *dev;
2013 int rc;
2014
2015 mutex_lock(&sb_mutex);
2016
2017 if (the_device) {
2018 rc = -ESRCH;
2019 goto Done;
2020 }
2021
2022 CHIP = usb_get_gadget_udc_name();
2023 if (!CHIP) {
2024 rc = -ENODEV;
2025 goto Done;
2026 }
2027
2028 /* superblock */
2029 sb->s_blocksize = PAGE_SIZE;
2030 sb->s_blocksize_bits = PAGE_SHIFT;
2031 sb->s_magic = GADGETFS_MAGIC;
2032 sb->s_op = &gadget_fs_operations;
2033 sb->s_time_gran = 1;
2034
2035 /* root inode */
2036 inode = gadgetfs_make_inode (sb,
2037 NULL, &simple_dir_operations,
2038 S_IFDIR | S_IRUGO | S_IXUGO);
2039 if (!inode)
2040 goto Enomem;
2041 inode->i_op = &simple_dir_inode_operations;
2042 if (!(sb->s_root = d_make_root (inode)))
2043 goto Enomem;
2044
2045 /* the ep0 file is named after the controller we expect;
2046 * user mode code can use it for sanity checks, like we do.
2047 */
2048 dev = dev_new ();
2049 if (!dev)
2050 goto Enomem;
2051
2052 dev->sb = sb;
2053 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2054 if (!dev->dentry) {
2055 put_dev(dev);
2056 goto Enomem;
2057 }
2058
2059 /* other endpoint files are available after hardware setup,
2060 * from binding to a controller.
2061 */
2062 the_device = dev;
2063 rc = 0;
2064 goto Done;
2065
2066 Enomem:
2067 kfree(CHIP);
2068 CHIP = NULL;
2069 rc = -ENOMEM;
2070
2071 Done:
2072 mutex_unlock(&sb_mutex);
2073 return rc;
2074 }
2075
2076 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2077 static int gadgetfs_get_tree(struct fs_context *fc)
2078 {
2079 return get_tree_single(fc, gadgetfs_fill_super);
2080 }
2081
2082 static const struct fs_context_operations gadgetfs_context_ops = {
2083 .get_tree = gadgetfs_get_tree,
2084 };
2085
2086 static int gadgetfs_init_fs_context(struct fs_context *fc)
2087 {
2088 fc->ops = &gadgetfs_context_ops;
2089 return 0;
2090 }
2091
2092 static void
2093 static void gadgetfs_kill_sb (struct super_block *sb)
2094 {
2095 mutex_lock(&sb_mutex);
2096 kill_litter_super (sb);
2097 if (the_device) {
2098 put_dev (the_device);
2099 the_device = NULL;
2100 }
2101 kfree(CHIP);
2102 CHIP = NULL;
2103 mutex_unlock(&sb_mutex);
2104 }
2105
2106 /*----------------------------------------------------------------------*/
2107
2108 static struct file_system_type gadgetfs_type = {
2109 .owner = THIS_MODULE,
2110 .name = shortname,
2111 .init_fs_context = gadgetfs_init_fs_context,
2112 .kill_sb = gadgetfs_kill_sb,
2113 };
2114 MODULE_ALIAS_FS("gadgetfs");
2115
2116 /*----------------------------------------------------------------------*/
2117
2118 static int __init gadgetfs_init (void)
2119 {
2120 int status;
2121
2122 status = register_filesystem (&gadgetfs_type);
2123 if (status == 0)
2124 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2125 shortname, driver_desc);
2126 return status;
2127 }
2128 module_init (gadgetfs_init);
2129
2130 static void __exit gadgetfs_cleanup (void)
2131 {
2132 pr_debug ("unregister %s\n", shortname);
2133 unregister_filesystem (&gadgetfs_type);
2134 }
2135 module_exit (gadgetfs_cleanup);
2136
2137