xref: /openbmc/linux/drivers/usb/gadget/legacy/inode.c (revision af958a38)
1 /*
2  * inode.c -- user mode filesystem api for usb gadget controllers
3  *
4  * Copyright (C) 2003-2004 David Brownell
5  * Copyright (C) 2003 Agilent Technologies
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 
14 /* #define VERBOSE_DEBUG */
15 
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/fs.h>
19 #include <linux/pagemap.h>
20 #include <linux/uts.h>
21 #include <linux/wait.h>
22 #include <linux/compiler.h>
23 #include <asm/uaccess.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/poll.h>
27 #include <linux/mmu_context.h>
28 #include <linux/aio.h>
29 
30 #include <linux/device.h>
31 #include <linux/moduleparam.h>
32 
33 #include <linux/usb/gadgetfs.h>
34 #include <linux/usb/gadget.h>
35 
36 
37 /*
38  * The gadgetfs API maps each endpoint to a file descriptor so that you
39  * can use standard synchronous read/write calls for I/O.  There's some
40  * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
41  * drivers show how this works in practice.  You can also use AIO to
42  * eliminate I/O gaps between requests, to help when streaming data.
43  *
44  * Key parts that must be USB-specific are protocols defining how the
45  * read/write operations relate to the hardware state machines.  There
46  * are two types of files.  One type is for the device, implementing ep0.
47  * The other type is for each IN or OUT endpoint.  In both cases, the
48  * user mode driver must configure the hardware before using it.
49  *
50  * - First, dev_config() is called when /dev/gadget/$CHIP is configured
51  *   (by writing configuration and device descriptors).  Afterwards it
52  *   may serve as a source of device events, used to handle all control
53  *   requests other than basic enumeration.
54  *
55  * - Then, after a SET_CONFIGURATION control request, ep_config() is
56  *   called when each /dev/gadget/ep* file is configured (by writing
57  *   endpoint descriptors).  Afterwards these files are used to write()
58  *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
59  *   direction" request is issued (like reading an IN endpoint).
60  *
61  * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
62  * not possible on all hardware.  For example, precise fault handling with
63  * respect to data left in endpoint fifos after aborted operations; or
64  * selective clearing of endpoint halts, to implement SET_INTERFACE.
65  */
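
/* A rough usermode sketch of that flow (illustrative only: "net2280"
 * stands in for $CHIP, and the descriptor blob is assumed to have been
 * built as described under DEVICE INITIALIZATION further down):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	static int run_gadget(const void *descriptors, size_t desc_len)
 *	{
 *		struct usb_gadgetfs_event event;
 *		int ep0 = open("/dev/gadget/net2280", O_RDWR);
 *
 *		if (ep0 < 0)
 *			return ep0;
 *		if (write(ep0, descriptors, desc_len) < 0) {
 *			close(ep0);
 *			return -1;
 *		}
 *		while (read(ep0, &event, sizeof event) == sizeof event) {
 *			if (event.type != GADGETFS_SETUP)
 *				continue;
 *			// handle event.u.setup; a delegated SET_CONFIGURATION
 *			// is the usual cue to set up the /dev/gadget/ep* files
 *		}
 *		close(ep0);		// closing ep0 shuts the whole gadget down
 *		return 0;
 *	}
 */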
66 
67 #define	DRIVER_DESC	"USB Gadget filesystem"
68 #define	DRIVER_VERSION	"24 Aug 2004"
69 
70 static const char driver_desc [] = DRIVER_DESC;
71 static const char shortname [] = "gadgetfs";
72 
73 MODULE_DESCRIPTION (DRIVER_DESC);
74 MODULE_AUTHOR ("David Brownell");
75 MODULE_LICENSE ("GPL");
76 
77 
78 /*----------------------------------------------------------------------*/
79 
80 #define GADGETFS_MAGIC		0xaee71ee7
81 
82 /* /dev/gadget/$CHIP represents ep0 and the whole device */
83 enum ep0_state {
84 	/* DISABLED is the initial state.
85 	 */
86 	STATE_DEV_DISABLED = 0,
87 
88 	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
89 	 * ep0/device i/o modes and binding to the controller.  Driver
90 	 * must always write descriptors to initialize the device, then
91 	 * the device becomes UNCONNECTED until enumeration.
92 	 */
93 	STATE_DEV_OPENED,
94 
95 	/* From then on, ep0 fd is in either of two basic modes:
96 	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
97 	 * - SETUP: read/write will transfer control data and succeed;
98 	 *   or if "wrong direction", performs protocol stall
99 	 */
100 	STATE_DEV_UNCONNECTED,
101 	STATE_DEV_CONNECTED,
102 	STATE_DEV_SETUP,
103 
104 	/* UNBOUND means the driver closed ep0, so the device won't be
105 	 * accessible again (DEV_DISABLED) until all fds are closed.
106 	 */
107 	STATE_DEV_UNBOUND,
108 };
109 
110 /* enough for the whole queue: most events invalidate others */
111 #define	N_EVENT			5
112 
113 struct dev_data {
114 	spinlock_t			lock;
115 	atomic_t			count;
116 	enum ep0_state			state;		/* P: lock */
117 	struct usb_gadgetfs_event	event [N_EVENT];
118 	unsigned			ev_next;
119 	struct fasync_struct		*fasync;
120 	u8				current_config;
121 
122 	/* drivers reading ep0 MUST handle control requests (SETUP)
123 	 * reported that way; else the host will time out.
124 	 */
125 	unsigned			usermode_setup : 1,
126 					setup_in : 1,
127 					setup_can_stall : 1,
128 					setup_out_ready : 1,
129 					setup_out_error : 1,
130 					setup_abort : 1;
131 	unsigned			setup_wLength;
132 
133 	/* the rest is basically write-once */
134 	struct usb_config_descriptor	*config, *hs_config;
135 	struct usb_device_descriptor	*dev;
136 	struct usb_request		*req;
137 	struct usb_gadget		*gadget;
138 	struct list_head		epfiles;
139 	void				*buf;
140 	wait_queue_head_t		wait;
141 	struct super_block		*sb;
142 	struct dentry			*dentry;
143 
144 	/* except this scratch i/o buffer for ep0 */
145 	u8				rbuf [256];
146 };
147 
148 static inline void get_dev (struct dev_data *data)
149 {
150 	atomic_inc (&data->count);
151 }
152 
153 static void put_dev (struct dev_data *data)
154 {
155 	if (likely (!atomic_dec_and_test (&data->count)))
156 		return;
157 	/* needs no more cleanup */
158 	BUG_ON (waitqueue_active (&data->wait));
159 	kfree (data);
160 }
161 
162 static struct dev_data *dev_new (void)
163 {
164 	struct dev_data		*dev;
165 
166 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
167 	if (!dev)
168 		return NULL;
169 	dev->state = STATE_DEV_DISABLED;
170 	atomic_set (&dev->count, 1);
171 	spin_lock_init (&dev->lock);
172 	INIT_LIST_HEAD (&dev->epfiles);
173 	init_waitqueue_head (&dev->wait);
174 	return dev;
175 }
176 
177 /*----------------------------------------------------------------------*/
178 
179 /* other /dev/gadget/$ENDPOINT files represent endpoints */
180 enum ep_state {
181 	STATE_EP_DISABLED = 0,
182 	STATE_EP_READY,
183 	STATE_EP_ENABLED,
184 	STATE_EP_UNBOUND,
185 };
186 
187 struct ep_data {
188 	struct mutex			lock;
189 	enum ep_state			state;
190 	atomic_t			count;
191 	struct dev_data			*dev;
192 	/* must hold dev->lock before accessing ep or req */
193 	struct usb_ep			*ep;
194 	struct usb_request		*req;
195 	ssize_t				status;
196 	char				name [16];
197 	struct usb_endpoint_descriptor	desc, hs_desc;
198 	struct list_head		epfiles;
199 	wait_queue_head_t		wait;
200 	struct dentry			*dentry;
201 	struct inode			*inode;
202 };
203 
204 static inline void get_ep (struct ep_data *data)
205 {
206 	atomic_inc (&data->count);
207 }
208 
209 static void put_ep (struct ep_data *data)
210 {
211 	if (likely (!atomic_dec_and_test (&data->count)))
212 		return;
213 	put_dev (data->dev);
214 	/* needs no more cleanup */
215 	BUG_ON (!list_empty (&data->epfiles));
216 	BUG_ON (waitqueue_active (&data->wait));
217 	kfree (data);
218 }
219 
220 /*----------------------------------------------------------------------*/
221 
222 /* most "how to use the hardware" policy choices are in userspace:
223  * mapping endpoint roles (which the driver needs) to the capabilities
224  * which the usb controller has.  most of those capabilities are exposed
225  * implicitly, starting with the driver name and then endpoint names.
226  */
227 
228 static const char *CHIP;
229 
230 /*----------------------------------------------------------------------*/
231 
232 /* NOTE:  don't use dev_printk calls before binding to the gadget
233  * at the end of ep0 configuration, or after unbind.
234  */
235 
236 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
237 #define xprintk(d,level,fmt,args...) \
238 	printk(level "%s: " fmt , shortname , ## args)
239 
240 #ifdef DEBUG
241 #define DBG(dev,fmt,args...) \
242 	xprintk(dev , KERN_DEBUG , fmt , ## args)
243 #else
244 #define DBG(dev,fmt,args...) \
245 	do { } while (0)
246 #endif /* DEBUG */
247 
248 #ifdef VERBOSE_DEBUG
249 #define VDEBUG	DBG
250 #else
251 #define VDEBUG(dev,fmt,args...) \
252 	do { } while (0)
253 #endif /* VERBOSE_DEBUG */
254 
255 #define ERROR(dev,fmt,args...) \
256 	xprintk(dev , KERN_ERR , fmt , ## args)
257 #define INFO(dev,fmt,args...) \
258 	xprintk(dev , KERN_INFO , fmt , ## args)
259 
260 
261 /*----------------------------------------------------------------------*/
262 
263 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
264  *
265  * After opening, configure non-control endpoints.  Then use normal
266  * stream read() and write() requests; and maybe ioctl() to get more
267  * precise FIFO status when recovering from cancellation.
268  */
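
/* For example, a usermode driver might push one chunk of IN data to an
 * already configured endpoint file like this (a sketch only; the fd is
 * assumed to come from the ENDPOINT INITIALIZATION sequence below):
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	static int send_chunk(int ep_fd, const void *data, size_t len)
 *	{
 *		ssize_t n = write(ep_fd, data, len);	// blocks until the request completes
 *
 *		if (n < 0 && errno == EINTR) {
 *			// how much of the aborted transfer is still queued?
 *			ioctl(ep_fd, GADGETFS_FIFO_STATUS);
 *		}
 *		return n == (ssize_t) len ? 0 : -1;
 *	}
 */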
269 
270 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
271 {
272 	struct ep_data	*epdata = ep->driver_data;
273 
274 	if (!req->context)
275 		return;
276 	if (req->status)
277 		epdata->status = req->status;
278 	else
279 		epdata->status = req->actual;
280 	complete ((struct completion *)req->context);
281 }
282 
283 /* lock the endpoint's mutex, returning zero only once it's enabled.
284  * still need dev->lock to use epdata->ep.
285  */
286 static int
287 get_ready_ep (unsigned f_flags, struct ep_data *epdata)
288 {
289 	int	val;
290 
291 	if (f_flags & O_NONBLOCK) {
292 		if (!mutex_trylock(&epdata->lock))
293 			goto nonblock;
294 		if (epdata->state != STATE_EP_ENABLED) {
295 			mutex_unlock(&epdata->lock);
296 nonblock:
297 			val = -EAGAIN;
298 		} else
299 			val = 0;
300 		return val;
301 	}
302 
303 	val = mutex_lock_interruptible(&epdata->lock);
304 	if (val < 0)
305 		return val;
306 
307 	switch (epdata->state) {
308 	case STATE_EP_ENABLED:
309 		break;
310 	// case STATE_EP_DISABLED:		/* "can't happen" */
311 	// case STATE_EP_READY:			/* "can't happen" */
312 	default:				/* error! */
313 		pr_debug ("%s: ep %p not available, state %d\n",
314 				shortname, epdata, epdata->state);
315 		// FALLTHROUGH
316 	case STATE_EP_UNBOUND:			/* clean disconnect */
317 		val = -ENODEV;
318 		mutex_unlock(&epdata->lock);
319 	}
320 	return val;
321 }
322 
323 static ssize_t
324 ep_io (struct ep_data *epdata, void *buf, unsigned len)
325 {
326 	DECLARE_COMPLETION_ONSTACK (done);
327 	int value;
328 
329 	spin_lock_irq (&epdata->dev->lock);
330 	if (likely (epdata->ep != NULL)) {
331 		struct usb_request	*req = epdata->req;
332 
333 		req->context = &done;
334 		req->complete = epio_complete;
335 		req->buf = buf;
336 		req->length = len;
337 		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
338 	} else
339 		value = -ENODEV;
340 	spin_unlock_irq (&epdata->dev->lock);
341 
342 	if (likely (value == 0)) {
343 		value = wait_event_interruptible (done.wait, done.done);
344 		if (value != 0) {
345 			spin_lock_irq (&epdata->dev->lock);
346 			if (likely (epdata->ep != NULL)) {
347 				DBG (epdata->dev, "%s i/o interrupted\n",
348 						epdata->name);
349 				usb_ep_dequeue (epdata->ep, epdata->req);
350 				spin_unlock_irq (&epdata->dev->lock);
351 
352 				wait_event (done.wait, done.done);
353 				if (epdata->status == -ECONNRESET)
354 					epdata->status = -EINTR;
355 			} else {
356 				spin_unlock_irq (&epdata->dev->lock);
357 
358 				DBG (epdata->dev, "endpoint gone\n");
359 				epdata->status = -ENODEV;
360 			}
361 		}
362 		return epdata->status;
363 	}
364 	return value;
365 }
366 
367 
368 /* handle a synchronous OUT bulk/intr/iso transfer */
369 static ssize_t
370 ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
371 {
372 	struct ep_data		*data = fd->private_data;
373 	void			*kbuf;
374 	ssize_t			value;
375 
376 	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
377 		return value;
378 
379 	/* halt any endpoint by doing a "wrong direction" i/o call */
380 	if (usb_endpoint_dir_in(&data->desc)) {
381 		if (usb_endpoint_xfer_isoc(&data->desc)) {
382 			mutex_unlock(&data->lock);
383 			return -EINVAL;
384 		}
385 		DBG (data->dev, "%s halt\n", data->name);
386 		spin_lock_irq (&data->dev->lock);
387 		if (likely (data->ep != NULL))
388 			usb_ep_set_halt (data->ep);
389 		spin_unlock_irq (&data->dev->lock);
390 		mutex_unlock(&data->lock);
391 		return -EBADMSG;
392 	}
393 
394 	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
395 
396 	value = -ENOMEM;
397 	kbuf = kmalloc (len, GFP_KERNEL);
398 	if (unlikely (!kbuf))
399 		goto free1;
400 
401 	value = ep_io (data, kbuf, len);
402 	VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
403 		data->name, len, (int) value);
404 	if (value >= 0 && copy_to_user (buf, kbuf, value))
405 		value = -EFAULT;
406 
407 free1:
408 	mutex_unlock(&data->lock);
409 	kfree (kbuf);
410 	return value;
411 }
412 
413 /* handle a synchronous IN bulk/intr/iso transfer */
414 static ssize_t
415 ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
416 {
417 	struct ep_data		*data = fd->private_data;
418 	void			*kbuf;
419 	ssize_t			value;
420 
421 	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
422 		return value;
423 
424 	/* halt any endpoint by doing a "wrong direction" i/o call */
425 	if (!usb_endpoint_dir_in(&data->desc)) {
426 		if (usb_endpoint_xfer_isoc(&data->desc)) {
427 			mutex_unlock(&data->lock);
428 			return -EINVAL;
429 		}
430 		DBG (data->dev, "%s halt\n", data->name);
431 		spin_lock_irq (&data->dev->lock);
432 		if (likely (data->ep != NULL))
433 			usb_ep_set_halt (data->ep);
434 		spin_unlock_irq (&data->dev->lock);
435 		mutex_unlock(&data->lock);
436 		return -EBADMSG;
437 	}
438 
439 	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
440 
441 	value = -ENOMEM;
442 	kbuf = memdup_user(buf, len);
443 	if (IS_ERR(kbuf)) {
444 		value = PTR_ERR(kbuf);
445 		goto free1;
446 	}
447 
448 	value = ep_io (data, kbuf, len);
449 	VDEBUG (data->dev, "%s write %zu IN, status %d\n",
450 		data->name, len, (int) value);
451 free1:
452 	mutex_unlock(&data->lock);
453 	return value;
454 }
455 
456 static int
457 ep_release (struct inode *inode, struct file *fd)
458 {
459 	struct ep_data		*data = fd->private_data;
460 	int value;
461 
462 	value = mutex_lock_interruptible(&data->lock);
463 	if (value < 0)
464 		return value;
465 
466 	/* clean up if this can be reopened */
467 	if (data->state != STATE_EP_UNBOUND) {
468 		data->state = STATE_EP_DISABLED;
469 		data->desc.bDescriptorType = 0;
470 		data->hs_desc.bDescriptorType = 0;
471 		usb_ep_disable(data->ep);
472 	}
473 	mutex_unlock(&data->lock);
474 	put_ep (data);
475 	return 0;
476 }
477 
478 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
479 {
480 	struct ep_data		*data = fd->private_data;
481 	int			status;
482 
483 	if ((status = get_ready_ep (fd->f_flags, data)) < 0)
484 		return status;
485 
486 	spin_lock_irq (&data->dev->lock);
487 	if (likely (data->ep != NULL)) {
488 		switch (code) {
489 		case GADGETFS_FIFO_STATUS:
490 			status = usb_ep_fifo_status (data->ep);
491 			break;
492 		case GADGETFS_FIFO_FLUSH:
493 			usb_ep_fifo_flush (data->ep);
494 			break;
495 		case GADGETFS_CLEAR_HALT:
496 			status = usb_ep_clear_halt (data->ep);
497 			break;
498 		default:
499 			status = -ENOTTY;
500 		}
501 	} else
502 		status = -ENODEV;
503 	spin_unlock_irq (&data->dev->lock);
504 	mutex_unlock(&data->lock);
505 	return status;
506 }
507 
508 /*----------------------------------------------------------------------*/
509 
510 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
511 
512 struct kiocb_priv {
513 	struct usb_request	*req;
514 	struct ep_data		*epdata;
515 	struct kiocb		*iocb;
516 	struct mm_struct	*mm;
517 	struct work_struct	work;
518 	void			*buf;
519 	const struct iovec	*iv;
520 	unsigned long		nr_segs;
521 	unsigned		actual;
522 };
523 
524 static int ep_aio_cancel(struct kiocb *iocb)
525 {
526 	struct kiocb_priv	*priv = iocb->private;
527 	struct ep_data		*epdata;
528 	int			value;
529 
530 	local_irq_disable();
531 	epdata = priv->epdata;
532 	// spin_lock(&epdata->dev->lock);
533 	if (likely(epdata && epdata->ep && priv->req))
534 		value = usb_ep_dequeue (epdata->ep, priv->req);
535 	else
536 		value = -EINVAL;
537 	// spin_unlock(&epdata->dev->lock);
538 	local_irq_enable();
539 
540 	return value;
541 }
542 
543 static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
544 {
545 	ssize_t			len, total;
546 	void			*to_copy;
547 	int			i;
548 
549 	/* copy stuff into user buffers */
550 	total = priv->actual;
551 	len = 0;
552 	to_copy = priv->buf;
553 	for (i=0; i < priv->nr_segs; i++) {
554 		ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
555 
556 		if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
557 			if (len == 0)
558 				len = -EFAULT;
559 			break;
560 		}
561 
562 		total -= this;
563 		len += this;
564 		to_copy += this;
565 		if (total == 0)
566 			break;
567 	}
568 
569 	return len;
570 }
571 
572 static void ep_user_copy_worker(struct work_struct *work)
573 {
574 	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
575 	struct mm_struct *mm = priv->mm;
576 	struct kiocb *iocb = priv->iocb;
577 	size_t ret;
578 
579 	use_mm(mm);
580 	ret = ep_copy_to_user(priv);
581 	unuse_mm(mm);
582 
583 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
584 	aio_complete(iocb, ret, ret);
585 
586 	kfree(priv->buf);
587 	kfree(priv);
588 }
589 
590 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
591 {
592 	struct kiocb		*iocb = req->context;
593 	struct kiocb_priv	*priv = iocb->private;
594 	struct ep_data		*epdata = priv->epdata;
595 
596 	/* lock against disconnect (and ideally, cancel) */
597 	spin_lock(&epdata->dev->lock);
598 	priv->req = NULL;
599 	priv->epdata = NULL;
600 
601 	/* if this was a write or a read returning no data then we
602 	 * don't need to copy anything to userspace, so we can
603 	 * complete the aio request immediately.
604 	 */
605 	if (priv->iv == NULL || unlikely(req->actual == 0)) {
606 		kfree(req->buf);
607 		kfree(priv);
608 		iocb->private = NULL;
609 		/* aio_complete() reports bytes-transferred _and_ faults */
610 		aio_complete(iocb, req->actual ? req->actual : req->status,
611 				req->status);
612 	} else {
613 		/* ep_copy_to_user() won't report both; we hide some faults */
614 		if (unlikely(0 != req->status))
615 			DBG(epdata->dev, "%s fault %d len %d\n",
616 				ep->name, req->status, req->actual);
617 
618 		priv->buf = req->buf;
619 		priv->actual = req->actual;
620 		schedule_work(&priv->work);
621 	}
622 	spin_unlock(&epdata->dev->lock);
623 
624 	usb_ep_free_request(ep, req);
625 	put_ep(epdata);
626 }
627 
628 static ssize_t
629 ep_aio_rwtail(
630 	struct kiocb	*iocb,
631 	char		*buf,
632 	size_t		len,
633 	struct ep_data	*epdata,
634 	const struct iovec *iv,
635 	unsigned long	nr_segs
636 )
637 {
638 	struct kiocb_priv	*priv;
639 	struct usb_request	*req;
640 	ssize_t			value;
641 
642 	priv = kmalloc(sizeof *priv, GFP_KERNEL);
643 	if (!priv) {
644 		value = -ENOMEM;
645 fail:
646 		kfree(buf);
647 		return value;
648 	}
649 	iocb->private = priv;
650 	priv->iocb = iocb;
651 	priv->iv = iv;
652 	priv->nr_segs = nr_segs;
653 	INIT_WORK(&priv->work, ep_user_copy_worker);
654 
655 	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
656 	if (unlikely(value < 0)) {
657 		kfree(priv);
658 		goto fail;
659 	}
660 
661 	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
662 	get_ep(epdata);
663 	priv->epdata = epdata;
664 	priv->actual = 0;
665 	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
666 
667 	/* each kiocb is coupled to one usb_request, but we can't
668 	 * allocate or submit those if the host disconnected.
669 	 */
670 	spin_lock_irq(&epdata->dev->lock);
671 	if (likely(epdata->ep)) {
672 		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
673 		if (likely(req)) {
674 			priv->req = req;
675 			req->buf = buf;
676 			req->length = len;
677 			req->complete = ep_aio_complete;
678 			req->context = iocb;
679 			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
680 			if (unlikely(0 != value))
681 				usb_ep_free_request(epdata->ep, req);
682 		} else
683 			value = -EAGAIN;
684 	} else
685 		value = -ENODEV;
686 	spin_unlock_irq(&epdata->dev->lock);
687 
688 	mutex_unlock(&epdata->lock);
689 
690 	if (unlikely(value)) {
691 		kfree(priv);
692 		put_ep(epdata);
693 	} else
694 		value = -EIOCBQUEUED;
695 	return value;
696 }
697 
698 static ssize_t
699 ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
700 		unsigned long nr_segs, loff_t o)
701 {
702 	struct ep_data		*epdata = iocb->ki_filp->private_data;
703 	char			*buf;
704 
705 	if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
706 		return -EINVAL;
707 
708 	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
709 	if (unlikely(!buf))
710 		return -ENOMEM;
711 
712 	return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
713 }
714 
715 static ssize_t
716 ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
717 		unsigned long nr_segs, loff_t o)
718 {
719 	struct ep_data		*epdata = iocb->ki_filp->private_data;
720 	char			*buf;
721 	size_t			len = 0;
722 	int			i = 0;
723 
724 	if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
725 		return -EINVAL;
726 
727 	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
728 	if (unlikely(!buf))
729 		return -ENOMEM;
730 
731 	for (i=0; i < nr_segs; i++) {
732 		if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
733 				iov[i].iov_len) != 0)) {
734 			kfree(buf);
735 			return -EFAULT;
736 		}
737 		len += iov[i].iov_len;
738 	}
739 	return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
740 }
741 
742 /*----------------------------------------------------------------------*/
743 
744 /* used after endpoint configuration */
745 static const struct file_operations ep_io_operations = {
746 	.owner =	THIS_MODULE,
747 	.llseek =	no_llseek,
748 
749 	.read =		ep_read,
750 	.write =	ep_write,
751 	.unlocked_ioctl = ep_ioctl,
752 	.release =	ep_release,
753 
754 	.aio_read =	ep_aio_read,
755 	.aio_write =	ep_aio_write,
756 };
757 
758 /* ENDPOINT INITIALIZATION
759  *
760  *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
761  *     status = write (fd, descriptors, sizeof descriptors)
762  *
763  * That write establishes the endpoint configuration, configuring
764  * the controller to process bulk, interrupt, or isochronous transfers
765  * at the right maxpacket size, and so on.
766  *
767  * The descriptors are message type 1, identified by a host order u32
768  * at the beginning of what's written.  Descriptor order is: full/low
769  * speed descriptor, then optional high speed descriptor.
770  */
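
/* A sketch of that configuration write (illustrative only: "ep1in" is a
 * placeholder, since endpoint names vary by controller, and the caller
 * is assumed to supply descriptors matching its config):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	static int config_ep(const struct usb_endpoint_descriptor *fs,
 *			const struct usb_endpoint_descriptor *hs)
 *	{
 *		__u32 tag = 1;
 *		__u8 buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *		size_t len = 4;
 *		int fd;
 *
 *		memcpy(buf, &tag, 4);
 *		memcpy(buf + len, fs, USB_DT_ENDPOINT_SIZE);
 *		len += USB_DT_ENDPOINT_SIZE;
 *		if (hs) {		// optional, except on high speed hardware
 *			memcpy(buf + len, hs, USB_DT_ENDPOINT_SIZE);
 *			len += USB_DT_ENDPOINT_SIZE;
 *		}
 *
 *		fd = open("/dev/gadget/ep1in", O_RDWR);
 *		if (fd < 0)
 *			return fd;
 *		if (write(fd, buf, len) != (ssize_t) len) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// now read()/write() move data on this endpoint
 *	}
 */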
771 static ssize_t
772 ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
773 {
774 	struct ep_data		*data = fd->private_data;
775 	struct usb_ep		*ep;
776 	u32			tag;
777 	int			value, length = len;
778 
779 	value = mutex_lock_interruptible(&data->lock);
780 	if (value < 0)
781 		return value;
782 
783 	if (data->state != STATE_EP_READY) {
784 		value = -EL2HLT;
785 		goto fail;
786 	}
787 
788 	value = len;
789 	if (len < USB_DT_ENDPOINT_SIZE + 4)
790 		goto fail0;
791 
792 	/* we might need to change message format someday */
793 	if (copy_from_user (&tag, buf, 4)) {
794 		goto fail1;
795 	}
796 	if (tag != 1) {
797 		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
798 		goto fail0;
799 	}
800 	buf += 4;
801 	len -= 4;
802 
803 	/* NOTE:  audio endpoint extensions not accepted here;
804 	 * just don't include the extra bytes.
805 	 */
806 
807 	/* full/low speed descriptor, then high speed */
808 	if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
809 		goto fail1;
810 	}
811 	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
812 			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
813 		goto fail0;
814 	if (len != USB_DT_ENDPOINT_SIZE) {
815 		if (len != 2 * USB_DT_ENDPOINT_SIZE)
816 			goto fail0;
817 		if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
818 					USB_DT_ENDPOINT_SIZE)) {
819 			goto fail1;
820 		}
821 		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
822 				|| data->hs_desc.bDescriptorType
823 					!= USB_DT_ENDPOINT) {
824 			DBG(data->dev, "config %s, bad hs length or type\n",
825 					data->name);
826 			goto fail0;
827 		}
828 	}
829 
830 	spin_lock_irq (&data->dev->lock);
831 	if (data->dev->state == STATE_DEV_UNBOUND) {
832 		value = -ENOENT;
833 		goto gone;
834 	} else if ((ep = data->ep) == NULL) {
835 		value = -ENODEV;
836 		goto gone;
837 	}
838 	switch (data->dev->gadget->speed) {
839 	case USB_SPEED_LOW:
840 	case USB_SPEED_FULL:
841 		ep->desc = &data->desc;
842 		value = usb_ep_enable(ep);
843 		if (value == 0)
844 			data->state = STATE_EP_ENABLED;
845 		break;
846 	case USB_SPEED_HIGH:
847 		/* fails if caller didn't provide that descriptor... */
848 		ep->desc = &data->hs_desc;
849 		value = usb_ep_enable(ep);
850 		if (value == 0)
851 			data->state = STATE_EP_ENABLED;
852 		break;
853 	default:
854 		DBG(data->dev, "unconnected, %s init abandoned\n",
855 				data->name);
856 		value = -EINVAL;
857 	}
858 	if (value == 0) {
859 		fd->f_op = &ep_io_operations;
860 		value = length;
861 	}
862 gone:
863 	spin_unlock_irq (&data->dev->lock);
864 	if (value < 0) {
865 fail:
866 		data->desc.bDescriptorType = 0;
867 		data->hs_desc.bDescriptorType = 0;
868 	}
869 	mutex_unlock(&data->lock);
870 	return value;
871 fail0:
872 	value = -EINVAL;
873 	goto fail;
874 fail1:
875 	value = -EFAULT;
876 	goto fail;
877 }
878 
879 static int
880 ep_open (struct inode *inode, struct file *fd)
881 {
882 	struct ep_data		*data = inode->i_private;
883 	int			value = -EBUSY;
884 
885 	if (mutex_lock_interruptible(&data->lock) != 0)
886 		return -EINTR;
887 	spin_lock_irq (&data->dev->lock);
888 	if (data->dev->state == STATE_DEV_UNBOUND)
889 		value = -ENOENT;
890 	else if (data->state == STATE_EP_DISABLED) {
891 		value = 0;
892 		data->state = STATE_EP_READY;
893 		get_ep (data);
894 		fd->private_data = data;
895 		VDEBUG (data->dev, "%s ready\n", data->name);
896 	} else
897 		DBG (data->dev, "%s state %d\n",
898 			data->name, data->state);
899 	spin_unlock_irq (&data->dev->lock);
900 	mutex_unlock(&data->lock);
901 	return value;
902 }
903 
904 /* used before endpoint configuration */
905 static const struct file_operations ep_config_operations = {
906 	.llseek =	no_llseek,
907 
908 	.open =		ep_open,
909 	.write =	ep_config,
910 	.release =	ep_release,
911 };
912 
913 /*----------------------------------------------------------------------*/
914 
915 /* EP0 IMPLEMENTATION can be partly in userspace.
916  *
917  * Drivers that use this facility receive various events, including
918  * control requests the kernel doesn't handle.  Drivers that don't
919  * use this facility may be too simple-minded for real applications.
920  */
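
/* For instance, after reading a GADGETFS_SETUP event from ep0, a
 * usermode driver might answer the delegated request like this (a
 * sketch; fill_reply() is a hypothetical helper that builds whatever
 * IN data the request asks for):
 *
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	// hypothetical helper, provided by the usermode driver
 *	extern int fill_reply(void *buf, const struct usb_ctrlrequest *setup);
 *
 *	static void handle_setup(int ep0, const struct usb_ctrlrequest *setup)
 *	{
 *		char buf[256];
 *
 *		if (setup->bRequestType & USB_DIR_IN) {
 *			int len = fill_reply(buf, setup);
 *
 *			write(ep0, buf, len);	// IN DATA stage, then STATUS
 *		} else if (setup->wLength == 0) {
 *			read(ep0, buf, 0);	// zero length read acks STATUS
 *		} else {
 *			read(ep0, buf, sizeof buf);	// collect the OUT DATA stage
 *		}
 *		// a "wrong direction" ep0 call would stall the request instead
 *	}
 */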
921 
922 static inline void ep0_readable (struct dev_data *dev)
923 {
924 	wake_up (&dev->wait);
925 	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
926 }
927 
928 static void clean_req (struct usb_ep *ep, struct usb_request *req)
929 {
930 	struct dev_data		*dev = ep->driver_data;
931 
932 	if (req->buf != dev->rbuf) {
933 		kfree(req->buf);
934 		req->buf = dev->rbuf;
935 	}
936 	req->complete = epio_complete;
937 	dev->setup_out_ready = 0;
938 }
939 
940 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
941 {
942 	struct dev_data		*dev = ep->driver_data;
943 	unsigned long		flags;
944 	int			free = 1;
945 
946 	/* for control OUT, data must still get to userspace */
947 	spin_lock_irqsave(&dev->lock, flags);
948 	if (!dev->setup_in) {
949 		dev->setup_out_error = (req->status != 0);
950 		if (!dev->setup_out_error)
951 			free = 0;
952 		dev->setup_out_ready = 1;
953 		ep0_readable (dev);
954 	}
955 
956 	/* clean up as appropriate */
957 	if (free && req->buf != &dev->rbuf)
958 		clean_req (ep, req);
959 	req->complete = epio_complete;
960 	spin_unlock_irqrestore(&dev->lock, flags);
961 }
962 
963 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
964 {
965 	struct dev_data	*dev = ep->driver_data;
966 
967 	if (dev->setup_out_ready) {
968 		DBG (dev, "ep0 request busy!\n");
969 		return -EBUSY;
970 	}
971 	if (len > sizeof (dev->rbuf))
972 		req->buf = kmalloc(len, GFP_ATOMIC);
973 	if (req->buf == NULL) {
974 		req->buf = dev->rbuf;
975 		return -ENOMEM;
976 	}
977 	req->complete = ep0_complete;
978 	req->length = len;
979 	req->zero = 0;
980 	return 0;
981 }
982 
983 static ssize_t
984 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
985 {
986 	struct dev_data			*dev = fd->private_data;
987 	ssize_t				retval;
988 	enum ep0_state			state;
989 
990 	spin_lock_irq (&dev->lock);
991 
992 	/* report fd mode change before acting on it */
993 	if (dev->setup_abort) {
994 		dev->setup_abort = 0;
995 		retval = -EIDRM;
996 		goto done;
997 	}
998 
999 	/* control DATA stage */
1000 	if ((state = dev->state) == STATE_DEV_SETUP) {
1001 
1002 		if (dev->setup_in) {		/* stall IN */
1003 			VDEBUG(dev, "ep0in stall\n");
1004 			(void) usb_ep_set_halt (dev->gadget->ep0);
1005 			retval = -EL2HLT;
1006 			dev->state = STATE_DEV_CONNECTED;
1007 
1008 		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
1009 			struct usb_ep		*ep = dev->gadget->ep0;
1010 			struct usb_request	*req = dev->req;
1011 
1012 			if ((retval = setup_req (ep, req, 0)) == 0)
1013 				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
1014 			dev->state = STATE_DEV_CONNECTED;
1015 
1016 			/* assume that was SET_CONFIGURATION */
1017 			if (dev->current_config) {
1018 				unsigned power;
1019 
1020 				if (gadget_is_dualspeed(dev->gadget)
1021 						&& (dev->gadget->speed
1022 							== USB_SPEED_HIGH))
1023 					power = dev->hs_config->bMaxPower;
1024 				else
1025 					power = dev->config->bMaxPower;
1026 				usb_gadget_vbus_draw(dev->gadget, 2 * power);
1027 			}
1028 
1029 		} else {			/* collect OUT data */
1030 			if ((fd->f_flags & O_NONBLOCK) != 0
1031 					&& !dev->setup_out_ready) {
1032 				retval = -EAGAIN;
1033 				goto done;
1034 			}
1035 			spin_unlock_irq (&dev->lock);
1036 			retval = wait_event_interruptible (dev->wait,
1037 					dev->setup_out_ready != 0);
1038 
1039 			/* FIXME state could change from under us */
1040 			spin_lock_irq (&dev->lock);
1041 			if (retval)
1042 				goto done;
1043 
1044 			if (dev->state != STATE_DEV_SETUP) {
1045 				retval = -ECANCELED;
1046 				goto done;
1047 			}
1048 			dev->state = STATE_DEV_CONNECTED;
1049 
1050 			if (dev->setup_out_error)
1051 				retval = -EIO;
1052 			else {
1053 				len = min (len, (size_t)dev->req->actual);
1054 // FIXME don't call this with the spinlock held ...
1055 				if (copy_to_user (buf, dev->req->buf, len))
1056 					retval = -EFAULT;
1057 				else
1058 					retval = len;
1059 				clean_req (dev->gadget->ep0, dev->req);
1060 				/* NOTE userspace can't yet choose to stall */
1061 			}
1062 		}
1063 		goto done;
1064 	}
1065 
1066 	/* else normal: return event data */
1067 	if (len < sizeof dev->event [0]) {
1068 		retval = -EINVAL;
1069 		goto done;
1070 	}
1071 	len -= len % sizeof (struct usb_gadgetfs_event);
1072 	dev->usermode_setup = 1;
1073 
1074 scan:
1075 	/* return queued events right away */
1076 	if (dev->ev_next != 0) {
1077 		unsigned		i, n;
1078 
1079 		n = len / sizeof (struct usb_gadgetfs_event);
1080 		if (dev->ev_next < n)
1081 			n = dev->ev_next;
1082 
1083 		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1084 		for (i = 0; i < n; i++) {
1085 			if (dev->event [i].type == GADGETFS_SETUP) {
1086 				dev->state = STATE_DEV_SETUP;
1087 				n = i + 1;
1088 				break;
1089 			}
1090 		}
1091 		spin_unlock_irq (&dev->lock);
1092 		len = n * sizeof (struct usb_gadgetfs_event);
1093 		if (copy_to_user (buf, &dev->event, len))
1094 			retval = -EFAULT;
1095 		else
1096 			retval = len;
1097 		if (len > 0) {
1098 			/* NOTE this doesn't guard against broken drivers;
1099 			 * concurrent ep0 readers may lose events.
1100 			 */
1101 			spin_lock_irq (&dev->lock);
1102 			if (dev->ev_next > n) {
1103 				memmove(&dev->event[0], &dev->event[n],
1104 					sizeof (struct usb_gadgetfs_event)
1105 						* (dev->ev_next - n));
1106 			}
1107 			dev->ev_next -= n;
1108 			spin_unlock_irq (&dev->lock);
1109 		}
1110 		return retval;
1111 	}
1112 	if (fd->f_flags & O_NONBLOCK) {
1113 		retval = -EAGAIN;
1114 		goto done;
1115 	}
1116 
1117 	switch (state) {
1118 	default:
1119 		DBG (dev, "fail %s, state %d\n", __func__, state);
1120 		retval = -ESRCH;
1121 		break;
1122 	case STATE_DEV_UNCONNECTED:
1123 	case STATE_DEV_CONNECTED:
1124 		spin_unlock_irq (&dev->lock);
1125 		DBG (dev, "%s wait\n", __func__);
1126 
1127 		/* wait for events */
1128 		retval = wait_event_interruptible (dev->wait,
1129 				dev->ev_next != 0);
1130 		if (retval < 0)
1131 			return retval;
1132 		spin_lock_irq (&dev->lock);
1133 		goto scan;
1134 	}
1135 
1136 done:
1137 	spin_unlock_irq (&dev->lock);
1138 	return retval;
1139 }
1140 
1141 static struct usb_gadgetfs_event *
1142 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1143 {
1144 	struct usb_gadgetfs_event	*event;
1145 	unsigned			i;
1146 
1147 	switch (type) {
1148 	/* these events purge the queue */
1149 	case GADGETFS_DISCONNECT:
1150 		if (dev->state == STATE_DEV_SETUP)
1151 			dev->setup_abort = 1;
1152 		// FALL THROUGH
1153 	case GADGETFS_CONNECT:
1154 		dev->ev_next = 0;
1155 		break;
1156 	case GADGETFS_SETUP:		/* previous request timed out */
1157 	case GADGETFS_SUSPEND:		/* same effect */
1158 		/* these events can't be repeated */
1159 		for (i = 0; i != dev->ev_next; i++) {
1160 			if (dev->event [i].type != type)
1161 				continue;
1162 			DBG(dev, "discard old event[%d] %d\n", i, type);
1163 			dev->ev_next--;
1164 			if (i == dev->ev_next)
1165 				break;
1166 			/* indices start at zero, for simplicity */
1167 			memmove (&dev->event [i], &dev->event [i + 1],
1168 				sizeof (struct usb_gadgetfs_event)
1169 					* (dev->ev_next - i));
1170 		}
1171 		break;
1172 	default:
1173 		BUG ();
1174 	}
1175 	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1176 	event = &dev->event [dev->ev_next++];
1177 	BUG_ON (dev->ev_next > N_EVENT);
1178 	memset (event, 0, sizeof *event);
1179 	event->type = type;
1180 	return event;
1181 }
1182 
1183 static ssize_t
1184 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1185 {
1186 	struct dev_data		*dev = fd->private_data;
1187 	ssize_t			retval = -ESRCH;
1188 
1189 	spin_lock_irq (&dev->lock);
1190 
1191 	/* report fd mode change before acting on it */
1192 	if (dev->setup_abort) {
1193 		dev->setup_abort = 0;
1194 		retval = -EIDRM;
1195 
1196 	/* data and/or status stage for control request */
1197 	} else if (dev->state == STATE_DEV_SETUP) {
1198 
1199 		/* IN DATA+STATUS; caller makes len <= wLength */
1200 		if (dev->setup_in) {
1201 			retval = setup_req (dev->gadget->ep0, dev->req, len);
1202 			if (retval == 0) {
1203 				dev->state = STATE_DEV_CONNECTED;
1204 				spin_unlock_irq (&dev->lock);
1205 				if (copy_from_user (dev->req->buf, buf, len))
1206 					retval = -EFAULT;
1207 				else {
1208 					if (len < dev->setup_wLength)
1209 						dev->req->zero = 1;
1210 					retval = usb_ep_queue (
1211 						dev->gadget->ep0, dev->req,
1212 						GFP_KERNEL);
1213 				}
1214 				if (retval < 0) {
1215 					spin_lock_irq (&dev->lock);
1216 					clean_req (dev->gadget->ep0, dev->req);
1217 					spin_unlock_irq (&dev->lock);
1218 				} else
1219 					retval = len;
1220 
1221 				return retval;
1222 			}
1223 
1224 		/* can stall some OUT transfers */
1225 		} else if (dev->setup_can_stall) {
1226 			VDEBUG(dev, "ep0out stall\n");
1227 			(void) usb_ep_set_halt (dev->gadget->ep0);
1228 			retval = -EL2HLT;
1229 			dev->state = STATE_DEV_CONNECTED;
1230 		} else {
1231 			DBG(dev, "bogus ep0out stall!\n");
1232 		}
1233 	} else
1234 		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1235 
1236 	spin_unlock_irq (&dev->lock);
1237 	return retval;
1238 }
1239 
1240 static int
1241 ep0_fasync (int f, struct file *fd, int on)
1242 {
1243 	struct dev_data		*dev = fd->private_data;
1244 	// caller must F_SETOWN before signal delivery happens
1245 	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1246 	return fasync_helper (f, fd, on, &dev->fasync);
1247 }
1248 
1249 static struct usb_gadget_driver gadgetfs_driver;
1250 
1251 static int
1252 dev_release (struct inode *inode, struct file *fd)
1253 {
1254 	struct dev_data		*dev = fd->private_data;
1255 
1256 	/* closing ep0 === shutdown all */
1257 
1258 	usb_gadget_unregister_driver (&gadgetfs_driver);
1259 
1260 	/* at this point "good" hardware has disconnected the
1261 	 * device from USB; the host won't see it any more.
1262 	 * alternatively, all host requests will time out.
1263 	 */
1264 
1265 	kfree (dev->buf);
1266 	dev->buf = NULL;
1267 
1268 	/* other endpoints were all decoupled from this device */
1269 	spin_lock_irq(&dev->lock);
1270 	dev->state = STATE_DEV_DISABLED;
1271 	spin_unlock_irq(&dev->lock);
1272 
1273 	put_dev (dev);
1274 	return 0;
1275 }
1276 
1277 static unsigned int
1278 ep0_poll (struct file *fd, poll_table *wait)
1279 {
1280 	struct dev_data		*dev = fd->private_data;
1281 	int			mask = 0;
1282 
1283 	poll_wait(fd, &dev->wait, wait);
1284 
1285 	spin_lock_irq (&dev->lock);
1286 
1287 	/* report fd mode change before acting on it */
1288 	if (dev->setup_abort) {
1289 		dev->setup_abort = 0;
1290 		mask = POLLHUP;
1291 		goto out;
1292 	}
1293 
1294 	if (dev->state == STATE_DEV_SETUP) {
1295 		if (dev->setup_in || dev->setup_can_stall)
1296 			mask = POLLOUT;
1297 	} else {
1298 		if (dev->ev_next != 0)
1299 			mask = POLLIN;
1300 	}
1301 out:
1302 	spin_unlock_irq(&dev->lock);
1303 	return mask;
1304 }
1305 
1306 static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1307 {
1308 	struct dev_data		*dev = fd->private_data;
1309 	struct usb_gadget	*gadget = dev->gadget;
1310 	long ret = -ENOTTY;
1311 
1312 	if (gadget->ops->ioctl)
1313 		ret = gadget->ops->ioctl (gadget, code, value);
1314 
1315 	return ret;
1316 }
1317 
1318 /* used after device configuration */
1319 static const struct file_operations ep0_io_operations = {
1320 	.owner =	THIS_MODULE,
1321 	.llseek =	no_llseek,
1322 
1323 	.read =		ep0_read,
1324 	.write =	ep0_write,
1325 	.fasync =	ep0_fasync,
1326 	.poll =		ep0_poll,
1327 	.unlocked_ioctl =	dev_ioctl,
1328 	.release =	dev_release,
1329 };
1330 
1331 /*----------------------------------------------------------------------*/
1332 
1333 /* The in-kernel gadget driver handles most ep0 issues, in particular
1334  * enumerating the single configuration (as provided from user space).
1335  *
1336  * Unrecognized ep0 requests may be handled in user space.
1337  */
1338 
1339 static void make_qualifier (struct dev_data *dev)
1340 {
1341 	struct usb_qualifier_descriptor		qual;
1342 	struct usb_device_descriptor		*desc;
1343 
1344 	qual.bLength = sizeof qual;
1345 	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1346 	qual.bcdUSB = cpu_to_le16 (0x0200);
1347 
1348 	desc = dev->dev;
1349 	qual.bDeviceClass = desc->bDeviceClass;
1350 	qual.bDeviceSubClass = desc->bDeviceSubClass;
1351 	qual.bDeviceProtocol = desc->bDeviceProtocol;
1352 
1353 	/* assumes ep0 uses the same value for both speeds ... */
1354 	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1355 
1356 	qual.bNumConfigurations = 1;
1357 	qual.bRESERVED = 0;
1358 
1359 	memcpy (dev->rbuf, &qual, sizeof qual);
1360 }
1361 
1362 static int
1363 config_buf (struct dev_data *dev, u8 type, unsigned index)
1364 {
1365 	int		len;
1366 	int		hs = 0;
1367 
1368 	/* only one configuration */
1369 	if (index > 0)
1370 		return -EINVAL;
1371 
1372 	if (gadget_is_dualspeed(dev->gadget)) {
1373 		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1374 		if (type == USB_DT_OTHER_SPEED_CONFIG)
1375 			hs = !hs;
1376 	}
1377 	if (hs) {
1378 		dev->req->buf = dev->hs_config;
1379 		len = le16_to_cpu(dev->hs_config->wTotalLength);
1380 	} else {
1381 		dev->req->buf = dev->config;
1382 		len = le16_to_cpu(dev->config->wTotalLength);
1383 	}
1384 	((u8 *)dev->req->buf) [1] = type;
1385 	return len;
1386 }
1387 
1388 static int
1389 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1390 {
1391 	struct dev_data			*dev = get_gadget_data (gadget);
1392 	struct usb_request		*req = dev->req;
1393 	int				value = -EOPNOTSUPP;
1394 	struct usb_gadgetfs_event	*event;
1395 	u16				w_value = le16_to_cpu(ctrl->wValue);
1396 	u16				w_length = le16_to_cpu(ctrl->wLength);
1397 
1398 	spin_lock (&dev->lock);
1399 	dev->setup_abort = 0;
1400 	if (dev->state == STATE_DEV_UNCONNECTED) {
1401 		if (gadget_is_dualspeed(gadget)
1402 				&& gadget->speed == USB_SPEED_HIGH
1403 				&& dev->hs_config == NULL) {
1404 			spin_unlock(&dev->lock);
1405 			ERROR (dev, "no high speed config??\n");
1406 			return -EINVAL;
1407 		}
1408 
1409 		dev->state = STATE_DEV_CONNECTED;
1410 
1411 		INFO (dev, "connected\n");
1412 		event = next_event (dev, GADGETFS_CONNECT);
1413 		event->u.speed = gadget->speed;
1414 		ep0_readable (dev);
1415 
1416 	/* host may have given up waiting for response.  we can miss control
1417 	 * requests handled lower down (device/endpoint status and features);
1418 	 * then ep0_{read,write} will report the wrong status. controller
1419 	 * driver will have aborted pending i/o.
1420 	 */
1421 	} else if (dev->state == STATE_DEV_SETUP)
1422 		dev->setup_abort = 1;
1423 
1424 	req->buf = dev->rbuf;
1425 	req->context = NULL;
1426 	value = -EOPNOTSUPP;
1427 	switch (ctrl->bRequest) {
1428 
1429 	case USB_REQ_GET_DESCRIPTOR:
1430 		if (ctrl->bRequestType != USB_DIR_IN)
1431 			goto unrecognized;
1432 		switch (w_value >> 8) {
1433 
1434 		case USB_DT_DEVICE:
1435 			value = min (w_length, (u16) sizeof *dev->dev);
1436 			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1437 			req->buf = dev->dev;
1438 			break;
1439 		case USB_DT_DEVICE_QUALIFIER:
1440 			if (!dev->hs_config)
1441 				break;
1442 			value = min (w_length, (u16)
1443 				sizeof (struct usb_qualifier_descriptor));
1444 			make_qualifier (dev);
1445 			break;
1446 		case USB_DT_OTHER_SPEED_CONFIG:
1447 			// FALLTHROUGH
1448 		case USB_DT_CONFIG:
1449 			value = config_buf (dev,
1450 					w_value >> 8,
1451 					w_value & 0xff);
1452 			if (value >= 0)
1453 				value = min (w_length, (u16) value);
1454 			break;
1455 		case USB_DT_STRING:
1456 			goto unrecognized;
1457 
1458 		default:		// all others are errors
1459 			break;
1460 		}
1461 		break;
1462 
1463 	/* currently one config, two speeds */
1464 	case USB_REQ_SET_CONFIGURATION:
1465 		if (ctrl->bRequestType != 0)
1466 			goto unrecognized;
1467 		if (0 == (u8) w_value) {
1468 			value = 0;
1469 			dev->current_config = 0;
1470 			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1471 			// user mode expected to disable endpoints
1472 		} else {
1473 			u8	config, power;
1474 
1475 			if (gadget_is_dualspeed(gadget)
1476 					&& gadget->speed == USB_SPEED_HIGH) {
1477 				config = dev->hs_config->bConfigurationValue;
1478 				power = dev->hs_config->bMaxPower;
1479 			} else {
1480 				config = dev->config->bConfigurationValue;
1481 				power = dev->config->bMaxPower;
1482 			}
1483 
1484 			if (config == (u8) w_value) {
1485 				value = 0;
1486 				dev->current_config = config;
1487 				usb_gadget_vbus_draw(gadget, 2 * power);
1488 			}
1489 		}
1490 
1491 		/* report SET_CONFIGURATION like any other control request,
1492 		 * except that usermode may not stall this.  the next
1493 	 * request mustn't be allowed to start until this finishes:
1494 		 * endpoints and threads set up, etc.
1495 		 *
1496 		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1497 	 * has bad/racy automagic that prevents synchronizing here.
1498 		 * even kernel mode drivers often miss them.
1499 		 */
1500 		if (value == 0) {
1501 			INFO (dev, "configuration #%d\n", dev->current_config);
1502 			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1503 			if (dev->usermode_setup) {
1504 				dev->setup_can_stall = 0;
1505 				goto delegate;
1506 			}
1507 		}
1508 		break;
1509 
1510 #ifndef	CONFIG_USB_PXA25X
1511 	/* PXA automagically handles this request too */
1512 	case USB_REQ_GET_CONFIGURATION:
1513 		if (ctrl->bRequestType != 0x80)
1514 			goto unrecognized;
1515 		*(u8 *)req->buf = dev->current_config;
1516 		value = min (w_length, (u16) 1);
1517 		break;
1518 #endif
1519 
1520 	default:
1521 unrecognized:
1522 		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1523 			dev->usermode_setup ? "delegate" : "fail",
1524 			ctrl->bRequestType, ctrl->bRequest,
1525 			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1526 
1527 		/* if there's an ep0 reader, don't stall */
1528 		if (dev->usermode_setup) {
1529 			dev->setup_can_stall = 1;
1530 delegate:
1531 			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1532 						? 1 : 0;
1533 			dev->setup_wLength = w_length;
1534 			dev->setup_out_ready = 0;
1535 			dev->setup_out_error = 0;
1536 			value = 0;
1537 
1538 			/* read DATA stage for OUT right away */
1539 			if (unlikely (!dev->setup_in && w_length)) {
1540 				value = setup_req (gadget->ep0, dev->req,
1541 							w_length);
1542 				if (value < 0)
1543 					break;
1544 				value = usb_ep_queue (gadget->ep0, dev->req,
1545 							GFP_ATOMIC);
1546 				if (value < 0) {
1547 					clean_req (gadget->ep0, dev->req);
1548 					break;
1549 				}
1550 
1551 				/* we can't currently stall these */
1552 				dev->setup_can_stall = 0;
1553 			}
1554 
1555 			/* state changes when reader collects event */
1556 			event = next_event (dev, GADGETFS_SETUP);
1557 			event->u.setup = *ctrl;
1558 			ep0_readable (dev);
1559 			spin_unlock (&dev->lock);
1560 			return 0;
1561 		}
1562 	}
1563 
1564 	/* proceed with data transfer and status phases? */
1565 	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1566 		req->length = value;
1567 		req->zero = value < w_length;
1568 		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
1569 		if (value < 0) {
1570 			DBG (dev, "ep_queue --> %d\n", value);
1571 			req->status = 0;
1572 		}
1573 	}
1574 
1575 	/* device stalls when value < 0 */
1576 	spin_unlock (&dev->lock);
1577 	return value;
1578 }
1579 
1580 static void destroy_ep_files (struct dev_data *dev)
1581 {
1582 	DBG (dev, "%s %d\n", __func__, dev->state);
1583 
1584 	/* dev->state must prevent interference */
1585 	spin_lock_irq (&dev->lock);
1586 	while (!list_empty(&dev->epfiles)) {
1587 		struct ep_data	*ep;
1588 		struct inode	*parent;
1589 		struct dentry	*dentry;
1590 
1591 		/* break link to FS */
1592 		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1593 		list_del_init (&ep->epfiles);
1594 		dentry = ep->dentry;
1595 		ep->dentry = NULL;
1596 		parent = dentry->d_parent->d_inode;
1597 
1598 		/* break link to controller */
1599 		if (ep->state == STATE_EP_ENABLED)
1600 			(void) usb_ep_disable (ep->ep);
1601 		ep->state = STATE_EP_UNBOUND;
1602 		usb_ep_free_request (ep->ep, ep->req);
1603 		ep->ep = NULL;
1604 		wake_up (&ep->wait);
1605 		put_ep (ep);
1606 
1607 		spin_unlock_irq (&dev->lock);
1608 
1609 		/* break link to dcache */
1610 		mutex_lock (&parent->i_mutex);
1611 		d_delete (dentry);
1612 		dput (dentry);
1613 		mutex_unlock (&parent->i_mutex);
1614 
1615 		spin_lock_irq (&dev->lock);
1616 	}
1617 	spin_unlock_irq (&dev->lock);
1618 }
1619 
1620 
1621 static struct inode *
1622 gadgetfs_create_file (struct super_block *sb, char const *name,
1623 		void *data, const struct file_operations *fops,
1624 		struct dentry **dentry_p);
1625 
1626 static int activate_ep_files (struct dev_data *dev)
1627 {
1628 	struct usb_ep	*ep;
1629 	struct ep_data	*data;
1630 
1631 	gadget_for_each_ep (ep, dev->gadget) {
1632 
1633 		data = kzalloc(sizeof(*data), GFP_KERNEL);
1634 		if (!data)
1635 			goto enomem0;
1636 		data->state = STATE_EP_DISABLED;
1637 		mutex_init(&data->lock);
1638 		init_waitqueue_head (&data->wait);
1639 
1640 		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1641 		atomic_set (&data->count, 1);
1642 		data->dev = dev;
1643 		get_dev (dev);
1644 
1645 		data->ep = ep;
1646 		ep->driver_data = data;
1647 
1648 		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1649 		if (!data->req)
1650 			goto enomem1;
1651 
1652 		data->inode = gadgetfs_create_file (dev->sb, data->name,
1653 				data, &ep_config_operations,
1654 				&data->dentry);
1655 		if (!data->inode)
1656 			goto enomem2;
1657 		list_add_tail (&data->epfiles, &dev->epfiles);
1658 	}
1659 	return 0;
1660 
1661 enomem2:
1662 	usb_ep_free_request (ep, data->req);
1663 enomem1:
1664 	put_dev (dev);
1665 	kfree (data);
1666 enomem0:
1667 	DBG (dev, "%s enomem\n", __func__);
1668 	destroy_ep_files (dev);
1669 	return -ENOMEM;
1670 }
1671 
1672 static void
1673 gadgetfs_unbind (struct usb_gadget *gadget)
1674 {
1675 	struct dev_data		*dev = get_gadget_data (gadget);
1676 
1677 	DBG (dev, "%s\n", __func__);
1678 
1679 	spin_lock_irq (&dev->lock);
1680 	dev->state = STATE_DEV_UNBOUND;
1681 	spin_unlock_irq (&dev->lock);
1682 
1683 	destroy_ep_files (dev);
1684 	gadget->ep0->driver_data = NULL;
1685 	set_gadget_data (gadget, NULL);
1686 
1687 	/* we've already been disconnected ... no i/o is active */
1688 	if (dev->req)
1689 		usb_ep_free_request (gadget->ep0, dev->req);
1690 	DBG (dev, "%s done\n", __func__);
1691 	put_dev (dev);
1692 }
1693 
1694 static struct dev_data		*the_device;
1695 
1696 static int gadgetfs_bind(struct usb_gadget *gadget,
1697 		struct usb_gadget_driver *driver)
1698 {
1699 	struct dev_data		*dev = the_device;
1700 
1701 	if (!dev)
1702 		return -ESRCH;
1703 	if (0 != strcmp (CHIP, gadget->name)) {
1704 		pr_err("%s expected %s controller not %s\n",
1705 			shortname, CHIP, gadget->name);
1706 		return -ENODEV;
1707 	}
1708 
1709 	set_gadget_data (gadget, dev);
1710 	dev->gadget = gadget;
1711 	gadget->ep0->driver_data = dev;
1712 
1713 	/* preallocate control response and buffer */
1714 	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1715 	if (!dev->req)
1716 		goto enomem;
1717 	dev->req->context = NULL;
1718 	dev->req->complete = epio_complete;
1719 
1720 	if (activate_ep_files (dev) < 0)
1721 		goto enomem;
1722 
1723 	INFO (dev, "bound to %s driver\n", gadget->name);
1724 	spin_lock_irq(&dev->lock);
1725 	dev->state = STATE_DEV_UNCONNECTED;
1726 	spin_unlock_irq(&dev->lock);
1727 	get_dev (dev);
1728 	return 0;
1729 
1730 enomem:
1731 	gadgetfs_unbind (gadget);
1732 	return -ENOMEM;
1733 }
1734 
1735 static void
1736 gadgetfs_disconnect (struct usb_gadget *gadget)
1737 {
1738 	struct dev_data		*dev = get_gadget_data (gadget);
1739 	unsigned long		flags;
1740 
1741 	spin_lock_irqsave (&dev->lock, flags);
1742 	if (dev->state == STATE_DEV_UNCONNECTED)
1743 		goto exit;
1744 	dev->state = STATE_DEV_UNCONNECTED;
1745 
1746 	INFO (dev, "disconnected\n");
1747 	next_event (dev, GADGETFS_DISCONNECT);
1748 	ep0_readable (dev);
1749 exit:
1750 	spin_unlock_irqrestore (&dev->lock, flags);
1751 }
1752 
1753 static void
1754 gadgetfs_suspend (struct usb_gadget *gadget)
1755 {
1756 	struct dev_data		*dev = get_gadget_data (gadget);
1757 
1758 	INFO (dev, "suspended from state %d\n", dev->state);
1759 	spin_lock (&dev->lock);
1760 	switch (dev->state) {
1761 	case STATE_DEV_SETUP:		// VERY odd... host died??
1762 	case STATE_DEV_CONNECTED:
1763 	case STATE_DEV_UNCONNECTED:
1764 		next_event (dev, GADGETFS_SUSPEND);
1765 		ep0_readable (dev);
1766 		/* FALLTHROUGH */
1767 	default:
1768 		break;
1769 	}
1770 	spin_unlock (&dev->lock);
1771 }
1772 
1773 static struct usb_gadget_driver gadgetfs_driver = {
1774 	.function	= (char *) driver_desc,
1775 	.bind		= gadgetfs_bind,
1776 	.unbind		= gadgetfs_unbind,
1777 	.setup		= gadgetfs_setup,
1778 	.disconnect	= gadgetfs_disconnect,
1779 	.suspend	= gadgetfs_suspend,
1780 
1781 	.driver	= {
1782 		.name		= (char *) shortname,
1783 	},
1784 };
1785 
1786 /*----------------------------------------------------------------------*/
1787 
1788 static void gadgetfs_nop(struct usb_gadget *arg) { }
1789 
1790 static int gadgetfs_probe(struct usb_gadget *gadget,
1791 		struct usb_gadget_driver *driver)
1792 {
1793 	CHIP = gadget->name;
1794 	return -EISNAM;
1795 }
1796 
1797 static struct usb_gadget_driver probe_driver = {
1798 	.max_speed	= USB_SPEED_HIGH,
1799 	.bind		= gadgetfs_probe,
1800 	.unbind		= gadgetfs_nop,
1801 	.setup		= (void *)gadgetfs_nop,
1802 	.disconnect	= gadgetfs_nop,
1803 	.driver	= {
1804 		.name		= "nop",
1805 	},
1806 };
1807 
1808 
1809 /* DEVICE INITIALIZATION
1810  *
1811  *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1812  *     status = write (fd, descriptors, sizeof descriptors)
1813  *
1814  * That write establishes the device configuration, so the kernel can
1815  * bind to the controller ... guaranteeing it can handle enumeration
1816  * at all necessary speeds.  Descriptor order is:
1817  *
1818  * . message tag (u32, host order) ... for now, must be zero; it
1819  *	would change to support features like multi-config devices
1820  * . full/low speed config ... all wTotalLength bytes (with interface,
1821  *	class, altsetting, endpoint, and other descriptors)
1822  * . high speed config ... all descriptors, for high speed operation;
1823  *	this one's optional except for high-speed hardware
1824  * . device descriptor
1825  *
1826  * Endpoints are not yet enabled. Drivers must wait until device
1827  * configuration and interface altsetting changes create
1828  * the need to configure (or unconfigure) them.
1829  *
1830  * After initialization, the device stays active for as long as that
1831  * $CHIP file is open.  Events must then be read from that descriptor,
1832  * such as configuration notifications.
1833  */
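
/* A sketch of that initialization write (illustrative only: "net2280"
 * again stands in for $CHIP, and the config blobs and device descriptor
 * are assumed to have been built by the user mode driver):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	static int open_gadget(const void *fs_config, size_t fs_len,
 *			const void *hs_config, size_t hs_len,
 *			const struct usb_device_descriptor *device)
 *	{
 *		__u32 tag = 0;
 *		char buf[4096];
 *		size_t len = 4;
 *		int fd;
 *
 *		if (4 + fs_len + hs_len + USB_DT_DEVICE_SIZE > sizeof buf)
 *			return -1;
 *		memcpy(buf, &tag, 4);
 *		memcpy(buf + len, fs_config, fs_len);
 *		len += fs_len;
 *		if (hs_config) {	// optional, except on high speed hardware
 *			memcpy(buf + len, hs_config, hs_len);
 *			len += hs_len;
 *		}
 *		memcpy(buf + len, device, USB_DT_DEVICE_SIZE);
 *		len += USB_DT_DEVICE_SIZE;
 *
 *		fd = open("/dev/gadget/net2280", O_RDWR);
 *		if (fd < 0)
 *			return fd;
 *		if (write(fd, buf, len) != (ssize_t) len) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// keep open: device events are read from this fd
 *	}
 */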
1834 
1835 static int is_valid_config (struct usb_config_descriptor *config)
1836 {
1837 	return config->bDescriptorType == USB_DT_CONFIG
1838 		&& config->bLength == USB_DT_CONFIG_SIZE
1839 		&& config->bConfigurationValue != 0
1840 		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1841 		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1842 	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1843 	/* FIXME check lengths: walk to end */
1844 }
1845 
1846 static ssize_t
1847 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1848 {
1849 	struct dev_data		*dev = fd->private_data;
1850 	ssize_t			value = len, length = len;
1851 	unsigned		total;
1852 	u32			tag;
1853 	char			*kbuf;
1854 
1855 	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1856 		return -EINVAL;
1857 
1858 	/* we might need to change message format someday */
1859 	if (copy_from_user (&tag, buf, 4))
1860 		return -EFAULT;
1861 	if (tag != 0)
1862 		return -EINVAL;
1863 	buf += 4;
1864 	length -= 4;
1865 
1866 	kbuf = memdup_user(buf, length);
1867 	if (IS_ERR(kbuf))
1868 		return PTR_ERR(kbuf);
1869 
1870 	spin_lock_irq (&dev->lock);
1871 	value = -EINVAL;
1872 	if (dev->buf)
1873 		goto fail;
1874 	dev->buf = kbuf;
1875 
1876 	/* full or low speed config */
1877 	dev->config = (void *) kbuf;
1878 	total = le16_to_cpu(dev->config->wTotalLength);
1879 	if (!is_valid_config (dev->config) || total >= length)
1880 		goto fail;
1881 	kbuf += total;
1882 	length -= total;
1883 
1884 	/* optional high speed config */
1885 	if (kbuf [1] == USB_DT_CONFIG) {
1886 		dev->hs_config = (void *) kbuf;
1887 		total = le16_to_cpu(dev->hs_config->wTotalLength);
1888 		if (!is_valid_config (dev->hs_config) || total >= length)
1889 			goto fail;
1890 		kbuf += total;
1891 		length -= total;
1892 	}
1893 
1894 	/* could support multiple configs, using another encoding! */
1895 
1896 	/* device descriptor (tweaked for paranoia) */
1897 	if (length != USB_DT_DEVICE_SIZE)
1898 		goto fail;
1899 	dev->dev = (void *)kbuf;
1900 	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1901 			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1902 			|| dev->dev->bNumConfigurations != 1)
1903 		goto fail;
1904 	dev->dev->bNumConfigurations = 1;
1905 	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1906 
1907 	/* triggers gadgetfs_bind(); then we can enumerate. */
1908 	spin_unlock_irq (&dev->lock);
1909 	if (dev->hs_config)
1910 		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1911 	else
1912 		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1913 
1914 	value = usb_gadget_probe_driver(&gadgetfs_driver);
1915 	if (value != 0) {
1916 		kfree (dev->buf);
1917 		dev->buf = NULL;
1918 	} else {
1919 		/* at this point "good" hardware has for the first time
1920 		 * let the USB host see us.  alternatively, if users
1921 		 * unplug/replug that will clear all the error state.
1922 		 *
1923 		 * note:  everything running before here was guaranteed
1924 		 * to choke driver model style diagnostics.  from here
1925 		 * on, they can work ... except in cleanup paths that
1926 		 * kick in after the ep0 descriptor is closed.
1927 		 */
1928 		fd->f_op = &ep0_io_operations;
1929 		value = len;
1930 	}
1931 	return value;
1932 
1933 fail:
1934 	spin_unlock_irq (&dev->lock);
1935 	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1936 	kfree (dev->buf);
1937 	dev->buf = NULL;
1938 	return value;
1939 }
1940 
1941 static int
1942 dev_open (struct inode *inode, struct file *fd)
1943 {
1944 	struct dev_data		*dev = inode->i_private;
1945 	int			value = -EBUSY;
1946 
1947 	spin_lock_irq(&dev->lock);
1948 	if (dev->state == STATE_DEV_DISABLED) {
1949 		dev->ev_next = 0;
1950 		dev->state = STATE_DEV_OPENED;
1951 		fd->private_data = dev;
1952 		get_dev (dev);
1953 		value = 0;
1954 	}
1955 	spin_unlock_irq(&dev->lock);
1956 	return value;
1957 }
1958 
1959 static const struct file_operations dev_init_operations = {
1960 	.llseek =	no_llseek,
1961 
1962 	.open =		dev_open,
1963 	.write =	dev_config,
1964 	.fasync =	ep0_fasync,
1965 	.unlocked_ioctl = dev_ioctl,
1966 	.release =	dev_release,
1967 };
1968 
1969 /*----------------------------------------------------------------------*/
1970 
1971 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1972  *
1973  * Mounting the filesystem creates a controller file, used first for
1974  * device configuration then later for event monitoring.
1975  */
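
/* e.g. the usual usermode setup is just (a sketch; mounting at
 * /dev/gadget is a convention, not a requirement):
 *
 *	#include <sys/mount.h>
 *
 *	static int mount_gadgetfs(void)
 *	{
 *		return mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL);
 *	}
 */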
1976 
1977 
1978 /* FIXME PAM etc could set this security policy without mount options
1979  * if epfiles inherited ownership and permissions from ep0 ...
1980  */
1981 
1982 static unsigned default_uid;
1983 static unsigned default_gid;
1984 static unsigned default_perm = S_IRUSR | S_IWUSR;
1985 
1986 module_param (default_uid, uint, 0644);
1987 module_param (default_gid, uint, 0644);
1988 module_param (default_perm, uint, 0644);
1989 
1990 
1991 static struct inode *
1992 gadgetfs_make_inode (struct super_block *sb,
1993 		void *data, const struct file_operations *fops,
1994 		int mode)
1995 {
1996 	struct inode *inode = new_inode (sb);
1997 
1998 	if (inode) {
1999 		inode->i_ino = get_next_ino();
2000 		inode->i_mode = mode;
2001 		inode->i_uid = make_kuid(&init_user_ns, default_uid);
2002 		inode->i_gid = make_kgid(&init_user_ns, default_gid);
2003 		inode->i_atime = inode->i_mtime = inode->i_ctime
2004 				= CURRENT_TIME;
2005 		inode->i_private = data;
2006 		inode->i_fop = fops;
2007 	}
2008 	return inode;
2009 }
2010 
2011 /* created in the fs root directory, so non-renamable and non-linkable;
2012  * inode and dentry stay paired until device reconfig.
2013  */
2014 static struct inode *
2015 gadgetfs_create_file (struct super_block *sb, char const *name,
2016 		void *data, const struct file_operations *fops,
2017 		struct dentry **dentry_p)
2018 {
2019 	struct dentry	*dentry;
2020 	struct inode	*inode;
2021 
2022 	dentry = d_alloc_name(sb->s_root, name);
2023 	if (!dentry)
2024 		return NULL;
2025 
2026 	inode = gadgetfs_make_inode (sb, data, fops,
2027 			S_IFREG | (default_perm & S_IRWXUGO));
2028 	if (!inode) {
2029 		dput(dentry);
2030 		return NULL;
2031 	}
2032 	d_add (dentry, inode);
2033 	*dentry_p = dentry;
2034 	return inode;
2035 }
2036 
2037 static const struct super_operations gadget_fs_operations = {
2038 	.statfs =	simple_statfs,
2039 	.drop_inode =	generic_delete_inode,
2040 };
2041 
2042 static int
2043 gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2044 {
2045 	struct inode	*inode;
2046 	struct dev_data	*dev;
2047 
2048 	if (the_device)
2049 		return -ESRCH;
2050 
2051 	/* fake probe to determine $CHIP */
2052 	CHIP = NULL;
2053 	usb_gadget_probe_driver(&probe_driver);
2054 	if (!CHIP)
2055 		return -ENODEV;
2056 
2057 	/* superblock */
2058 	sb->s_blocksize = PAGE_CACHE_SIZE;
2059 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2060 	sb->s_magic = GADGETFS_MAGIC;
2061 	sb->s_op = &gadget_fs_operations;
2062 	sb->s_time_gran = 1;
2063 
2064 	/* root inode */
2065 	inode = gadgetfs_make_inode (sb,
2066 			NULL, &simple_dir_operations,
2067 			S_IFDIR | S_IRUGO | S_IXUGO);
2068 	if (!inode)
2069 		goto Enomem;
2070 	inode->i_op = &simple_dir_inode_operations;
2071 	if (!(sb->s_root = d_make_root (inode)))
2072 		goto Enomem;
2073 
2074 	/* the ep0 file is named after the controller we expect;
2075 	 * user mode code can use it for sanity checks, like we do.
2076 	 */
2077 	dev = dev_new ();
2078 	if (!dev)
2079 		goto Enomem;
2080 
2081 	dev->sb = sb;
2082 	if (!gadgetfs_create_file (sb, CHIP,
2083 				dev, &dev_init_operations,
2084 				&dev->dentry)) {
2085 		put_dev(dev);
2086 		goto Enomem;
2087 	}
2088 
2089 	/* other endpoint files are available after hardware setup,
2090 	 * from binding to a controller.
2091 	 */
2092 	the_device = dev;
2093 	return 0;
2094 
2095 Enomem:
2096 	return -ENOMEM;
2097 }
2098 
2099 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2100 static struct dentry *
2101 gadgetfs_mount (struct file_system_type *t, int flags,
2102 		const char *path, void *opts)
2103 {
2104 	return mount_single (t, flags, opts, gadgetfs_fill_super);
2105 }
2106 
2107 static void
2108 gadgetfs_kill_sb (struct super_block *sb)
2109 {
2110 	kill_litter_super (sb);
2111 	if (the_device) {
2112 		put_dev (the_device);
2113 		the_device = NULL;
2114 	}
2115 }
2116 
2117 /*----------------------------------------------------------------------*/
2118 
2119 static struct file_system_type gadgetfs_type = {
2120 	.owner		= THIS_MODULE,
2121 	.name		= shortname,
2122 	.mount		= gadgetfs_mount,
2123 	.kill_sb	= gadgetfs_kill_sb,
2124 };
2125 MODULE_ALIAS_FS("gadgetfs");
2126 
2127 /*----------------------------------------------------------------------*/
2128 
2129 static int __init init (void)
2130 {
2131 	int status;
2132 
2133 	status = register_filesystem (&gadgetfs_type);
2134 	if (status == 0)
2135 		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2136 			shortname, driver_desc);
2137 	return status;
2138 }
2139 module_init (init);
2140 
2141 static void __exit cleanup (void)
2142 {
2143 	pr_debug ("unregister %s\n", shortname);
2144 	unregister_filesystem (&gadgetfs_type);
2145 }
2146 module_exit (cleanup);
2147 
2148