xref: /openbmc/linux/drivers/usb/gadget/legacy/inode.c (revision 4f6cce39)
1 /*
2  * inode.c -- user mode filesystem api for usb gadget controllers
3  *
4  * Copyright (C) 2003-2004 David Brownell
5  * Copyright (C) 2003 Agilent Technologies
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 
14 /* #define VERBOSE_DEBUG */
15 
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/fs.h>
19 #include <linux/pagemap.h>
20 #include <linux/uts.h>
21 #include <linux/wait.h>
22 #include <linux/compiler.h>
23 #include <linux/uaccess.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/poll.h>
27 #include <linux/mmu_context.h>
28 #include <linux/aio.h>
29 #include <linux/uio.h>
30 
31 #include <linux/device.h>
32 #include <linux/moduleparam.h>
33 
34 #include <linux/usb/gadgetfs.h>
35 #include <linux/usb/gadget.h>
36 
37 
38 /*
39  * The gadgetfs API maps each endpoint to a file descriptor so that you
40  * can use standard synchronous read/write calls for I/O.  There's some
41  * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
42  * drivers show how this works in practice.  You can also use AIO to
43  * eliminate I/O gaps between requests, to help when streaming data.
44  *
45  * Key parts that must be USB-specific are protocols defining how the
46  * read/write operations relate to the hardware state machines.  There
47  * are two types of files.  One type is for the device, implementing ep0.
48  * The other type is for each IN or OUT endpoint.  In both cases, the
49  * user mode driver must configure the hardware before using it.
50  *
51  * - First, dev_config() is called when /dev/gadget/$CHIP is configured
52  *   (by writing configuration and device descriptors).  Afterwards it
53  *   may serve as a source of device events, used to handle all control
54  *   requests other than basic enumeration.
55  *
56  * - Then, after a SET_CONFIGURATION control request, ep_config() is
57  *   called when each /dev/gadget/ep* file is configured (by writing
58  *   endpoint descriptors).  Afterwards these files are used to write()
59  *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
60  *   direction" request is issued (like reading an IN endpoint).
61  *
62  * Unlike "usbfs", the only ioctl()s are for things that are rare, and maybe
63  * not possible on all hardware.  For example, precise fault handling with
64  * respect to data left in endpoint fifos after aborted operations; or
65  * selective clearing of endpoint halts, to implement SET_INTERFACE.
66  */
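
/* A minimal user mode session, as a hedged sketch (the controller name
 * "net2280", the buffer size, and the build_descriptors() helper are
 * illustrative assumptions, not part of this driver):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	char blob[4096];
 *	int len = build_descriptors(blob);	// hypothetical helper
 *	int ep0 = open("/dev/gadget/net2280", O_RDWR);
 *
 *	write(ep0, blob, len);			// see DEVICE INITIALIZATION
 *	for (;;) {
 *		struct usb_gadgetfs_event event;
 *
 *		if (read(ep0, &event, sizeof event) != sizeof event)
 *			break;
 *		switch (event.type) {
 *		case GADGETFS_CONNECT:
 *			// open /dev/gadget/ep* files, write their descriptors
 *			break;
 *		case GADGETFS_SETUP:
 *			// answer via read()/write() on ep0 (see EP0 below)
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */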
67 
68 #define	DRIVER_DESC	"USB Gadget filesystem"
69 #define	DRIVER_VERSION	"24 Aug 2004"
70 
71 static const char driver_desc [] = DRIVER_DESC;
72 static const char shortname [] = "gadgetfs";
73 
74 MODULE_DESCRIPTION (DRIVER_DESC);
75 MODULE_AUTHOR ("David Brownell");
76 MODULE_LICENSE ("GPL");
77 
78 static int ep_open(struct inode *, struct file *);
79 
80 
81 /*----------------------------------------------------------------------*/
82 
83 #define GADGETFS_MAGIC		0xaee71ee7
84 
85 /* /dev/gadget/$CHIP represents ep0 and the whole device */
86 enum ep0_state {
87 	/* DISABLED is the initial state. */
88 	STATE_DEV_DISABLED = 0,
89 
90 	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
91 	 * ep0/device i/o modes and binding to the controller.  Driver
92 	 * must always write descriptors to initialize the device, then
93 	 * the device becomes UNCONNECTED until enumeration.
94 	 */
95 	STATE_DEV_OPENED,
96 
97 	/* From then on, ep0 fd is in either of two basic modes:
98 	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
99 	 * - SETUP: read/write will transfer control data and succeed;
100 	 *   or, if in the "wrong direction", perform a protocol stall
101 	 */
102 	STATE_DEV_UNCONNECTED,
103 	STATE_DEV_CONNECTED,
104 	STATE_DEV_SETUP,
105 
106 	/* UNBOUND means the driver closed ep0, so the device won't be
107 	 * accessible again (DEV_DISABLED) until all fds are closed.
108 	 */
109 	STATE_DEV_UNBOUND,
110 };
111 
112 /* enough for the whole queue: most events invalidate others */
113 #define	N_EVENT			5
114 
115 struct dev_data {
116 	spinlock_t			lock;
117 	atomic_t			count;
118 	enum ep0_state			state;		/* P: lock */
119 	struct usb_gadgetfs_event	event [N_EVENT];
120 	unsigned			ev_next;
121 	struct fasync_struct		*fasync;
122 	u8				current_config;
123 
124 	/* drivers reading ep0 MUST handle control requests (SETUP)
125 	 * reported that way; else the host will time out.
126 	 */
127 	unsigned			usermode_setup : 1,
128 					setup_in : 1,
129 					setup_can_stall : 1,
130 					setup_out_ready : 1,
131 					setup_out_error : 1,
132 					setup_abort : 1,
133 					gadget_registered : 1;
134 	unsigned			setup_wLength;
135 
136 	/* the rest is basically write-once */
137 	struct usb_config_descriptor	*config, *hs_config;
138 	struct usb_device_descriptor	*dev;
139 	struct usb_request		*req;
140 	struct usb_gadget		*gadget;
141 	struct list_head		epfiles;
142 	void				*buf;
143 	wait_queue_head_t		wait;
144 	struct super_block		*sb;
145 	struct dentry			*dentry;
146 
147 	/* except this scratch i/o buffer for ep0 */
148 	u8				rbuf [256];
149 };
150 
151 static inline void get_dev (struct dev_data *data)
152 {
153 	atomic_inc (&data->count);
154 }
155 
156 static void put_dev (struct dev_data *data)
157 {
158 	if (likely (!atomic_dec_and_test (&data->count)))
159 		return;
160 	/* needs no more cleanup */
161 	BUG_ON (waitqueue_active (&data->wait));
162 	kfree (data);
163 }
164 
165 static struct dev_data *dev_new (void)
166 {
167 	struct dev_data		*dev;
168 
169 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
170 	if (!dev)
171 		return NULL;
172 	dev->state = STATE_DEV_DISABLED;
173 	atomic_set (&dev->count, 1);
174 	spin_lock_init (&dev->lock);
175 	INIT_LIST_HEAD (&dev->epfiles);
176 	init_waitqueue_head (&dev->wait);
177 	return dev;
178 }
179 
180 /*----------------------------------------------------------------------*/
181 
182 /* other /dev/gadget/$ENDPOINT files represent endpoints */
183 enum ep_state {
184 	STATE_EP_DISABLED = 0,
185 	STATE_EP_READY,
186 	STATE_EP_ENABLED,
187 	STATE_EP_UNBOUND,
188 };
189 
190 struct ep_data {
191 	struct mutex			lock;
192 	enum ep_state			state;
193 	atomic_t			count;
194 	struct dev_data			*dev;
195 	/* must hold dev->lock before accessing ep or req */
196 	struct usb_ep			*ep;
197 	struct usb_request		*req;
198 	ssize_t				status;
199 	char				name [16];
200 	struct usb_endpoint_descriptor	desc, hs_desc;
201 	struct list_head		epfiles;
202 	wait_queue_head_t		wait;
203 	struct dentry			*dentry;
204 };
205 
206 static inline void get_ep (struct ep_data *data)
207 {
208 	atomic_inc (&data->count);
209 }
210 
211 static void put_ep (struct ep_data *data)
212 {
213 	if (likely (!atomic_dec_and_test (&data->count)))
214 		return;
215 	put_dev (data->dev);
216 	/* needs no more cleanup */
217 	BUG_ON (!list_empty (&data->epfiles));
218 	BUG_ON (waitqueue_active (&data->wait));
219 	kfree (data);
220 }
221 
222 /*----------------------------------------------------------------------*/
223 
224 /* most "how to use the hardware" policy choices are in userspace:
225  * mapping endpoint roles (which the driver needs) to the capabilities
226  * which the usb controller has.  most of those capabilities are exposed
227  * implicitly, starting with the driver name and then endpoint names.
228  */
229 
230 static const char *CHIP;
231 
232 /*----------------------------------------------------------------------*/
233 
234 /* NOTE:  don't use dev_printk calls before binding to the gadget
235  * at the end of ep0 configuration, or after unbind.
236  */
237 
238 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
239 #define xprintk(d,level,fmt,args...) \
240 	printk(level "%s: " fmt , shortname , ## args)
241 
242 #ifdef DEBUG
243 #define DBG(dev,fmt,args...) \
244 	xprintk(dev , KERN_DEBUG , fmt , ## args)
245 #else
246 #define DBG(dev,fmt,args...) \
247 	do { } while (0)
248 #endif /* DEBUG */
249 
250 #ifdef VERBOSE_DEBUG
251 #define VDEBUG	DBG
252 #else
253 #define VDEBUG(dev,fmt,args...) \
254 	do { } while (0)
255 #endif /* VERBOSE_DEBUG */
256 
257 #define ERROR(dev,fmt,args...) \
258 	xprintk(dev , KERN_ERR , fmt , ## args)
259 #define INFO(dev,fmt,args...) \
260 	xprintk(dev , KERN_INFO , fmt , ## args)
261 
262 
263 /*----------------------------------------------------------------------*/
264 
265 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
266  *
267  * After opening, configure non-control endpoints.  Then use normal
268  * stream read() and write() requests; and maybe ioctl() to get more
269  * precise FIFO status when recovering from cancellation.
270  */
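
/* A hedged sketch of that synchronous model from user space ("ep1in", the
 * transfer size, and the error handling are illustrative assumptions; the
 * fd must first be configured as described under ENDPOINT INITIALIZATION):
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	int fd = open("/dev/gadget/ep1in", O_RDWR);
 *	char buf[512];
 *
 *	// ... write the endpoint descriptors, then stream data:
 *	ssize_t n = write(fd, buf, sizeof buf);	// queues one IN transfer
 *	if (n < 0 && errno == EINTR) {
 *		// recover precisely: see what's stuck in the fifo, then flush
 *		if (ioctl(fd, GADGETFS_FIFO_STATUS) > 0)
 *			ioctl(fd, GADGETFS_FIFO_FLUSH);
 *	}
 *	ioctl(fd, GADGETFS_CLEAR_HALT);	// e.g. while handling SET_INTERFACE
 */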
271 
272 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
273 {
274 	struct ep_data	*epdata = ep->driver_data;
275 
276 	if (!req->context)
277 		return;
278 	if (req->status)
279 		epdata->status = req->status;
280 	else
281 		epdata->status = req->actual;
282 	complete ((struct completion *)req->context);
283 }
284 
285 /* lock the endpoint's mutex, returning once it's usable (enabled, or
286  * ready to accept descriptors).  still need dev->lock to use epdata->ep.
287  */
288 static int
289 get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
290 {
291 	int	val;
292 
293 	if (f_flags & O_NONBLOCK) {
294 		if (!mutex_trylock(&epdata->lock))
295 			goto nonblock;
296 		if (epdata->state != STATE_EP_ENABLED &&
297 		    (!is_write || epdata->state != STATE_EP_READY)) {
298 			mutex_unlock(&epdata->lock);
299 nonblock:
300 			val = -EAGAIN;
301 		} else
302 			val = 0;
303 		return val;
304 	}
305 
306 	val = mutex_lock_interruptible(&epdata->lock);
307 	if (val < 0)
308 		return val;
309 
310 	switch (epdata->state) {
311 	case STATE_EP_ENABLED:
312 		return 0;
313 	case STATE_EP_READY:			/* not configured yet */
314 		if (is_write)
315 			return 0;
316 		// FALLTHRU
317 	case STATE_EP_UNBOUND:			/* clean disconnect */
318 		break;
319 	// case STATE_EP_DISABLED:		/* "can't happen" */
320 	default:				/* error! */
321 		pr_debug ("%s: ep %p not available, state %d\n",
322 				shortname, epdata, epdata->state);
323 	}
324 	mutex_unlock(&epdata->lock);
325 	return -ENODEV;
326 }
327 
328 static ssize_t
329 ep_io (struct ep_data *epdata, void *buf, unsigned len)
330 {
331 	DECLARE_COMPLETION_ONSTACK (done);
332 	int value;
333 
334 	spin_lock_irq (&epdata->dev->lock);
335 	if (likely (epdata->ep != NULL)) {
336 		struct usb_request	*req = epdata->req;
337 
338 		req->context = &done;
339 		req->complete = epio_complete;
340 		req->buf = buf;
341 		req->length = len;
342 		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
343 	} else
344 		value = -ENODEV;
345 	spin_unlock_irq (&epdata->dev->lock);
346 
347 	if (likely (value == 0)) {
348 		value = wait_event_interruptible (done.wait, done.done);
349 		if (value != 0) {
350 			spin_lock_irq (&epdata->dev->lock);
351 			if (likely (epdata->ep != NULL)) {
352 				DBG (epdata->dev, "%s i/o interrupted\n",
353 						epdata->name);
354 				usb_ep_dequeue (epdata->ep, epdata->req);
355 				spin_unlock_irq (&epdata->dev->lock);
356 
357 				wait_event (done.wait, done.done);
358 				if (epdata->status == -ECONNRESET)
359 					epdata->status = -EINTR;
360 			} else {
361 				spin_unlock_irq (&epdata->dev->lock);
362 
363 				DBG (epdata->dev, "endpoint gone\n");
364 				epdata->status = -ENODEV;
365 			}
366 		}
367 		return epdata->status;
368 	}
369 	return value;
370 }
371 
372 static int
373 ep_release (struct inode *inode, struct file *fd)
374 {
375 	struct ep_data		*data = fd->private_data;
376 	int value;
377 
378 	value = mutex_lock_interruptible(&data->lock);
379 	if (value < 0)
380 		return value;
381 
382 	/* clean up if this can be reopened */
383 	if (data->state != STATE_EP_UNBOUND) {
384 		data->state = STATE_EP_DISABLED;
385 		data->desc.bDescriptorType = 0;
386 		data->hs_desc.bDescriptorType = 0;
387 		usb_ep_disable(data->ep);
388 	}
389 	mutex_unlock(&data->lock);
390 	put_ep (data);
391 	return 0;
392 }
393 
394 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
395 {
396 	struct ep_data		*data = fd->private_data;
397 	int			status;
398 
399 	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
400 		return status;
401 
402 	spin_lock_irq (&data->dev->lock);
403 	if (likely (data->ep != NULL)) {
404 		switch (code) {
405 		case GADGETFS_FIFO_STATUS:
406 			status = usb_ep_fifo_status (data->ep);
407 			break;
408 		case GADGETFS_FIFO_FLUSH:
409 			usb_ep_fifo_flush (data->ep);
410 			break;
411 		case GADGETFS_CLEAR_HALT:
412 			status = usb_ep_clear_halt (data->ep);
413 			break;
414 		default:
415 			status = -ENOTTY;
416 		}
417 	} else
418 		status = -ENODEV;
419 	spin_unlock_irq (&data->dev->lock);
420 	mutex_unlock(&data->lock);
421 	return status;
422 }
423 
424 /*----------------------------------------------------------------------*/
425 
426 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
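
/* A hedged sketch of the async model using the libaio wrappers (libaio
 * itself and the already-configured "out_fd" endpoint are assumptions;
 * any io_submit(2) user looks much the same).  Keeping several reads
 * queued avoids idle gaps between back-to-back OUT transfers:
 *
 *	#include <libaio.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb iocb, *iocbs[1] = { &iocb };
 *	struct io_event ev;
 *	char buf[512];
 *
 *	io_setup(8, &ctx);
 *	io_prep_pread(&iocb, out_fd, buf, sizeof buf, 0);
 *	io_submit(ctx, 1, iocbs);
 *	// ... later: ev.res is bytes transferred, or a negative errno
 *	io_getevents(ctx, 1, 1, &ev, NULL);
 *	io_destroy(ctx);
 */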
427 
428 struct kiocb_priv {
429 	struct usb_request	*req;
430 	struct ep_data		*epdata;
431 	struct kiocb		*iocb;
432 	struct mm_struct	*mm;
433 	struct work_struct	work;
434 	void			*buf;
435 	struct iov_iter		to;
436 	const void		*to_free;
437 	unsigned		actual;
438 };
439 
440 static int ep_aio_cancel(struct kiocb *iocb)
441 {
442 	struct kiocb_priv	*priv = iocb->private;
443 	struct ep_data		*epdata;
444 	int			value;
445 
446 	local_irq_disable();
447 	epdata = priv->epdata;
448 	// spin_lock(&epdata->dev->lock);
449 	if (likely(epdata && epdata->ep && priv->req))
450 		value = usb_ep_dequeue (epdata->ep, priv->req);
451 	else
452 		value = -EINVAL;
453 	// spin_unlock(&epdata->dev->lock);
454 	local_irq_enable();
455 
456 	return value;
457 }
458 
459 static void ep_user_copy_worker(struct work_struct *work)
460 {
461 	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
462 	struct mm_struct *mm = priv->mm;
463 	struct kiocb *iocb = priv->iocb;
464 	size_t ret;
465 
466 	use_mm(mm);
467 	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
468 	unuse_mm(mm);
469 	if (!ret)
470 		ret = -EFAULT;
471 
472 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
473 	iocb->ki_complete(iocb, ret, ret);
474 
475 	kfree(priv->buf);
476 	kfree(priv->to_free);
477 	kfree(priv);
478 }
479 
480 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
481 {
482 	struct kiocb		*iocb = req->context;
483 	struct kiocb_priv	*priv = iocb->private;
484 	struct ep_data		*epdata = priv->epdata;
485 
486 	/* lock against disconnect (and ideally, cancel) */
487 	spin_lock(&epdata->dev->lock);
488 	priv->req = NULL;
489 	priv->epdata = NULL;
490 
491 	/* if this was a write or a read returning no data then we
492 	 * don't need to copy anything to userspace, so we can
493 	 * complete the aio request immediately.
494 	 */
495 	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
496 		kfree(req->buf);
497 		kfree(priv->to_free);
498 		kfree(priv);
499 		iocb->private = NULL;
500 		/* aio_complete() reports bytes-transferred _and_ faults */
501 
502 		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
503 				req->status);
504 	} else {
505 		/* the deferred copy to userspace won't report both; we hide some faults */
506 		if (unlikely(0 != req->status))
507 			DBG(epdata->dev, "%s fault %d len %d\n",
508 				ep->name, req->status, req->actual);
509 
510 		priv->buf = req->buf;
511 		priv->actual = req->actual;
512 		INIT_WORK(&priv->work, ep_user_copy_worker);
513 		schedule_work(&priv->work);
514 	}
515 	spin_unlock(&epdata->dev->lock);
516 
517 	usb_ep_free_request(ep, req);
518 	put_ep(epdata);
519 }
520 
521 static ssize_t ep_aio(struct kiocb *iocb,
522 		      struct kiocb_priv *priv,
523 		      struct ep_data *epdata,
524 		      char *buf,
525 		      size_t len)
526 {
527 	struct usb_request *req;
528 	ssize_t value;
529 
530 	iocb->private = priv;
531 	priv->iocb = iocb;
532 
533 	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
534 	get_ep(epdata);
535 	priv->epdata = epdata;
536 	priv->actual = 0;
537 	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
538 
539 	/* each kiocb is coupled to one usb_request, but we can't
540 	 * allocate or submit those if the host disconnected.
541 	 */
542 	spin_lock_irq(&epdata->dev->lock);
543 	value = -ENODEV;
544 	if (unlikely(epdata->ep == NULL))
545 		goto fail;
546 
547 	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
548 	value = -ENOMEM;
549 	if (unlikely(!req))
550 		goto fail;
551 
552 	priv->req = req;
553 	req->buf = buf;
554 	req->length = len;
555 	req->complete = ep_aio_complete;
556 	req->context = iocb;
557 	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
558 	if (unlikely(0 != value)) {
559 		usb_ep_free_request(epdata->ep, req);
560 		goto fail;
561 	}
562 	spin_unlock_irq(&epdata->dev->lock);
563 	return -EIOCBQUEUED;
564 
565 fail:
566 	spin_unlock_irq(&epdata->dev->lock);
567 	kfree(priv->to_free);
568 	kfree(priv);
569 	put_ep(epdata);
570 	return value;
571 }
572 
573 static ssize_t
574 ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
575 {
576 	struct file *file = iocb->ki_filp;
577 	struct ep_data *epdata = file->private_data;
578 	size_t len = iov_iter_count(to);
579 	ssize_t value;
580 	char *buf;
581 
582 	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
583 		return value;
584 
585 	/* halt any endpoint by doing a "wrong direction" i/o call */
586 	if (usb_endpoint_dir_in(&epdata->desc)) {
587 		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
588 		    !is_sync_kiocb(iocb)) {
589 			mutex_unlock(&epdata->lock);
590 			return -EINVAL;
591 		}
592 		DBG (epdata->dev, "%s halt\n", epdata->name);
593 		spin_lock_irq(&epdata->dev->lock);
594 		if (likely(epdata->ep != NULL))
595 			usb_ep_set_halt(epdata->ep);
596 		spin_unlock_irq(&epdata->dev->lock);
597 		mutex_unlock(&epdata->lock);
598 		return -EBADMSG;
599 	}
600 
601 	buf = kmalloc(len, GFP_KERNEL);
602 	if (unlikely(!buf)) {
603 		mutex_unlock(&epdata->lock);
604 		return -ENOMEM;
605 	}
606 	if (is_sync_kiocb(iocb)) {
607 		value = ep_io(epdata, buf, len);
608 		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
609 			value = -EFAULT;
610 	} else {
611 		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
612 		value = -ENOMEM;
613 		if (!priv)
614 			goto fail;
615 		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
616 		if (!priv->to_free) {
617 			kfree(priv);
618 			goto fail;
619 		}
620 		value = ep_aio(iocb, priv, epdata, buf, len);
621 		if (value == -EIOCBQUEUED)
622 			buf = NULL;
623 	}
624 fail:
625 	kfree(buf);
626 	mutex_unlock(&epdata->lock);
627 	return value;
628 }
629 
630 static ssize_t ep_config(struct ep_data *, const char *, size_t);
631 
632 static ssize_t
633 ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
634 {
635 	struct file *file = iocb->ki_filp;
636 	struct ep_data *epdata = file->private_data;
637 	size_t len = iov_iter_count(from);
638 	bool configured;
639 	ssize_t value;
640 	char *buf;
641 
642 	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
643 		return value;
644 
645 	configured = epdata->state == STATE_EP_ENABLED;
646 
647 	/* halt any endpoint by doing a "wrong direction" i/o call */
648 	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
649 		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
650 		    !is_sync_kiocb(iocb)) {
651 			mutex_unlock(&epdata->lock);
652 			return -EINVAL;
653 		}
654 		DBG (epdata->dev, "%s halt\n", epdata->name);
655 		spin_lock_irq(&epdata->dev->lock);
656 		if (likely(epdata->ep != NULL))
657 			usb_ep_set_halt(epdata->ep);
658 		spin_unlock_irq(&epdata->dev->lock);
659 		mutex_unlock(&epdata->lock);
660 		return -EBADMSG;
661 	}
662 
663 	buf = kmalloc(len, GFP_KERNEL);
664 	if (unlikely(!buf)) {
665 		mutex_unlock(&epdata->lock);
666 		return -ENOMEM;
667 	}
668 
669 	if (unlikely(!copy_from_iter_full(buf, len, from))) {
670 		value = -EFAULT;
671 		goto out;
672 	}
673 
674 	if (unlikely(!configured)) {
675 		value = ep_config(epdata, buf, len);
676 	} else if (is_sync_kiocb(iocb)) {
677 		value = ep_io(epdata, buf, len);
678 	} else {
679 		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
680 		value = -ENOMEM;
681 		if (priv) {
682 			value = ep_aio(iocb, priv, epdata, buf, len);
683 			if (value == -EIOCBQUEUED)
684 				buf = NULL;
685 		}
686 	}
687 out:
688 	kfree(buf);
689 	mutex_unlock(&epdata->lock);
690 	return value;
691 }
692 
693 /*----------------------------------------------------------------------*/
694 
695 /* used after endpoint configuration */
696 static const struct file_operations ep_io_operations = {
697 	.owner =	THIS_MODULE,
698 
699 	.open =		ep_open,
700 	.release =	ep_release,
701 	.llseek =	no_llseek,
702 	.unlocked_ioctl = ep_ioctl,
703 	.read_iter =	ep_read_iter,
704 	.write_iter =	ep_write_iter,
705 };
706 
707 /* ENDPOINT INITIALIZATION
708  *
709  *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
710  *     status = write (fd, descriptors, sizeof descriptors)
711  *
712  * That write establishes the endpoint configuration, configuring
713  * the controller to process bulk, interrupt, or isochronous transfers
714  * at the right maxpacket size, and so on.
715  *
716  * The descriptors are message type 1, identified by a host order u32
717  * at the beginning of what's written.  Descriptor order is: full/low
718  * speed descriptor, then optional high speed descriptor.
719  */
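/* A hedged sketch of that write for one bulk IN endpoint (the endpoint
 * address, the packet sizes, and the already-open "ep_fd" are
 * illustrative assumptions):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <asm/byteorder.h>
 *	#include <linux/usb/ch9.h>
 *
 *	struct usb_endpoint_descriptor fs = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __cpu_to_le16(64),
 *	};
 *	struct usb_endpoint_descriptor hs = fs;
 *	__u32 tag = 1;
 *	char buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *
 *	hs.wMaxPacketSize = __cpu_to_le16(512);
 *	memcpy(buf, &tag, 4);
 *	memcpy(buf + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	memcpy(buf + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *	write(ep_fd, buf, sizeof buf);
 */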
720 static ssize_t
721 ep_config (struct ep_data *data, const char *buf, size_t len)
722 {
723 	struct usb_ep		*ep;
724 	u32			tag;
725 	int			value, length = len;
726 
727 	if (data->state != STATE_EP_READY) {
728 		value = -EL2HLT;
729 		goto fail;
730 	}
731 
732 	value = len;
733 	if (len < USB_DT_ENDPOINT_SIZE + 4)
734 		goto fail0;
735 
736 	/* we might need to change message format someday */
737 	memcpy(&tag, buf, 4);
738 	if (tag != 1) {
739 		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
740 		goto fail0;
741 	}
742 	buf += 4;
743 	len -= 4;
744 
745 	/* NOTE:  audio endpoint extensions not accepted here;
746 	 * just don't include the extra bytes.
747 	 */
748 
749 	/* full/low speed descriptor, then high speed */
750 	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
751 	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
752 			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
753 		goto fail0;
754 	if (len != USB_DT_ENDPOINT_SIZE) {
755 		if (len != 2 * USB_DT_ENDPOINT_SIZE)
756 			goto fail0;
757 		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
758 			USB_DT_ENDPOINT_SIZE);
759 		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
760 				|| data->hs_desc.bDescriptorType
761 					!= USB_DT_ENDPOINT) {
762 			DBG(data->dev, "config %s, bad hs length or type\n",
763 					data->name);
764 			goto fail0;
765 		}
766 	}
767 
768 	spin_lock_irq (&data->dev->lock);
769 	if (data->dev->state == STATE_DEV_UNBOUND) {
770 		value = -ENOENT;
771 		goto gone;
772 	} else {
773 		ep = data->ep;
774 		if (ep == NULL) {
775 			value = -ENODEV;
776 			goto gone;
777 		}
778 	}
779 	switch (data->dev->gadget->speed) {
780 	case USB_SPEED_LOW:
781 	case USB_SPEED_FULL:
782 		ep->desc = &data->desc;
783 		break;
784 	case USB_SPEED_HIGH:
785 		/* fails if caller didn't provide that descriptor... */
786 		ep->desc = &data->hs_desc;
787 		break;
788 	default:
789 		DBG(data->dev, "unconnected, %s init abandoned\n",
790 				data->name);
791 		value = -EINVAL;
792 		goto gone;
793 	}
794 	value = usb_ep_enable(ep);
795 	if (value == 0) {
796 		data->state = STATE_EP_ENABLED;
797 		value = length;
798 	}
799 gone:
800 	spin_unlock_irq (&data->dev->lock);
801 	if (value < 0) {
802 fail:
803 		data->desc.bDescriptorType = 0;
804 		data->hs_desc.bDescriptorType = 0;
805 	}
806 	return value;
807 fail0:
808 	value = -EINVAL;
809 	goto fail;
810 }
811 
812 static int
813 ep_open (struct inode *inode, struct file *fd)
814 {
815 	struct ep_data		*data = inode->i_private;
816 	int			value = -EBUSY;
817 
818 	if (mutex_lock_interruptible(&data->lock) != 0)
819 		return -EINTR;
820 	spin_lock_irq (&data->dev->lock);
821 	if (data->dev->state == STATE_DEV_UNBOUND)
822 		value = -ENOENT;
823 	else if (data->state == STATE_EP_DISABLED) {
824 		value = 0;
825 		data->state = STATE_EP_READY;
826 		get_ep (data);
827 		fd->private_data = data;
828 		VDEBUG (data->dev, "%s ready\n", data->name);
829 	} else
830 		DBG (data->dev, "%s state %d\n",
831 			data->name, data->state);
832 	spin_unlock_irq (&data->dev->lock);
833 	mutex_unlock(&data->lock);
834 	return value;
835 }
836 
837 /*----------------------------------------------------------------------*/
838 
839 /* EP0 IMPLEMENTATION can be partly in userspace.
840  *
841  * Drivers that use this facility receive various events, including
842  * control requests the kernel doesn't handle.  Drivers that don't
843  * use this facility may be too simple-minded for real applications.
844  */
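
/* A hedged sketch of handling one delegated SETUP in user space ("ep0" is
 * the open /dev/gadget/$CHIP descriptor; buf and the build_reply() helper
 * are illustrative assumptions).  As with other endpoints, a "wrong
 * direction" i/o call is how user space asks for a protocol stall:
 *
 *	#include <unistd.h>
 *	#include <asm/byteorder.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	struct usb_gadgetfs_event event;
 *	char buf[256];
 *
 *	read(ep0, &event, sizeof event);
 *	if (event.type == GADGETFS_SETUP) {
 *		struct usb_ctrlrequest *setup = &event.u.setup;
 *
 *		if (setup->bRequestType & USB_DIR_IN) {
 *			int reply = build_reply(buf, setup);	// hypothetical
 *
 *			if (reply < 0)
 *				read(ep0, buf, 0);	// wrong direction: stall
 *			else
 *				write(ep0, buf, reply);	// IN data + status
 *		} else {
 *			__u16 len = __le16_to_cpu(setup->wLength);
 *
 *			// collect the OUT data stage (zero length just acks)
 *			read(ep0, buf, len < sizeof buf ? len : sizeof buf);
 *		}
 *	}
 */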
845 
846 static inline void ep0_readable (struct dev_data *dev)
847 {
848 	wake_up (&dev->wait);
849 	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
850 }
851 
852 static void clean_req (struct usb_ep *ep, struct usb_request *req)
853 {
854 	struct dev_data		*dev = ep->driver_data;
855 
856 	if (req->buf != dev->rbuf) {
857 		kfree(req->buf);
858 		req->buf = dev->rbuf;
859 	}
860 	req->complete = epio_complete;
861 	dev->setup_out_ready = 0;
862 }
863 
864 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
865 {
866 	struct dev_data		*dev = ep->driver_data;
867 	unsigned long		flags;
868 	int			free = 1;
869 
870 	/* for control OUT, data must still get to userspace */
871 	spin_lock_irqsave(&dev->lock, flags);
872 	if (!dev->setup_in) {
873 		dev->setup_out_error = (req->status != 0);
874 		if (!dev->setup_out_error)
875 			free = 0;
876 		dev->setup_out_ready = 1;
877 		ep0_readable (dev);
878 	}
879 
880 	/* clean up as appropriate */
881 	if (free && req->buf != &dev->rbuf)
882 		clean_req (ep, req);
883 	req->complete = epio_complete;
884 	spin_unlock_irqrestore(&dev->lock, flags);
885 }
886 
887 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
888 {
889 	struct dev_data	*dev = ep->driver_data;
890 
891 	if (dev->setup_out_ready) {
892 		DBG (dev, "ep0 request busy!\n");
893 		return -EBUSY;
894 	}
895 	if (len > sizeof (dev->rbuf))
896 		req->buf = kmalloc(len, GFP_ATOMIC);
897 	if (req->buf == NULL) {
898 		req->buf = dev->rbuf;
899 		return -ENOMEM;
900 	}
901 	req->complete = ep0_complete;
902 	req->length = len;
903 	req->zero = 0;
904 	return 0;
905 }
906 
907 static ssize_t
908 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
909 {
910 	struct dev_data			*dev = fd->private_data;
911 	ssize_t				retval;
912 	enum ep0_state			state;
913 
914 	spin_lock_irq (&dev->lock);
915 	if (dev->state <= STATE_DEV_OPENED) {
916 		retval = -EINVAL;
917 		goto done;
918 	}
919 
920 	/* report fd mode change before acting on it */
921 	if (dev->setup_abort) {
922 		dev->setup_abort = 0;
923 		retval = -EIDRM;
924 		goto done;
925 	}
926 
927 	/* control DATA stage */
928 	if ((state = dev->state) == STATE_DEV_SETUP) {
929 
930 		if (dev->setup_in) {		/* stall IN */
931 			VDEBUG(dev, "ep0in stall\n");
932 			(void) usb_ep_set_halt (dev->gadget->ep0);
933 			retval = -EL2HLT;
934 			dev->state = STATE_DEV_CONNECTED;
935 
936 		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
937 			struct usb_ep		*ep = dev->gadget->ep0;
938 			struct usb_request	*req = dev->req;
939 
940 			if ((retval = setup_req (ep, req, 0)) == 0) {
941 				spin_unlock_irq (&dev->lock);
942 				retval = usb_ep_queue (ep, req, GFP_KERNEL);
943 				spin_lock_irq (&dev->lock);
944 			}
945 			dev->state = STATE_DEV_CONNECTED;
946 
947 			/* assume that was SET_CONFIGURATION */
948 			if (dev->current_config) {
949 				unsigned power;
950 
951 				if (gadget_is_dualspeed(dev->gadget)
952 						&& (dev->gadget->speed
953 							== USB_SPEED_HIGH))
954 					power = dev->hs_config->bMaxPower;
955 				else
956 					power = dev->config->bMaxPower;
957 				usb_gadget_vbus_draw(dev->gadget, 2 * power);
958 			}
959 
960 		} else {			/* collect OUT data */
961 			if ((fd->f_flags & O_NONBLOCK) != 0
962 					&& !dev->setup_out_ready) {
963 				retval = -EAGAIN;
964 				goto done;
965 			}
966 			spin_unlock_irq (&dev->lock);
967 			retval = wait_event_interruptible (dev->wait,
968 					dev->setup_out_ready != 0);
969 
970 			/* FIXME state could change from under us */
971 			spin_lock_irq (&dev->lock);
972 			if (retval)
973 				goto done;
974 
975 			if (dev->state != STATE_DEV_SETUP) {
976 				retval = -ECANCELED;
977 				goto done;
978 			}
979 			dev->state = STATE_DEV_CONNECTED;
980 
981 			if (dev->setup_out_error)
982 				retval = -EIO;
983 			else {
984 				len = min (len, (size_t)dev->req->actual);
985 // FIXME don't call this with the spinlock held ...
986 				if (copy_to_user (buf, dev->req->buf, len))
987 					retval = -EFAULT;
988 				else
989 					retval = len;
990 				clean_req (dev->gadget->ep0, dev->req);
991 				/* NOTE userspace can't yet choose to stall */
992 			}
993 		}
994 		goto done;
995 	}
996 
997 	/* else normal: return event data */
998 	if (len < sizeof dev->event [0]) {
999 		retval = -EINVAL;
1000 		goto done;
1001 	}
1002 	len -= len % sizeof (struct usb_gadgetfs_event);
1003 	dev->usermode_setup = 1;
1004 
1005 scan:
1006 	/* return queued events right away */
1007 	if (dev->ev_next != 0) {
1008 		unsigned		i, n;
1009 
1010 		n = len / sizeof (struct usb_gadgetfs_event);
1011 		if (dev->ev_next < n)
1012 			n = dev->ev_next;
1013 
1014 		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1015 		for (i = 0; i < n; i++) {
1016 			if (dev->event [i].type == GADGETFS_SETUP) {
1017 				dev->state = STATE_DEV_SETUP;
1018 				n = i + 1;
1019 				break;
1020 			}
1021 		}
1022 		spin_unlock_irq (&dev->lock);
1023 		len = n * sizeof (struct usb_gadgetfs_event);
1024 		if (copy_to_user (buf, &dev->event, len))
1025 			retval = -EFAULT;
1026 		else
1027 			retval = len;
1028 		if (len > 0) {
1029 			/* NOTE this doesn't guard against broken drivers;
1030 			 * concurrent ep0 readers may lose events.
1031 			 */
1032 			spin_lock_irq (&dev->lock);
1033 			if (dev->ev_next > n) {
1034 				memmove(&dev->event[0], &dev->event[n],
1035 					sizeof (struct usb_gadgetfs_event)
1036 						* (dev->ev_next - n));
1037 			}
1038 			dev->ev_next -= n;
1039 			spin_unlock_irq (&dev->lock);
1040 		}
1041 		return retval;
1042 	}
1043 	if (fd->f_flags & O_NONBLOCK) {
1044 		retval = -EAGAIN;
1045 		goto done;
1046 	}
1047 
1048 	switch (state) {
1049 	default:
1050 		DBG (dev, "fail %s, state %d\n", __func__, state);
1051 		retval = -ESRCH;
1052 		break;
1053 	case STATE_DEV_UNCONNECTED:
1054 	case STATE_DEV_CONNECTED:
1055 		spin_unlock_irq (&dev->lock);
1056 		DBG (dev, "%s wait\n", __func__);
1057 
1058 		/* wait for events */
1059 		retval = wait_event_interruptible (dev->wait,
1060 				dev->ev_next != 0);
1061 		if (retval < 0)
1062 			return retval;
1063 		spin_lock_irq (&dev->lock);
1064 		goto scan;
1065 	}
1066 
1067 done:
1068 	spin_unlock_irq (&dev->lock);
1069 	return retval;
1070 }
1071 
1072 static struct usb_gadgetfs_event *
1073 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1074 {
1075 	struct usb_gadgetfs_event	*event;
1076 	unsigned			i;
1077 
1078 	switch (type) {
1079 	/* these events purge the queue */
1080 	case GADGETFS_DISCONNECT:
1081 		if (dev->state == STATE_DEV_SETUP)
1082 			dev->setup_abort = 1;
1083 		// FALL THROUGH
1084 	case GADGETFS_CONNECT:
1085 		dev->ev_next = 0;
1086 		break;
1087 	case GADGETFS_SETUP:		/* previous request timed out */
1088 	case GADGETFS_SUSPEND:		/* same effect */
1089 		/* these events can't be repeated */
1090 		for (i = 0; i != dev->ev_next; i++) {
1091 			if (dev->event [i].type != type)
1092 				continue;
1093 			DBG(dev, "discard old event[%d] %d\n", i, type);
1094 			dev->ev_next--;
1095 			if (i == dev->ev_next)
1096 				break;
1097 			/* indices start at zero, for simplicity */
1098 			memmove (&dev->event [i], &dev->event [i + 1],
1099 				sizeof (struct usb_gadgetfs_event)
1100 					* (dev->ev_next - i));
1101 		}
1102 		break;
1103 	default:
1104 		BUG ();
1105 	}
1106 	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1107 	event = &dev->event [dev->ev_next++];
1108 	BUG_ON (dev->ev_next > N_EVENT);
1109 	memset (event, 0, sizeof *event);
1110 	event->type = type;
1111 	return event;
1112 }
1113 
1114 static ssize_t
1115 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1116 {
1117 	struct dev_data		*dev = fd->private_data;
1118 	ssize_t			retval = -ESRCH;
1119 
1120 	/* report fd mode change before acting on it */
1121 	if (dev->setup_abort) {
1122 		dev->setup_abort = 0;
1123 		retval = -EIDRM;
1124 
1125 	/* data and/or status stage for control request */
1126 	} else if (dev->state == STATE_DEV_SETUP) {
1127 
1128 		len = min_t(size_t, len, dev->setup_wLength);
1129 		if (dev->setup_in) {
1130 			retval = setup_req (dev->gadget->ep0, dev->req, len);
1131 			if (retval == 0) {
1132 				dev->state = STATE_DEV_CONNECTED;
1133 				spin_unlock_irq (&dev->lock);
1134 				if (copy_from_user (dev->req->buf, buf, len))
1135 					retval = -EFAULT;
1136 				else {
1137 					if (len < dev->setup_wLength)
1138 						dev->req->zero = 1;
1139 					retval = usb_ep_queue (
1140 						dev->gadget->ep0, dev->req,
1141 						GFP_KERNEL);
1142 				}
1143 				spin_lock_irq(&dev->lock);
1144 				if (retval < 0) {
1145 					clean_req (dev->gadget->ep0, dev->req);
1146 				} else
1147 					retval = len;
1148 
1149 				return retval;
1150 			}
1151 
1152 		/* can stall some OUT transfers */
1153 		} else if (dev->setup_can_stall) {
1154 			VDEBUG(dev, "ep0out stall\n");
1155 			(void) usb_ep_set_halt (dev->gadget->ep0);
1156 			retval = -EL2HLT;
1157 			dev->state = STATE_DEV_CONNECTED;
1158 		} else {
1159 			DBG(dev, "bogus ep0out stall!\n");
1160 		}
1161 	} else
1162 		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1163 
1164 	return retval;
1165 }
1166 
1167 static int
1168 ep0_fasync (int f, struct file *fd, int on)
1169 {
1170 	struct dev_data		*dev = fd->private_data;
1171 	// caller must F_SETOWN before signal delivery happens
1172 	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1173 	return fasync_helper (f, fd, on, &dev->fasync);
1174 }
1175 
1176 static struct usb_gadget_driver gadgetfs_driver;
1177 
1178 static int
1179 dev_release (struct inode *inode, struct file *fd)
1180 {
1181 	struct dev_data		*dev = fd->private_data;
1182 
1183 	/* closing ep0 === shutdown all */
1184 
1185 	if (dev->gadget_registered)
1186 		usb_gadget_unregister_driver (&gadgetfs_driver);
1187 
1188 	/* at this point "good" hardware has disconnected the
1189 	 * device from USB; the host won't see it any more.
1190 	 * alternatively, all host requests will time out.
1191 	 */
1192 
1193 	kfree (dev->buf);
1194 	dev->buf = NULL;
1195 
1196 	/* other endpoints were all decoupled from this device */
1197 	spin_lock_irq(&dev->lock);
1198 	dev->state = STATE_DEV_DISABLED;
1199 	spin_unlock_irq(&dev->lock);
1200 
1201 	put_dev (dev);
1202 	return 0;
1203 }
1204 
1205 static unsigned int
1206 ep0_poll (struct file *fd, poll_table *wait)
1207 {
1208 	struct dev_data		*dev = fd->private_data;
1209 	int			mask = 0;
1210
1211 	if (dev->state <= STATE_DEV_OPENED)
1212 		return DEFAULT_POLLMASK;
1213
1214 	poll_wait(fd, &dev->wait, wait);
1215
1216 	spin_lock_irq (&dev->lock);
1217
1218 	/* report fd mode change before acting on it */
1219 	if (dev->setup_abort) {
1220 		dev->setup_abort = 0;
1221 		mask = POLLHUP;
1222 		goto out;
1223 	}
1224
1225 	if (dev->state == STATE_DEV_SETUP) {
1226 		if (dev->setup_in || dev->setup_can_stall)
1227 			mask = POLLOUT;
1228 	} else {
1229 		if (dev->ev_next != 0)
1230 			mask = POLLIN;
1231 	}
1232 out:
1233 	spin_unlock_irq(&dev->lock);
1234 	return mask;
1235 }
1236 
1237 static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1238 {
1239 	struct dev_data		*dev = fd->private_data;
1240 	struct usb_gadget	*gadget = dev->gadget;
1241 	long ret = -ENOTTY;
1242 
1243 	if (gadget->ops->ioctl)
1244 		ret = gadget->ops->ioctl (gadget, code, value);
1245 
1246 	return ret;
1247 }
1248 
1249 /*----------------------------------------------------------------------*/
1250 
1251 /* The in-kernel gadget driver handles most ep0 issues, in particular
1252  * enumerating the single configuration (as provided from user space).
1253  *
1254  * Unrecognized ep0 requests may be handled in user space.
1255  */
1256 
1257 static void make_qualifier (struct dev_data *dev)
1258 {
1259 	struct usb_qualifier_descriptor		qual;
1260 	struct usb_device_descriptor		*desc;
1261 
1262 	qual.bLength = sizeof qual;
1263 	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1264 	qual.bcdUSB = cpu_to_le16 (0x0200);
1265 
1266 	desc = dev->dev;
1267 	qual.bDeviceClass = desc->bDeviceClass;
1268 	qual.bDeviceSubClass = desc->bDeviceSubClass;
1269 	qual.bDeviceProtocol = desc->bDeviceProtocol;
1270 
1271 	/* assumes ep0 uses the same value for both speeds ... */
1272 	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1273 
1274 	qual.bNumConfigurations = 1;
1275 	qual.bRESERVED = 0;
1276 
1277 	memcpy (dev->rbuf, &qual, sizeof qual);
1278 }
1279 
1280 static int
1281 config_buf (struct dev_data *dev, u8 type, unsigned index)
1282 {
1283 	int		len;
1284 	int		hs = 0;
1285 
1286 	/* only one configuration */
1287 	if (index > 0)
1288 		return -EINVAL;
1289 
1290 	if (gadget_is_dualspeed(dev->gadget)) {
1291 		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1292 		if (type == USB_DT_OTHER_SPEED_CONFIG)
1293 			hs = !hs;
1294 	}
1295 	if (hs) {
1296 		dev->req->buf = dev->hs_config;
1297 		len = le16_to_cpu(dev->hs_config->wTotalLength);
1298 	} else {
1299 		dev->req->buf = dev->config;
1300 		len = le16_to_cpu(dev->config->wTotalLength);
1301 	}
1302 	((u8 *)dev->req->buf) [1] = type;
1303 	return len;
1304 }
1305 
1306 static int
1307 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1308 {
1309 	struct dev_data			*dev = get_gadget_data (gadget);
1310 	struct usb_request		*req = dev->req;
1311 	int				value = -EOPNOTSUPP;
1312 	struct usb_gadgetfs_event	*event;
1313 	u16				w_value = le16_to_cpu(ctrl->wValue);
1314 	u16				w_length = le16_to_cpu(ctrl->wLength);
1315 
1316 	spin_lock (&dev->lock);
1317 	dev->setup_abort = 0;
1318 	if (dev->state == STATE_DEV_UNCONNECTED) {
1319 		if (gadget_is_dualspeed(gadget)
1320 				&& gadget->speed == USB_SPEED_HIGH
1321 				&& dev->hs_config == NULL) {
1322 			spin_unlock(&dev->lock);
1323 			ERROR (dev, "no high speed config??\n");
1324 			return -EINVAL;
1325 		}
1326 
1327 		dev->state = STATE_DEV_CONNECTED;
1328 
1329 		INFO (dev, "connected\n");
1330 		event = next_event (dev, GADGETFS_CONNECT);
1331 		event->u.speed = gadget->speed;
1332 		ep0_readable (dev);
1333 
1334 	/* host may have given up waiting for response.  we can miss control
1335 	 * requests handled lower down (device/endpoint status and features);
1336 	 * then ep0_{read,write} will report the wrong status. controller
1337 	 * driver will have aborted pending i/o.
1338 	 */
1339 	} else if (dev->state == STATE_DEV_SETUP)
1340 		dev->setup_abort = 1;
1341 
1342 	req->buf = dev->rbuf;
1343 	req->context = NULL;
1344 	value = -EOPNOTSUPP;
1345 	switch (ctrl->bRequest) {
1346 
1347 	case USB_REQ_GET_DESCRIPTOR:
1348 		if (ctrl->bRequestType != USB_DIR_IN)
1349 			goto unrecognized;
1350 		switch (w_value >> 8) {
1351 
1352 		case USB_DT_DEVICE:
1353 			value = min (w_length, (u16) sizeof *dev->dev);
1354 			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1355 			req->buf = dev->dev;
1356 			break;
1357 		case USB_DT_DEVICE_QUALIFIER:
1358 			if (!dev->hs_config)
1359 				break;
1360 			value = min (w_length, (u16)
1361 				sizeof (struct usb_qualifier_descriptor));
1362 			make_qualifier (dev);
1363 			break;
1364 		case USB_DT_OTHER_SPEED_CONFIG:
1365 			// FALLTHROUGH
1366 		case USB_DT_CONFIG:
1367 			value = config_buf (dev,
1368 					w_value >> 8,
1369 					w_value & 0xff);
1370 			if (value >= 0)
1371 				value = min (w_length, (u16) value);
1372 			break;
1373 		case USB_DT_STRING:
1374 			goto unrecognized;
1375 
1376 		default:		// all others are errors
1377 			break;
1378 		}
1379 		break;
1380 
1381 	/* currently one config, two speeds */
1382 	case USB_REQ_SET_CONFIGURATION:
1383 		if (ctrl->bRequestType != 0)
1384 			goto unrecognized;
1385 		if (0 == (u8) w_value) {
1386 			value = 0;
1387 			dev->current_config = 0;
1388 			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1389 			// user mode expected to disable endpoints
1390 		} else {
1391 			u8	config, power;
1392 
1393 			if (gadget_is_dualspeed(gadget)
1394 					&& gadget->speed == USB_SPEED_HIGH) {
1395 				config = dev->hs_config->bConfigurationValue;
1396 				power = dev->hs_config->bMaxPower;
1397 			} else {
1398 				config = dev->config->bConfigurationValue;
1399 				power = dev->config->bMaxPower;
1400 			}
1401 
1402 			if (config == (u8) w_value) {
1403 				value = 0;
1404 				dev->current_config = config;
1405 				usb_gadget_vbus_draw(gadget, 2 * power);
1406 			}
1407 		}
1408 
1409 		/* report SET_CONFIGURATION like any other control request,
1410 		 * except that usermode may not stall this.  the next
1411 		 * request mustn't be allowed to start until this finishes:
1412 		 * endpoints and threads set up, etc.
1413 		 *
1414 		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1415 		 * has bad/racey automagic that prevents synchronizing here.
1416 		 * even kernel mode drivers often miss them.
1417 		 */
1418 		if (value == 0) {
1419 			INFO (dev, "configuration #%d\n", dev->current_config);
1420 			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1421 			if (dev->usermode_setup) {
1422 				dev->setup_can_stall = 0;
1423 				goto delegate;
1424 			}
1425 		}
1426 		break;
1427 
1428 #ifndef	CONFIG_USB_PXA25X
1429 	/* PXA automagically handles this request too */
1430 	case USB_REQ_GET_CONFIGURATION:
1431 		if (ctrl->bRequestType != 0x80)
1432 			goto unrecognized;
1433 		*(u8 *)req->buf = dev->current_config;
1434 		value = min (w_length, (u16) 1);
1435 		break;
1436 #endif
1437 
1438 	default:
1439 unrecognized:
1440 		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1441 			dev->usermode_setup ? "delegate" : "fail",
1442 			ctrl->bRequestType, ctrl->bRequest,
1443 			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1444 
1445 		/* if there's an ep0 reader, don't stall */
1446 		if (dev->usermode_setup) {
1447 			dev->setup_can_stall = 1;
1448 delegate:
1449 			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1450 						? 1 : 0;
1451 			dev->setup_wLength = w_length;
1452 			dev->setup_out_ready = 0;
1453 			dev->setup_out_error = 0;
1454 			value = 0;
1455 
1456 			/* read DATA stage for OUT right away */
1457 			if (unlikely (!dev->setup_in && w_length)) {
1458 				value = setup_req (gadget->ep0, dev->req,
1459 							w_length);
1460 				if (value < 0)
1461 					break;
1462 
1463 				spin_unlock (&dev->lock);
1464 				value = usb_ep_queue (gadget->ep0, dev->req,
1465 							GFP_KERNEL);
1466 				spin_lock (&dev->lock);
1467 				if (value < 0) {
1468 					clean_req (gadget->ep0, dev->req);
1469 					break;
1470 				}
1471 
1472 				/* we can't currently stall these */
1473 				dev->setup_can_stall = 0;
1474 			}
1475 
1476 			/* state changes when reader collects event */
1477 			event = next_event (dev, GADGETFS_SETUP);
1478 			event->u.setup = *ctrl;
1479 			ep0_readable (dev);
1480 			spin_unlock (&dev->lock);
1481 			return 0;
1482 		}
1483 	}
1484 
1485 	/* proceed with data transfer and status phases? */
1486 	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1487 		req->length = value;
1488 		req->zero = value < w_length;
1489 
1490 		spin_unlock (&dev->lock);
1491 		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1492 		if (value < 0) {
1493 			DBG (dev, "ep_queue --> %d\n", value);
1494 			req->status = 0;
1495 		}
1496 		return value;
1497 	}
1498 
1499 	/* device stalls when value < 0 */
1500 	spin_unlock (&dev->lock);
1501 	return value;
1502 }
1503 
1504 static void destroy_ep_files (struct dev_data *dev)
1505 {
1506 	DBG (dev, "%s %d\n", __func__, dev->state);
1507 
1508 	/* dev->state must prevent interference */
1509 	spin_lock_irq (&dev->lock);
1510 	while (!list_empty(&dev->epfiles)) {
1511 		struct ep_data	*ep;
1512 		struct inode	*parent;
1513 		struct dentry	*dentry;
1514 
1515 		/* break link to FS */
1516 		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1517 		list_del_init (&ep->epfiles);
1518 		dentry = ep->dentry;
1519 		ep->dentry = NULL;
1520 		parent = d_inode(dentry->d_parent);
1521 
1522 		/* break link to controller */
1523 		if (ep->state == STATE_EP_ENABLED)
1524 			(void) usb_ep_disable (ep->ep);
1525 		ep->state = STATE_EP_UNBOUND;
1526 		usb_ep_free_request (ep->ep, ep->req);
1527 		ep->ep = NULL;
1528 		wake_up (&ep->wait);
1529 		put_ep (ep);
1530 
1531 		spin_unlock_irq (&dev->lock);
1532 
1533 		/* break link to dcache */
1534 		inode_lock(parent);
1535 		d_delete (dentry);
1536 		dput (dentry);
1537 		inode_unlock(parent);
1538 
1539 		spin_lock_irq (&dev->lock);
1540 	}
1541 	spin_unlock_irq (&dev->lock);
1542 }
1543 
1544 
1545 static struct dentry *
1546 gadgetfs_create_file (struct super_block *sb, char const *name,
1547 		void *data, const struct file_operations *fops);
1548 
1549 static int activate_ep_files (struct dev_data *dev)
1550 {
1551 	struct usb_ep	*ep;
1552 	struct ep_data	*data;
1553 
1554 	gadget_for_each_ep (ep, dev->gadget) {
1555 
1556 		data = kzalloc(sizeof(*data), GFP_KERNEL);
1557 		if (!data)
1558 			goto enomem0;
1559 		data->state = STATE_EP_DISABLED;
1560 		mutex_init(&data->lock);
1561 		init_waitqueue_head (&data->wait);
1562 
1563 		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1564 		atomic_set (&data->count, 1);
1565 		data->dev = dev;
1566 		get_dev (dev);
1567 
1568 		data->ep = ep;
1569 		ep->driver_data = data;
1570 
1571 		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1572 		if (!data->req)
1573 			goto enomem1;
1574 
1575 		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1576 				data, &ep_io_operations);
1577 		if (!data->dentry)
1578 			goto enomem2;
1579 		list_add_tail (&data->epfiles, &dev->epfiles);
1580 	}
1581 	return 0;
1582 
1583 enomem2:
1584 	usb_ep_free_request (ep, data->req);
1585 enomem1:
1586 	put_dev (dev);
1587 	kfree (data);
1588 enomem0:
1589 	DBG (dev, "%s enomem\n", __func__);
1590 	destroy_ep_files (dev);
1591 	return -ENOMEM;
1592 }
1593 
1594 static void
1595 gadgetfs_unbind (struct usb_gadget *gadget)
1596 {
1597 	struct dev_data		*dev = get_gadget_data (gadget);
1598 
1599 	DBG (dev, "%s\n", __func__);
1600 
1601 	spin_lock_irq (&dev->lock);
1602 	dev->state = STATE_DEV_UNBOUND;
1603 	spin_unlock_irq (&dev->lock);
1604 
1605 	destroy_ep_files (dev);
1606 	gadget->ep0->driver_data = NULL;
1607 	set_gadget_data (gadget, NULL);
1608 
1609 	/* we've already been disconnected ... no i/o is active */
1610 	if (dev->req)
1611 		usb_ep_free_request (gadget->ep0, dev->req);
1612 	DBG (dev, "%s done\n", __func__);
1613 	put_dev (dev);
1614 }
1615 
1616 static struct dev_data		*the_device;
1617 
1618 static int gadgetfs_bind(struct usb_gadget *gadget,
1619 		struct usb_gadget_driver *driver)
1620 {
1621 	struct dev_data		*dev = the_device;
1622 
1623 	if (!dev)
1624 		return -ESRCH;
1625 	if (0 != strcmp (CHIP, gadget->name)) {
1626 		pr_err("%s expected %s controller not %s\n",
1627 			shortname, CHIP, gadget->name);
1628 		return -ENODEV;
1629 	}
1630 
1631 	set_gadget_data (gadget, dev);
1632 	dev->gadget = gadget;
1633 	gadget->ep0->driver_data = dev;
1634 
1635 	/* preallocate control response and buffer */
1636 	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1637 	if (!dev->req)
1638 		goto enomem;
1639 	dev->req->context = NULL;
1640 	dev->req->complete = epio_complete;
1641 
1642 	if (activate_ep_files (dev) < 0)
1643 		goto enomem;
1644 
1645 	INFO (dev, "bound to %s driver\n", gadget->name);
1646 	spin_lock_irq(&dev->lock);
1647 	dev->state = STATE_DEV_UNCONNECTED;
1648 	spin_unlock_irq(&dev->lock);
1649 	get_dev (dev);
1650 	return 0;
1651 
1652 enomem:
1653 	gadgetfs_unbind (gadget);
1654 	return -ENOMEM;
1655 }
1656 
1657 static void
1658 gadgetfs_disconnect (struct usb_gadget *gadget)
1659 {
1660 	struct dev_data		*dev = get_gadget_data (gadget);
1661 	unsigned long		flags;
1662 
1663 	spin_lock_irqsave (&dev->lock, flags);
1664 	if (dev->state == STATE_DEV_UNCONNECTED)
1665 		goto exit;
1666 	dev->state = STATE_DEV_UNCONNECTED;
1667 
1668 	INFO (dev, "disconnected\n");
1669 	next_event (dev, GADGETFS_DISCONNECT);
1670 	ep0_readable (dev);
1671 exit:
1672 	spin_unlock_irqrestore (&dev->lock, flags);
1673 }
1674 
1675 static void
1676 gadgetfs_suspend (struct usb_gadget *gadget)
1677 {
1678 	struct dev_data		*dev = get_gadget_data (gadget);
1679 
1680 	INFO (dev, "suspended from state %d\n", dev->state);
1681 	spin_lock (&dev->lock);
1682 	switch (dev->state) {
1683 	case STATE_DEV_SETUP:		// VERY odd... host died??
1684 	case STATE_DEV_CONNECTED:
1685 	case STATE_DEV_UNCONNECTED:
1686 		next_event (dev, GADGETFS_SUSPEND);
1687 		ep0_readable (dev);
1688 		/* FALLTHROUGH */
1689 	default:
1690 		break;
1691 	}
1692 	spin_unlock (&dev->lock);
1693 }
1694 
1695 static struct usb_gadget_driver gadgetfs_driver = {
1696 	.function	= (char *) driver_desc,
1697 	.bind		= gadgetfs_bind,
1698 	.unbind		= gadgetfs_unbind,
1699 	.setup		= gadgetfs_setup,
1700 	.reset		= gadgetfs_disconnect,
1701 	.disconnect	= gadgetfs_disconnect,
1702 	.suspend	= gadgetfs_suspend,
1703 
1704 	.driver	= {
1705 		.name		= (char *) shortname,
1706 	},
1707 };
1708 
1709 /*----------------------------------------------------------------------*/
1710 /* DEVICE INITIALIZATION
1711  *
1712  *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1713  *     status = write (fd, descriptors, sizeof descriptors)
1714  *
1715  * That write establishes the device configuration, so the kernel can
1716  * bind to the controller ... guaranteeing it can handle enumeration
1717  * at all necessary speeds.  Descriptor order is:
1718  *
1719  * . message tag (u32, host order) ... for now, must be zero; it
1720  *	would change to support features like multi-config devices
1721  * . full/low speed config ... all wTotalLength bytes (with interface,
1722  *	class, altsetting, endpoint, and other descriptors)
1723  * . high speed config ... all descriptors, for high speed operation;
1724  *	this one's optional except for high-speed hardware
1725  * . device descriptor
1726  *
1727  * Endpoints are not yet enabled. Drivers must wait until device
1728  * configuration and interface altsetting changes create
1729  * the need to configure (or unconfigure) them.
1730  *
1731  * After initialization, the device stays active for as long as that
1732  * $CHIP file is open.  Events must then be read from that descriptor,
1733  * such as configuration notifications.
1734  */
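
/* A hedged sketch of assembling that write ("ep0", the 4 KB buffer, the
 * build_*_config() helpers, and device_desc (a filled-in struct
 * usb_device_descriptor) are illustrative assumptions):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	__u32 tag = 0;
 *	char buf[4096], *cp = buf;
 *
 *	memcpy(cp, &tag, 4);
 *	cp += 4;
 *	cp += build_fs_config(cp);	// hypothetical: config + interface +
 *					// endpoint descriptors, wTotalLength
 *					// covering all of them
 *	cp += build_hs_config(cp);	// hypothetical: same layout, high speed
 *	memcpy(cp, &device_desc, USB_DT_DEVICE_SIZE);
 *	cp += USB_DT_DEVICE_SIZE;
 *	write(ep0, buf, cp - buf);
 */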
1735 
1736 static int is_valid_config(struct usb_config_descriptor *config,
1737 		unsigned int total)
1738 {
1739 	return config->bDescriptorType == USB_DT_CONFIG
1740 		&& config->bLength == USB_DT_CONFIG_SIZE
1741 		&& total >= USB_DT_CONFIG_SIZE
1742 		&& config->bConfigurationValue != 0
1743 		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1744 		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1745 	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1746 	/* FIXME check lengths: walk to end */
1747 }
1748 
1749 static ssize_t
1750 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1751 {
1752 	struct dev_data		*dev = fd->private_data;
1753 	ssize_t			value = len, length = len;
1754 	unsigned		total;
1755 	u32			tag;
1756 	char			*kbuf;
1757 
1758 	spin_lock_irq(&dev->lock);
1759 	if (dev->state > STATE_DEV_OPENED) {
1760 		value = ep0_write(fd, buf, len, ptr);
1761 		spin_unlock_irq(&dev->lock);
1762 		return value;
1763 	}
1764 	spin_unlock_irq(&dev->lock);
1765 
1766 	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1767 	    (len > PAGE_SIZE * 4))
1768 		return -EINVAL;
1769 
1770 	/* we might need to change message format someday */
1771 	if (copy_from_user (&tag, buf, 4))
1772 		return -EFAULT;
1773 	if (tag != 0)
1774 		return -EINVAL;
1775 	buf += 4;
1776 	length -= 4;
1777 
1778 	kbuf = memdup_user(buf, length);
1779 	if (IS_ERR(kbuf))
1780 		return PTR_ERR(kbuf);
1781 
1782 	spin_lock_irq (&dev->lock);
1783 	value = -EINVAL;
1784 	if (dev->buf) {
1785 		kfree(kbuf);
1786 		goto fail;
1787 	}
1788 	dev->buf = kbuf;
1789 
1790 	/* full or low speed config */
1791 	dev->config = (void *) kbuf;
1792 	total = le16_to_cpu(dev->config->wTotalLength);
1793 	if (!is_valid_config(dev->config, total) ||
1794 			total > length - USB_DT_DEVICE_SIZE)
1795 		goto fail;
1796 	kbuf += total;
1797 	length -= total;
1798 
1799 	/* optional high speed config */
1800 	if (kbuf [1] == USB_DT_CONFIG) {
1801 		dev->hs_config = (void *) kbuf;
1802 		total = le16_to_cpu(dev->hs_config->wTotalLength);
1803 		if (!is_valid_config(dev->hs_config, total) ||
1804 				total > length - USB_DT_DEVICE_SIZE)
1805 			goto fail;
1806 		kbuf += total;
1807 		length -= total;
1808 	} else {
1809 		dev->hs_config = NULL;
1810 	}
1811 
1812 	/* could support multiple configs, using another encoding! */
1813 
1814 	/* device descriptor (tweaked for paranoia) */
1815 	if (length != USB_DT_DEVICE_SIZE)
1816 		goto fail;
1817 	dev->dev = (void *)kbuf;
1818 	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1819 			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1820 			|| dev->dev->bNumConfigurations != 1)
1821 		goto fail;
1822 	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1823 
1824 	/* triggers gadgetfs_bind(); then we can enumerate. */
1825 	spin_unlock_irq (&dev->lock);
1826 	if (dev->hs_config)
1827 		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1828 	else
1829 		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1830 
1831 	value = usb_gadget_probe_driver(&gadgetfs_driver);
1832 	if (value != 0) {
1833 		kfree (dev->buf);
1834 		dev->buf = NULL;
1835 	} else {
1836 		/* at this point "good" hardware has for the first time
1837 		 * let the USB host see us.  alternatively, if users
1838 		 * unplug/replug that will clear all the error state.
1839 		 *
1840 		 * note:  everything running before here was guaranteed
1841 		 * to choke driver model style diagnostics.  from here
1842 		 * on, they can work ... except in cleanup paths that
1843 		 * kick in after the ep0 descriptor is closed.
1844 		 */
1845 		value = len;
1846 		dev->gadget_registered = true;
1847 	}
1848 	return value;
1849 
1850 fail:
1851 	spin_unlock_irq (&dev->lock);
1852 	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1853 	kfree (dev->buf);
1854 	dev->buf = NULL;
1855 	return value;
1856 }
1857 
1858 static int
1859 dev_open (struct inode *inode, struct file *fd)
1860 {
1861 	struct dev_data		*dev = inode->i_private;
1862 	int			value = -EBUSY;
1863 
1864 	spin_lock_irq(&dev->lock);
1865 	if (dev->state == STATE_DEV_DISABLED) {
1866 		dev->ev_next = 0;
1867 		dev->state = STATE_DEV_OPENED;
1868 		fd->private_data = dev;
1869 		get_dev (dev);
1870 		value = 0;
1871 	}
1872 	spin_unlock_irq(&dev->lock);
1873 	return value;
1874 }
1875 
1876 static const struct file_operations ep0_operations = {
1877 	.llseek =	no_llseek,
1878 
1879 	.open =		dev_open,
1880 	.read =		ep0_read,
1881 	.write =	dev_config,
1882 	.fasync =	ep0_fasync,
1883 	.poll =		ep0_poll,
1884 	.unlocked_ioctl = dev_ioctl,
1885 	.release =	dev_release,
1886 };
1887 
1888 /*----------------------------------------------------------------------*/
1889 
1890 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1891  *
1892  * Mounting the filesystem creates a controller file, used first for
1893  * device configuration then later for event monitoring.
1894  */
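
/* In user space that typically looks like the sketch below (the mount
 * point is an assumption; any empty directory will do):
 *
 *	#include <sys/stat.h>
 *	#include <sys/mount.h>
 *
 *	mkdir("/dev/gadget", 0700);
 *	mount("none", "/dev/gadget", "gadgetfs", 0, NULL);
 */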
1895 
1896 
1897 /* FIXME PAM etc could set this security policy without mount options
1898  * if epfiles inherited ownership and permissions from ep0 ...
1899  */
1900 
1901 static unsigned default_uid;
1902 static unsigned default_gid;
1903 static unsigned default_perm = S_IRUSR | S_IWUSR;
1904 
1905 module_param (default_uid, uint, 0644);
1906 module_param (default_gid, uint, 0644);
1907 module_param (default_perm, uint, 0644);
1908 
1909 
1910 static struct inode *
1911 gadgetfs_make_inode (struct super_block *sb,
1912 		void *data, const struct file_operations *fops,
1913 		int mode)
1914 {
1915 	struct inode *inode = new_inode (sb);
1916 
1917 	if (inode) {
1918 		inode->i_ino = get_next_ino();
1919 		inode->i_mode = mode;
1920 		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1921 		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1922 		inode->i_atime = inode->i_mtime = inode->i_ctime
1923 				= current_time(inode);
1924 		inode->i_private = data;
1925 		inode->i_fop = fops;
1926 	}
1927 	return inode;
1928 }
1929 
1930 /* creates the file in the fs root directory, so it's non-renamable and
1931  * non-linkable; the inode and dentry stay paired until device reconfig.
1932  */
1933 static struct dentry *
1934 gadgetfs_create_file (struct super_block *sb, char const *name,
1935 		void *data, const struct file_operations *fops)
1936 {
1937 	struct dentry	*dentry;
1938 	struct inode	*inode;
1939 
1940 	dentry = d_alloc_name(sb->s_root, name);
1941 	if (!dentry)
1942 		return NULL;
1943 
1944 	inode = gadgetfs_make_inode (sb, data, fops,
1945 			S_IFREG | (default_perm & S_IRWXUGO));
1946 	if (!inode) {
1947 		dput(dentry);
1948 		return NULL;
1949 	}
1950 	d_add (dentry, inode);
1951 	return dentry;
1952 }
1953 
1954 static const struct super_operations gadget_fs_operations = {
1955 	.statfs =	simple_statfs,
1956 	.drop_inode =	generic_delete_inode,
1957 };
1958 
1959 static int
1960 gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
1961 {
1962 	struct inode	*inode;
1963 	struct dev_data	*dev;
1964 
1965 	if (the_device)
1966 		return -ESRCH;
1967 
1968 	CHIP = usb_get_gadget_udc_name();
1969 	if (!CHIP)
1970 		return -ENODEV;
1971 
1972 	/* superblock */
1973 	sb->s_blocksize = PAGE_SIZE;
1974 	sb->s_blocksize_bits = PAGE_SHIFT;
1975 	sb->s_magic = GADGETFS_MAGIC;
1976 	sb->s_op = &gadget_fs_operations;
1977 	sb->s_time_gran = 1;
1978 
1979 	/* root inode */
1980 	inode = gadgetfs_make_inode (sb,
1981 			NULL, &simple_dir_operations,
1982 			S_IFDIR | S_IRUGO | S_IXUGO);
1983 	if (!inode)
1984 		goto Enomem;
1985 	inode->i_op = &simple_dir_inode_operations;
1986 	if (!(sb->s_root = d_make_root (inode)))
1987 		goto Enomem;
1988 
1989 	/* the ep0 file is named after the controller we expect;
1990 	 * user mode code can use it for sanity checks, like we do.
1991 	 */
1992 	dev = dev_new ();
1993 	if (!dev)
1994 		goto Enomem;
1995 
1996 	dev->sb = sb;
1997 	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
1998 	if (!dev->dentry) {
1999 		put_dev(dev);
2000 		goto Enomem;
2001 	}
2002 
2003 	/* other endpoint files are available after hardware setup,
2004 	 * from binding to a controller.
2005 	 */
2006 	the_device = dev;
2007 	return 0;
2008 
2009 Enomem:
2010 	return -ENOMEM;
2011 }
2012 
2013 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2014 static struct dentry *
2015 gadgetfs_mount (struct file_system_type *t, int flags,
2016 		const char *path, void *opts)
2017 {
2018 	return mount_single (t, flags, opts, gadgetfs_fill_super);
2019 }
2020 
2021 static void
2022 gadgetfs_kill_sb (struct super_block *sb)
2023 {
2024 	kill_litter_super (sb);
2025 	if (the_device) {
2026 		put_dev (the_device);
2027 		the_device = NULL;
2028 	}
2029 	kfree(CHIP);
2030 	CHIP = NULL;
2031 }
2032 
2033 /*----------------------------------------------------------------------*/
2034 
2035 static struct file_system_type gadgetfs_type = {
2036 	.owner		= THIS_MODULE,
2037 	.name		= shortname,
2038 	.mount		= gadgetfs_mount,
2039 	.kill_sb	= gadgetfs_kill_sb,
2040 };
2041 MODULE_ALIAS_FS("gadgetfs");
2042 
2043 /*----------------------------------------------------------------------*/
2044 
2045 static int __init init (void)
2046 {
2047 	int status;
2048 
2049 	status = register_filesystem (&gadgetfs_type);
2050 	if (status == 0)
2051 		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2052 			shortname, driver_desc);
2053 	return status;
2054 }
2055 module_init (init);
2056 
2057 static void __exit cleanup (void)
2058 {
2059 	pr_debug ("unregister %s\n", shortname);
2060 	unregister_filesystem (&gadgetfs_type);
2061 }
2062 module_exit (cleanup);
2063 
2064