1 /*
 * Handles the Intel PXA27x USB Device Controller (UDC)
3  *
4  * Inspired by original driver by Frank Becker, David Brownell, and others.
5  * Copyright (C) 2008 Robert Jarzmik
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/platform_device.h>
18 #include <linux/delay.h>
19 #include <linux/list.h>
20 #include <linux/interrupt.h>
21 #include <linux/proc_fs.h>
22 #include <linux/clk.h>
23 #include <linux/irq.h>
24 #include <linux/gpio.h>
25 #include <linux/gpio/consumer.h>
26 #include <linux/slab.h>
27 #include <linux/prefetch.h>
28 #include <linux/byteorder/generic.h>
29 #include <linux/platform_data/pxa2xx_udc.h>
30 #include <linux/of_device.h>
31 #include <linux/of_gpio.h>
32 
33 #include <linux/usb.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 
37 #include "pxa27x_udc.h"
38 
39 /*
40  * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
41  * series processors.
42  *
43  * Such controller drivers work with a gadget driver.  The gadget driver
44  * returns descriptors, implements configuration and data protocols used
45  * by the host to interact with this device, and allocates endpoints to
46  * the different protocol interfaces.  The controller driver virtualizes
47  * usb hardware so that the gadget drivers will be more portable.
48  *
49  * This UDC hardware wants to implement a bit too much USB protocol. The
50  * biggest issues are:  that the endpoints have to be set up before the
51  * controller can be enabled (minor, and not uncommon); and each endpoint
52  * can only have one configuration, interface and alternative interface
53  * number (major, and very unusual). Once set up, these cannot be changed
54  * without a controller reset.
55  *
 * The workaround is to set up all combinations necessary for the gadgets which
 * will work with this driver. This is done statically in the pxa_udc structure.
58  * See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep.
59  * (You could modify this if needed.  Some drivers have a "fifo_mode" module
60  * parameter to facilitate such changes.)
61  *
62  * The combinations have been tested with these gadgets :
63  *  - zero gadget
64  *  - file storage gadget
65  *  - ether gadget
66  *
67  * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
68  * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
69  *
 * All the requests are handled the same way :
 *  - the driver tries to handle the request directly through fifo IO
 *  - if the IO fifo is not big enough, the remainder is sent/received in
 *    the interrupt handler.
74  */
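
/*
 * For illustration only (hypothetical gadget-side code, not part of this
 * driver), a request enters that flow roughly as :
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_KERNEL);
 *
 * usb_ep_queue() lands in pxa_ep_queue() below : whatever fits in the fifo is
 * transferred at once, and the remainder is completed later from the endpoint
 * interrupt path (see handle_ep()).
 */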
75 
76 #define	DRIVER_VERSION	"2008-04-18"
77 #define	DRIVER_DESC	"PXA 27x USB Device Controller driver"
78 
79 static const char driver_name[] = "pxa27x_udc";
80 static struct pxa_udc *the_controller;
81 
82 static void handle_ep(struct pxa_ep *ep);
83 
84 /*
85  * Debug filesystem
86  */
87 #ifdef CONFIG_USB_GADGET_DEBUG_FS
88 
89 #include <linux/debugfs.h>
90 #include <linux/uaccess.h>
91 #include <linux/seq_file.h>
92 
93 static int state_dbg_show(struct seq_file *s, void *p)
94 {
95 	struct pxa_udc *udc = s->private;
96 	int pos = 0, ret;
97 	u32 tmp;
98 
99 	ret = -ENODEV;
100 	if (!udc->driver)
101 		goto out;
102 
103 	/* basic device status */
104 	pos += seq_printf(s, DRIVER_DESC "\n"
105 			 "%s version: %s\nGadget driver: %s\n",
106 			 driver_name, DRIVER_VERSION,
107 			 udc->driver ? udc->driver->driver.name : "(none)");
108 
109 	tmp = udc_readl(udc, UDCCR);
110 	pos += seq_printf(s,
111 			 "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), "
112 			 "con=%d,inter=%d,altinter=%d\n", tmp,
113 			 (tmp & UDCCR_OEN) ? " oen":"",
114 			 (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
115 			 (tmp & UDCCR_AHNP) ? " rem" : "",
116 			 (tmp & UDCCR_BHNP) ? " rstir" : "",
117 			 (tmp & UDCCR_DWRE) ? " dwre" : "",
118 			 (tmp & UDCCR_SMAC) ? " smac" : "",
119 			 (tmp & UDCCR_EMCE) ? " emce" : "",
120 			 (tmp & UDCCR_UDR) ? " udr" : "",
121 			 (tmp & UDCCR_UDA) ? " uda" : "",
122 			 (tmp & UDCCR_UDE) ? " ude" : "",
123 			 (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
124 			 (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
125 			 (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
126 	/* registers for device and ep0 */
127 	pos += seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
128 			udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
129 	pos += seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
130 			udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
131 	pos += seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
132 	pos += seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, "
133 			"reconfig=%lu\n",
134 			udc->stats.irqs_reset, udc->stats.irqs_suspend,
135 			udc->stats.irqs_resume, udc->stats.irqs_reconfig);
136 
137 	ret = 0;
138 out:
139 	return ret;
140 }
141 
142 static int queues_dbg_show(struct seq_file *s, void *p)
143 {
144 	struct pxa_udc *udc = s->private;
145 	struct pxa_ep *ep;
146 	struct pxa27x_request *req;
147 	int pos = 0, i, maxpkt, ret;
148 
149 	ret = -ENODEV;
150 	if (!udc->driver)
151 		goto out;
152 
153 	/* dump endpoint queues */
154 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
155 		ep = &udc->pxa_ep[i];
156 		maxpkt = ep->fifo_size;
157 		pos += seq_printf(s,  "%-12s max_pkt=%d %s\n",
158 				EPNAME(ep), maxpkt, "pio");
159 
160 		if (list_empty(&ep->queue)) {
161 			pos += seq_printf(s, "\t(nothing queued)\n");
162 			continue;
163 		}
164 
165 		list_for_each_entry(req, &ep->queue, queue) {
166 			pos += seq_printf(s,  "\treq %p len %d/%d buf %p\n",
167 					&req->req, req->req.actual,
168 					req->req.length, req->req.buf);
169 		}
170 	}
171 
172 	ret = 0;
173 out:
174 	return ret;
175 }
176 
177 static int eps_dbg_show(struct seq_file *s, void *p)
178 {
179 	struct pxa_udc *udc = s->private;
180 	struct pxa_ep *ep;
181 	int pos = 0, i, ret;
182 	u32 tmp;
183 
184 	ret = -ENODEV;
185 	if (!udc->driver)
186 		goto out;
187 
188 	ep = &udc->pxa_ep[0];
189 	tmp = udc_ep_readl(ep, UDCCSR);
190 	pos += seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n", tmp,
191 			 (tmp & UDCCSR0_SA) ? " sa" : "",
192 			 (tmp & UDCCSR0_RNE) ? " rne" : "",
193 			 (tmp & UDCCSR0_FST) ? " fst" : "",
194 			 (tmp & UDCCSR0_SST) ? " sst" : "",
195 			 (tmp & UDCCSR0_DME) ? " dme" : "",
196 			 (tmp & UDCCSR0_IPR) ? " ipr" : "",
197 			 (tmp & UDCCSR0_OPC) ? " opc" : "");
198 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
199 		ep = &udc->pxa_ep[i];
		tmp = i ? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
201 		pos += seq_printf(s, "%-12s: "
202 				"IN %lu(%lu reqs), OUT %lu(%lu reqs), "
203 				"irqs=%lu, udccr=0x%08x, udccsr=0x%03x, "
204 				"udcbcr=%d\n",
205 				EPNAME(ep),
206 				ep->stats.in_bytes, ep->stats.in_ops,
207 				ep->stats.out_bytes, ep->stats.out_ops,
208 				ep->stats.irqs,
209 				tmp, udc_ep_readl(ep, UDCCSR),
210 				udc_ep_readl(ep, UDCBCR));
211 	}
212 
213 	ret = 0;
214 out:
215 	return ret;
216 }
217 
218 static int eps_dbg_open(struct inode *inode, struct file *file)
219 {
220 	return single_open(file, eps_dbg_show, inode->i_private);
221 }
222 
223 static int queues_dbg_open(struct inode *inode, struct file *file)
224 {
225 	return single_open(file, queues_dbg_show, inode->i_private);
226 }
227 
228 static int state_dbg_open(struct inode *inode, struct file *file)
229 {
230 	return single_open(file, state_dbg_show, inode->i_private);
231 }
232 
233 static const struct file_operations state_dbg_fops = {
234 	.owner		= THIS_MODULE,
235 	.open		= state_dbg_open,
236 	.llseek		= seq_lseek,
237 	.read		= seq_read,
238 	.release	= single_release,
239 };
240 
241 static const struct file_operations queues_dbg_fops = {
242 	.owner		= THIS_MODULE,
243 	.open		= queues_dbg_open,
244 	.llseek		= seq_lseek,
245 	.read		= seq_read,
246 	.release	= single_release,
247 };
248 
249 static const struct file_operations eps_dbg_fops = {
250 	.owner		= THIS_MODULE,
251 	.open		= eps_dbg_open,
252 	.llseek		= seq_lseek,
253 	.read		= seq_read,
254 	.release	= single_release,
255 };
256 
257 static void pxa_init_debugfs(struct pxa_udc *udc)
258 {
259 	struct dentry *root, *state, *queues, *eps;
260 
261 	root = debugfs_create_dir(udc->gadget.name, NULL);
262 	if (IS_ERR(root) || !root)
263 		goto err_root;
264 
265 	state = debugfs_create_file("udcstate", 0400, root, udc,
266 			&state_dbg_fops);
267 	if (!state)
268 		goto err_state;
269 	queues = debugfs_create_file("queues", 0400, root, udc,
270 			&queues_dbg_fops);
271 	if (!queues)
272 		goto err_queues;
273 	eps = debugfs_create_file("epstate", 0400, root, udc,
274 			&eps_dbg_fops);
275 	if (!eps)
276 		goto err_eps;
277 
278 	udc->debugfs_root = root;
279 	udc->debugfs_state = state;
280 	udc->debugfs_queues = queues;
281 	udc->debugfs_eps = eps;
282 	return;
err_eps:
	debugfs_remove(queues);
err_queues:
	debugfs_remove(state);
err_state:
	debugfs_remove(root);
289 err_root:
290 	dev_err(udc->dev, "debugfs is not available\n");
291 }
292 
293 static void pxa_cleanup_debugfs(struct pxa_udc *udc)
294 {
295 	debugfs_remove(udc->debugfs_eps);
296 	debugfs_remove(udc->debugfs_queues);
297 	debugfs_remove(udc->debugfs_state);
298 	debugfs_remove(udc->debugfs_root);
299 	udc->debugfs_eps = NULL;
300 	udc->debugfs_queues = NULL;
301 	udc->debugfs_state = NULL;
302 	udc->debugfs_root = NULL;
303 }
304 
305 #else
306 static inline void pxa_init_debugfs(struct pxa_udc *udc)
307 {
308 }
309 
310 static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
311 {
312 }
313 #endif
314 
315 /**
316  * is_match_usb_pxa - check if usb_ep and pxa_ep match
317  * @udc_usb_ep: usb endpoint
318  * @ep: pxa endpoint
319  * @config: configuration required in pxa_ep
320  * @interface: interface required in pxa_ep
321  * @altsetting: altsetting required in pxa_ep
322  *
323  * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
324  */
325 static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
326 		int config, int interface, int altsetting)
327 {
328 	if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
329 		return 0;
330 	if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
331 		return 0;
332 	if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
333 		return 0;
334 	if ((ep->config != config) || (ep->interface != interface)
335 			|| (ep->alternate != altsetting))
336 		return 0;
337 	return 1;
338 }
339 
340 /**
341  * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
342  * @udc: pxa udc
343  * @udc_usb_ep: udc_usb_ep structure
344  *
345  * Match udc_usb_ep and all pxa_ep available, to see if one matches.
346  * This is necessary because of the strong pxa hardware restriction requiring
 * that once pxa endpoints are initialized, their configuration is frozen, and
348  * no change can be made to their address, direction, or in which configuration,
349  * interface or altsetting they are active ... which differs from more usual
 * models where endpoints are roughly just addressable fifos, leaving
 * configuration events up to gadget drivers (like all control messages).
352  *
353  * Note that there is still a blurred point here :
354  *   - we rely on UDCCR register "active interface" and "active altsetting".
 *     This makes no sense with regard to the USB spec, where multiple interfaces are
356  *     active at the same time.
 *   - if we knew for sure that the pxa can handle multiple interfaces at the
358  *     same time, assuming Intel's Developer Guide is wrong, this function
359  *     should be reviewed, and a cache of couples (iface, altsetting) should
360  *     be kept in the pxa_udc structure. In this case this function would match
361  *     against the cache of couples instead of the "last altsetting" set up.
362  *
363  * Returns the matched pxa_ep structure or NULL if none found
364  */
365 static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
366 		struct udc_usb_ep *udc_usb_ep)
367 {
368 	int i;
369 	struct pxa_ep *ep;
370 	int cfg = udc->config;
371 	int iface = udc->last_interface;
372 	int alt = udc->last_alternate;
373 
374 	if (udc_usb_ep == &udc->udc_usb_ep[0])
375 		return &udc->pxa_ep[0];
376 
377 	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
378 		ep = &udc->pxa_ep[i];
379 		if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
380 			return ep;
381 	}
382 	return NULL;
383 }
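
/*
 * For example (illustration only) : while the host has selected configuration
 * 1, interface 0, altsetting 0, a request queued on the gadget endpoint bound
 * to a bulk IN endpoint at address 1 only matches the pxa_ep declared with
 * (addr=1, dir_in, bulk, config=1, interface=0, alternate=0).
 */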
384 
385 /**
386  * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
387  * @udc: pxa udc
388  *
389  * Context: in_interrupt()
390  *
391  * Updates all pxa_ep fields in udc_usb_ep structures, if this field was
 * previously set up (and is not NULL). The update is necessary if a
393  * configuration change or altsetting change was issued by the USB host.
394  */
395 static void update_pxa_ep_matches(struct pxa_udc *udc)
396 {
397 	int i;
398 	struct udc_usb_ep *udc_usb_ep;
399 
400 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
401 		udc_usb_ep = &udc->udc_usb_ep[i];
402 		if (udc_usb_ep->pxa_ep)
403 			udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
404 	}
405 }
406 
407 /**
408  * pio_irq_enable - Enables irq generation for one endpoint
409  * @ep: udc endpoint
410  */
411 static void pio_irq_enable(struct pxa_ep *ep)
412 {
413 	struct pxa_udc *udc = ep->dev;
414 	int index = EPIDX(ep);
415 	u32 udcicr0 = udc_readl(udc, UDCICR0);
416 	u32 udcicr1 = udc_readl(udc, UDCICR1);
417 
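	/*
	 * Each endpoint owns two interrupt enable bits : endpoints 0-15 are
	 * controlled through UDCICR0 and the remaining ones through UDCICR1,
	 * at bit positions (2 * index) and (2 * index + 1). For example,
	 * endpoint 3 is enabled by setting bits 7:6 of UDCICR0.
	 */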
418 	if (index < 16)
419 		udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
420 	else
421 		udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
422 }
423 
424 /**
425  * pio_irq_disable - Disables irq generation for one endpoint
426  * @ep: udc endpoint
427  */
428 static void pio_irq_disable(struct pxa_ep *ep)
429 {
430 	struct pxa_udc *udc = ep->dev;
431 	int index = EPIDX(ep);
432 	u32 udcicr0 = udc_readl(udc, UDCICR0);
433 	u32 udcicr1 = udc_readl(udc, UDCICR1);
434 
435 	if (index < 16)
436 		udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
437 	else
438 		udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
439 }
440 
441 /**
442  * udc_set_mask_UDCCR - set bits in UDCCR
443  * @udc: udc device
444  * @mask: bits to set in UDCCR
445  *
446  * Sets bits in UDCCR, leaving DME and FST bits as they were.
447  */
448 static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
449 {
450 	u32 udccr = udc_readl(udc, UDCCR);
451 	udc_writel(udc, UDCCR,
452 			(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
453 }
454 
455 /**
456  * udc_clear_mask_UDCCR - clears bits in UDCCR
457  * @udc: udc device
458  * @mask: bit to clear in UDCCR
459  *
460  * Clears bits in UDCCR, leaving DME and FST bits as they were.
461  */
462 static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
463 {
464 	u32 udccr = udc_readl(udc, UDCCR);
465 	udc_writel(udc, UDCCR,
466 			(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
467 }
468 
469 /**
470  * ep_write_UDCCSR - set bits in UDCCSR
 * @ep: udc endpoint
 * @mask: bits to set in UDCCSR
473  *
474  * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
475  *
476  * A specific case is applied to ep0 : the ACM bit is always set to 1, for
477  * SET_INTERFACE and SET_CONFIGURATION.
478  */
479 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
480 {
481 	if (is_ep0(ep))
482 		mask |= UDCCSR0_ACM;
483 	udc_ep_writel(ep, UDCCSR, mask);
484 }
485 
486 /**
487  * ep_count_bytes_remain - get how many bytes in udc endpoint
488  * @ep: udc endpoint
489  *
490  * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
491  */
492 static int ep_count_bytes_remain(struct pxa_ep *ep)
493 {
494 	if (ep->dir_in)
495 		return -EOPNOTSUPP;
496 	return udc_ep_readl(ep, UDCBCR) & 0x3ff;
497 }
498 
499 /**
 * ep_is_empty - checks if ep has bytes ready for reading
501  * @ep: udc endpoint
502  *
503  * If endpoint is the control endpoint, checks if there are bytes in the
504  * control endpoint fifo. If endpoint is a data endpoint, checks if bytes
505  * are ready for reading on OUT endpoint.
506  *
507  * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
508  */
509 static int ep_is_empty(struct pxa_ep *ep)
510 {
511 	int ret;
512 
513 	if (!is_ep0(ep) && ep->dir_in)
514 		return -EOPNOTSUPP;
515 	if (is_ep0(ep))
516 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
517 	else
518 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
519 	return ret;
520 }
521 
522 /**
523  * ep_is_full - checks if ep has place to write bytes
524  * @ep: udc endpoint
525  *
526  * If endpoint is not the control endpoint and is an IN endpoint, checks if
527  * there is place to write bytes into the endpoint.
528  *
529  * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
530  */
531 static int ep_is_full(struct pxa_ep *ep)
532 {
533 	if (is_ep0(ep))
534 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
535 	if (!ep->dir_in)
536 		return -EOPNOTSUPP;
537 	return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
538 }
539 
540 /**
541  * epout_has_pkt - checks if OUT endpoint fifo has a packet available
542  * @ep: pxa endpoint
543  *
544  * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
545  */
546 static int epout_has_pkt(struct pxa_ep *ep)
547 {
548 	if (!is_ep0(ep) && ep->dir_in)
549 		return -EOPNOTSUPP;
550 	if (is_ep0(ep))
551 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
552 	return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
553 }
554 
555 /**
556  * set_ep0state - Set ep0 automata state
 * @udc: udc device
558  * @state: state
559  */
560 static void set_ep0state(struct pxa_udc *udc, int state)
561 {
562 	struct pxa_ep *ep = &udc->pxa_ep[0];
563 	char *old_stname = EP0_STNAME(udc);
564 
565 	udc->ep0state = state;
566 	ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
567 		EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
568 		udc_ep_readl(ep, UDCBCR));
569 }
570 
571 /**
572  * ep0_idle - Put control endpoint into idle state
573  * @dev: udc device
574  */
575 static void ep0_idle(struct pxa_udc *dev)
576 {
577 	set_ep0state(dev, WAIT_FOR_SETUP);
578 }
579 
580 /**
581  * inc_ep_stats_reqs - Update ep stats counts
582  * @ep: physical endpoint
584  * @is_in: ep direction (USB_DIR_IN or 0)
585  *
586  */
587 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
588 {
589 	if (is_in)
590 		ep->stats.in_ops++;
591 	else
592 		ep->stats.out_ops++;
593 }
594 
595 /**
596  * inc_ep_stats_bytes - Update ep stats counts
597  * @ep: physical endpoint
598  * @count: bytes transferred on endpoint
599  * @is_in: ep direction (USB_DIR_IN or 0)
600  */
601 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
602 {
603 	if (is_in)
604 		ep->stats.in_bytes += count;
605 	else
606 		ep->stats.out_bytes += count;
607 }
608 
609 /**
 * pxa_ep_setup - Sets up a usb physical endpoint
611  * @ep: pxa27x physical endpoint
612  *
613  * Find the physical pxa27x ep, and setup its UDCCR
614  */
615 static void pxa_ep_setup(struct pxa_ep *ep)
616 {
617 	u32 new_udccr;
618 
619 	new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
620 		| ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
621 		| ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
622 		| ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
623 		| ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
624 		| ((ep->dir_in) ? UDCCONR_ED : 0)
625 		| ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
626 		| UDCCONR_EE;
627 
628 	udc_ep_writel(ep, UDCCR, new_udccr);
629 }
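
/*
 * As an illustration (hypothetical values) : a bulk IN endpoint at usb address
 * 1, declared for configuration 1, interface 0, altsetting 0, with a 64 byte
 * fifo, would get its per-endpoint UDCCR programmed with CN=1, IN=0, AISN=0,
 * EN=1, ET=bulk, ED set (IN direction), MPS=64 and EE (endpoint enable) set.
 */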
630 
631 /**
632  * pxa_eps_setup - Sets up all usb physical endpoints
633  * @dev: udc device
634  *
635  * Setup all pxa physical endpoints, except ep0
636  */
637 static void pxa_eps_setup(struct pxa_udc *dev)
638 {
639 	unsigned int i;
640 
641 	dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
642 
643 	for (i = 1; i < NR_PXA_ENDPOINTS; i++)
644 		pxa_ep_setup(&dev->pxa_ep[i]);
645 }
646 
647 /**
648  * pxa_ep_alloc_request - Allocate usb request
649  * @_ep: usb endpoint
 * @gfp_flags: memory allocation flags
651  *
652  * For the pxa27x, these can just wrap kmalloc/kfree.  gadget drivers
653  * must still pass correctly initialized endpoints, since other controller
654  * drivers may care about how it's currently set up (dma issues etc).
 */
656 static struct usb_request *
657 pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
658 {
659 	struct pxa27x_request *req;
660 
661 	req = kzalloc(sizeof *req, gfp_flags);
662 	if (!req)
663 		return NULL;
664 
665 	INIT_LIST_HEAD(&req->queue);
666 	req->in_use = 0;
667 	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
668 
669 	return &req->req;
670 }
671 
672 /**
673  * pxa_ep_free_request - Free usb request
674  * @_ep: usb endpoint
675  * @_req: usb request
676  *
677  * Wrapper around kfree to free _req
678  */
679 static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
680 {
681 	struct pxa27x_request *req;
682 
683 	req = container_of(_req, struct pxa27x_request, req);
684 	WARN_ON(!list_empty(&req->queue));
685 	kfree(req);
686 }
687 
688 /**
689  * ep_add_request - add a request to the endpoint's queue
690  * @ep: usb endpoint
691  * @req: usb request
692  *
693  * Context: ep->lock held
694  *
695  * Queues the request in the endpoint's queue, and enables the interrupts
696  * on the endpoint.
697  */
698 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
699 {
700 	if (unlikely(!req))
701 		return;
702 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
703 		req->req.length, udc_ep_readl(ep, UDCCSR));
704 
705 	req->in_use = 1;
706 	list_add_tail(&req->queue, &ep->queue);
707 	pio_irq_enable(ep);
708 }
709 
710 /**
711  * ep_del_request - removes a request from the endpoint's queue
712  * @ep: usb endpoint
713  * @req: usb request
714  *
715  * Context: ep->lock held
716  *
717  * Unqueue the request from the endpoint's queue. If there are no more requests
718  * on the endpoint, and if it's not the control endpoint, interrupts are
719  * disabled on the endpoint.
720  */
721 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
722 {
723 	if (unlikely(!req))
724 		return;
725 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
726 		req->req.length, udc_ep_readl(ep, UDCCSR));
727 
728 	list_del_init(&req->queue);
729 	req->in_use = 0;
730 	if (!is_ep0(ep) && list_empty(&ep->queue))
731 		pio_irq_disable(ep);
732 }
733 
734 /**
735  * req_done - Complete an usb request
736  * @ep: pxa physical endpoint
737  * @req: pxa request
738  * @status: usb request status sent to gadget API
 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
740  *
741  * Context: ep->lock held if flags not NULL, else ep->lock released
742  *
743  * Retire a pxa27x usb request. Endpoint must be locked.
744  */
745 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
746 	unsigned long *pflags)
747 {
748 	unsigned long	flags;
749 
750 	ep_del_request(ep, req);
751 	if (likely(req->req.status == -EINPROGRESS))
752 		req->req.status = status;
753 	else
754 		status = req->req.status;
755 
756 	if (status && status != -ESHUTDOWN)
757 		ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
758 			&req->req, status,
759 			req->req.actual, req->req.length);
760 
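	/*
	 * Drop the endpoint lock (when held) around the gadget completion
	 * callback : the callback may queue another request and re-enter this
	 * driver (e.g. through pxa_ep_queue()), which takes ep->lock itself.
	 */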
761 	if (pflags)
762 		spin_unlock_irqrestore(&ep->lock, *pflags);
763 	local_irq_save(flags);
764 	usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req);
765 	local_irq_restore(flags);
766 	if (pflags)
767 		spin_lock_irqsave(&ep->lock, *pflags);
768 }
769 
770 /**
771  * ep_end_out_req - Ends endpoint OUT request
772  * @ep: physical endpoint
773  * @req: pxa request
 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
775  *
776  * Context: ep->lock held or released (see req_done())
777  *
778  * Ends endpoint OUT request (completes usb request).
779  */
780 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
781 	unsigned long *pflags)
782 {
783 	inc_ep_stats_reqs(ep, !USB_DIR_IN);
784 	req_done(ep, req, 0, pflags);
785 }
786 
787 /**
788  * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
789  * @ep: physical endpoint
790  * @req: pxa request
 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
792  *
793  * Context: ep->lock held or released (see req_done())
794  *
795  * Ends control endpoint OUT request (completes usb request), and puts
796  * control endpoint into idle state
797  */
798 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
799 	unsigned long *pflags)
800 {
801 	set_ep0state(ep->dev, OUT_STATUS_STAGE);
802 	ep_end_out_req(ep, req, pflags);
803 	ep0_idle(ep->dev);
804 }
805 
806 /**
807  * ep_end_in_req - Ends endpoint IN request
808  * @ep: physical endpoint
809  * @req: pxa request
 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
811  *
812  * Context: ep->lock held or released (see req_done())
813  *
814  * Ends endpoint IN request (completes usb request).
815  */
816 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
817 	unsigned long *pflags)
818 {
819 	inc_ep_stats_reqs(ep, USB_DIR_IN);
820 	req_done(ep, req, 0, pflags);
821 }
822 
823 /**
824  * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
825  * @ep: physical endpoint
826  * @req: pxa request
 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
828  *
829  * Context: ep->lock held or released (see req_done())
830  *
831  * Ends control endpoint IN request (completes usb request), and puts
832  * control endpoint into status state
833  */
834 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
835 	unsigned long *pflags)
836 {
837 	set_ep0state(ep->dev, IN_STATUS_STAGE);
838 	ep_end_in_req(ep, req, pflags);
839 }
840 
841 /**
842  * nuke - Dequeue all requests
843  * @ep: pxa endpoint
844  * @status: usb request status
845  *
846  * Context: ep->lock released
847  *
848  * Dequeues all requests on an endpoint. As a side effect, interrupts will be
849  * disabled on that endpoint (because no more requests).
850  */
851 static void nuke(struct pxa_ep *ep, int status)
852 {
853 	struct pxa27x_request	*req;
854 	unsigned long		flags;
855 
856 	spin_lock_irqsave(&ep->lock, flags);
857 	while (!list_empty(&ep->queue)) {
858 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
859 		req_done(ep, req, status, &flags);
860 	}
861 	spin_unlock_irqrestore(&ep->lock, flags);
862 }
863 
864 /**
865  * read_packet - transfer 1 packet from an OUT endpoint into request
866  * @ep: pxa physical endpoint
867  * @req: usb request
868  *
 * Takes bytes from the OUT endpoint and transfers them into the usb request.
870  * If there is less space in request than bytes received in OUT endpoint,
871  * bytes are left in the OUT endpoint.
872  *
873  * Returns how many bytes were actually transferred
874  */
875 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
876 {
877 	u32 *buf;
878 	int bytes_ep, bufferspace, count, i;
879 
880 	bytes_ep = ep_count_bytes_remain(ep);
881 	bufferspace = req->req.length - req->req.actual;
882 
883 	buf = (u32 *)(req->req.buf + req->req.actual);
884 	prefetchw(buf);
885 
886 	if (likely(!ep_is_empty(ep)))
887 		count = min(bytes_ep, bufferspace);
888 	else /* zlp */
889 		count = 0;
890 
891 	for (i = count; i > 0; i -= 4)
892 		*buf++ = udc_ep_readl(ep, UDCDR);
893 	req->req.actual += count;
894 
895 	ep_write_UDCCSR(ep, UDCCSR_PC);
896 
897 	return count;
898 }
899 
900 /**
901  * write_packet - transfer 1 packet from request into an IN endpoint
902  * @ep: pxa physical endpoint
903  * @req: usb request
904  * @max: max bytes that fit into endpoint
905  *
906  * Takes bytes from usb request, and transfers them into the physical
907  * endpoint. If there are no bytes to transfer, doesn't write anything
908  * to physical endpoint.
909  *
910  * Returns how many bytes were actually transferred.
911  */
912 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
913 			unsigned int max)
914 {
915 	int length, count, remain, i;
916 	u32 *buf;
917 	u8 *buf_8;
918 
919 	buf = (u32 *)(req->req.buf + req->req.actual);
920 	prefetch(buf);
921 
922 	length = min(req->req.length - req->req.actual, max);
923 	req->req.actual += length;
924 
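	/*
	 * Push whole 32-bit words through UDCDR first, then the trailing 1-3
	 * bytes (if any) one at a time through the byte-wide access.
	 */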
925 	remain = length & 0x3;
926 	count = length & ~(0x3);
927 	for (i = count; i > 0 ; i -= 4)
928 		udc_ep_writel(ep, UDCDR, *buf++);
929 
930 	buf_8 = (u8 *)buf;
931 	for (i = remain; i > 0; i--)
932 		udc_ep_writeb(ep, UDCDR, *buf_8++);
933 
934 	ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
935 		udc_ep_readl(ep, UDCCSR));
936 
937 	return length;
938 }
939 
940 /**
941  * read_fifo - Transfer packets from OUT endpoint into usb request
942  * @ep: pxa physical endpoint
943  * @req: usb request
944  *
945  * Context: callable when in_interrupt()
946  *
947  * Unload as many packets as possible from the fifo we use for usb OUT
948  * transfers and put them into the request. Caller should have made sure
949  * there's at least one packet ready.
950  * Doesn't complete the request, that's the caller's job
951  *
952  * Returns 1 if the request completed, 0 otherwise
953  */
954 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
955 {
956 	int count, is_short, completed = 0;
957 
958 	while (epout_has_pkt(ep)) {
959 		count = read_packet(ep, req);
960 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
961 
962 		is_short = (count < ep->fifo_size);
963 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
964 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
965 			&req->req, req->req.actual, req->req.length);
966 
967 		/* completion */
968 		if (is_short || req->req.actual == req->req.length) {
969 			completed = 1;
970 			break;
971 		}
972 		/* finished that packet.  the next one may be waiting... */
973 	}
974 	return completed;
975 }
976 
977 /**
978  * write_fifo - transfer packets from usb request into an IN endpoint
979  * @ep: pxa physical endpoint
980  * @req: pxa usb request
981  *
982  * Write to an IN endpoint fifo, as many packets as possible.
983  * irqs will use this to write the rest later.
984  * caller guarantees at least one packet buffer is ready (or a zlp).
985  * Doesn't complete the request, that's the caller's job
986  *
987  * Returns 1 if request fully transferred, 0 if partial transfer
988  */
989 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
990 {
991 	unsigned max;
992 	int count, is_short, is_last = 0, completed = 0, totcount = 0;
993 	u32 udccsr;
994 
995 	max = ep->fifo_size;
996 	do {
997 		is_short = 0;
998 
999 		udccsr = udc_ep_readl(ep, UDCCSR);
1000 		if (udccsr & UDCCSR_PC) {
1001 			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
1002 				udccsr);
1003 			ep_write_UDCCSR(ep, UDCCSR_PC);
1004 		}
1005 		if (udccsr & UDCCSR_TRN) {
1006 			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
1007 				udccsr);
1008 			ep_write_UDCCSR(ep, UDCCSR_TRN);
1009 		}
1010 
1011 		count = write_packet(ep, req, max);
1012 		inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1013 		totcount += count;
1014 
1015 		/* last packet is usually short (or a zlp) */
1016 		if (unlikely(count < max)) {
1017 			is_last = 1;
1018 			is_short = 1;
1019 		} else {
1020 			if (likely(req->req.length > req->req.actual)
1021 					|| req->req.zero)
1022 				is_last = 0;
1023 			else
1024 				is_last = 1;
1025 			/* interrupt/iso maxpacket may not fill the fifo */
1026 			is_short = unlikely(max < ep->fifo_size);
1027 		}
1028 
1029 		if (is_short)
1030 			ep_write_UDCCSR(ep, UDCCSR_SP);
1031 
1032 		/* requests complete when all IN data is in the FIFO */
1033 		if (is_last) {
1034 			completed = 1;
1035 			break;
1036 		}
1037 	} while (!ep_is_full(ep));
1038 
1039 	ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
1040 			totcount, is_last ? "/L" : "", is_short ? "/S" : "",
1041 			req->req.length - req->req.actual, &req->req);
1042 
1043 	return completed;
1044 }
1045 
1046 /**
1047  * read_ep0_fifo - Transfer packets from control endpoint into usb request
1048  * @ep: control endpoint
1049  * @req: pxa usb request
1050  *
1051  * Special ep0 version of the above read_fifo. Reads as many bytes from control
1052  * endpoint as can be read, and stores them into usb request (limited by request
1053  * maximum length).
1054  *
1055  * Returns 0 if usb request only partially filled, 1 if fully filled
1056  */
1057 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1058 {
1059 	int count, is_short, completed = 0;
1060 
1061 	while (epout_has_pkt(ep)) {
1062 		count = read_packet(ep, req);
1063 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1064 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
1065 
1066 		is_short = (count < ep->fifo_size);
1067 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
1068 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
1069 			&req->req, req->req.actual, req->req.length);
1070 
1071 		if (is_short || req->req.actual >= req->req.length) {
1072 			completed = 1;
1073 			break;
1074 		}
1075 	}
1076 
1077 	return completed;
1078 }
1079 
1080 /**
1081  * write_ep0_fifo - Send a request to control endpoint (ep0 in)
1082  * @ep: control endpoint
1083  * @req: request
1084  *
1085  * Context: callable when in_interrupt()
1086  *
1087  * Sends a request (or a part of the request) to the control endpoint (ep0 in).
1088  * If the request doesn't fit, the remaining part will be sent from irq.
1089  * The request is considered fully written only if either :
1090  *   - last write transferred all remaining bytes, but fifo was not fully filled
1091  *   - last write was a 0 length write
1092  *
1093  * Returns 1 if request fully written, 0 if request only partially sent
1094  */
1095 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1096 {
1097 	unsigned	count;
1098 	int		is_last, is_short;
1099 
1100 	count = write_packet(ep, req, EP0_FIFO_SIZE);
1101 	inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1102 
1103 	is_short = (count < EP0_FIFO_SIZE);
1104 	is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
1105 
1106 	/* Sends either a short packet or a 0 length packet */
1107 	if (unlikely(is_short))
1108 		ep_write_UDCCSR(ep, UDCCSR0_IPR);
1109 
1110 	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
1111 		count, is_short ? "/S" : "", is_last ? "/L" : "",
1112 		req->req.length - req->req.actual,
1113 		&req->req, udc_ep_readl(ep, UDCCSR));
1114 
1115 	return is_last;
1116 }
1117 
1118 /**
1119  * pxa_ep_queue - Queue a request into an IN endpoint
1120  * @_ep: usb endpoint
1121  * @_req: usb request
1122  * @gfp_flags: flags
1123  *
1124  * Context: normally called when !in_interrupt, but callable when in_interrupt()
1125  * in the special case of ep0 setup :
1126  *   (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
1127  *
 * Returns 0 if succeeded, error otherwise
1129  */
1130 static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1131 			gfp_t gfp_flags)
1132 {
1133 	struct udc_usb_ep	*udc_usb_ep;
1134 	struct pxa_ep		*ep;
1135 	struct pxa27x_request	*req;
1136 	struct pxa_udc		*dev;
1137 	unsigned long		flags;
1138 	int			rc = 0;
1139 	int			is_first_req;
1140 	unsigned		length;
1141 	int			recursion_detected;
1142 
1143 	req = container_of(_req, struct pxa27x_request, req);
1144 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1145 
1146 	if (unlikely(!_req || !_req->complete || !_req->buf))
1147 		return -EINVAL;
1148 
1149 	if (unlikely(!_ep))
1150 		return -EINVAL;
1151 
1152 	dev = udc_usb_ep->dev;
1153 	ep = udc_usb_ep->pxa_ep;
1154 	if (unlikely(!ep))
1155 		return -EINVAL;
1156 
1157 	dev = ep->dev;
1158 	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
1159 		ep_dbg(ep, "bogus device state\n");
1160 		return -ESHUTDOWN;
1161 	}
1162 
1163 	/* iso is always one packet per request, that's the only way
1164 	 * we can report per-packet status.  that also helps with dma.
1165 	 */
1166 	if (unlikely(EPXFERTYPE_is_ISO(ep)
1167 			&& req->req.length > ep->fifo_size))
1168 		return -EMSGSIZE;
1169 
1170 	spin_lock_irqsave(&ep->lock, flags);
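	/*
	 * If this call comes from within handle_ep() (typically from a request
	 * completion callback), note it so that handle_ep() is not re-entered
	 * below.
	 */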
1171 	recursion_detected = ep->in_handle_ep;
1172 
1173 	is_first_req = list_empty(&ep->queue);
1174 	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
1175 			_req, is_first_req ? "yes" : "no",
1176 			_req->length, _req->buf);
1177 
1178 	if (!ep->enabled) {
1179 		_req->status = -ESHUTDOWN;
1180 		rc = -ESHUTDOWN;
1181 		goto out_locked;
1182 	}
1183 
1184 	if (req->in_use) {
1185 		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1186 		goto out_locked;
1187 	}
1188 
1189 	length = _req->length;
1190 	_req->status = -EINPROGRESS;
1191 	_req->actual = 0;
1192 
1193 	ep_add_request(ep, req);
1194 	spin_unlock_irqrestore(&ep->lock, flags);
1195 
1196 	if (is_ep0(ep)) {
1197 		switch (dev->ep0state) {
1198 		case WAIT_ACK_SET_CONF_INTERF:
1199 			if (length == 0) {
1200 				ep_end_in_req(ep, req, NULL);
1201 			} else {
				ep_err(ep, "got a request of %d bytes while "
1203 					"in state WAIT_ACK_SET_CONF_INTERF\n",
1204 					length);
1205 				ep_del_request(ep, req);
1206 				rc = -EL2HLT;
1207 			}
1208 			ep0_idle(ep->dev);
1209 			break;
1210 		case IN_DATA_STAGE:
1211 			if (!ep_is_full(ep))
1212 				if (write_ep0_fifo(ep, req))
1213 					ep0_end_in_req(ep, req, NULL);
1214 			break;
1215 		case OUT_DATA_STAGE:
1216 			if ((length == 0) || !epout_has_pkt(ep))
1217 				if (read_ep0_fifo(ep, req))
1218 					ep0_end_out_req(ep, req, NULL);
1219 			break;
1220 		default:
1221 			ep_err(ep, "odd state %s to send me a request\n",
1222 				EP0_STNAME(ep->dev));
1223 			ep_del_request(ep, req);
1224 			rc = -EL2HLT;
1225 			break;
1226 		}
1227 	} else {
1228 		if (!recursion_detected)
1229 			handle_ep(ep);
1230 	}
1231 
1232 out:
1233 	return rc;
1234 out_locked:
1235 	spin_unlock_irqrestore(&ep->lock, flags);
1236 	goto out;
1237 }
1238 
1239 /**
1240  * pxa_ep_dequeue - Dequeue one request
1241  * @_ep: usb endpoint
1242  * @_req: usb request
1243  *
1244  * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
1245  */
1246 static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1247 {
1248 	struct pxa_ep		*ep;
1249 	struct udc_usb_ep	*udc_usb_ep;
1250 	struct pxa27x_request	*req;
1251 	unsigned long		flags;
1252 	int			rc = -EINVAL;
1253 
1254 	if (!_ep)
1255 		return rc;
1256 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1257 	ep = udc_usb_ep->pxa_ep;
1258 	if (!ep || is_ep0(ep))
1259 		return rc;
1260 
1261 	spin_lock_irqsave(&ep->lock, flags);
1262 
1263 	/* make sure it's actually queued on this endpoint */
1264 	list_for_each_entry(req, &ep->queue, queue) {
1265 		if (&req->req == _req) {
1266 			rc = 0;
1267 			break;
1268 		}
1269 	}
1270 
1271 	spin_unlock_irqrestore(&ep->lock, flags);
1272 	if (!rc)
1273 		req_done(ep, req, -ECONNRESET, NULL);
1274 	return rc;
1275 }
1276 
1277 /**
1278  * pxa_ep_set_halt - Halts operations on one endpoint
1279  * @_ep: usb endpoint
 * @value: 1 to set halt on the endpoint, 0 to clear it
1281  *
1282  * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
1283  */
1284 static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
1285 {
1286 	struct pxa_ep		*ep;
1287 	struct udc_usb_ep	*udc_usb_ep;
1288 	unsigned long flags;
1289 	int rc;
1290 
1291 
1292 	if (!_ep)
1293 		return -EINVAL;
1294 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1295 	ep = udc_usb_ep->pxa_ep;
1296 	if (!ep || is_ep0(ep))
1297 		return -EINVAL;
1298 
1299 	if (value == 0) {
1300 		/*
1301 		 * This path (reset toggle+halt) is needed to implement
1302 		 * SET_INTERFACE on normal hardware.  but it can't be
1303 		 * done from software on the PXA UDC, and the hardware
1304 		 * forgets to do it as part of SET_INTERFACE automagic.
1305 		 */
1306 		ep_dbg(ep, "only host can clear halt\n");
1307 		return -EROFS;
1308 	}
1309 
1310 	spin_lock_irqsave(&ep->lock, flags);
1311 
1312 	rc = -EAGAIN;
1313 	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
1314 		goto out;
1315 
1316 	/* FST, FEF bits are the same for control and non control endpoints */
1317 	rc = 0;
1318 	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
1319 	if (is_ep0(ep))
1320 		set_ep0state(ep->dev, STALL);
1321 
1322 out:
1323 	spin_unlock_irqrestore(&ep->lock, flags);
1324 	return rc;
1325 }
1326 
1327 /**
1328  * pxa_ep_fifo_status - Get how many bytes in physical endpoint
1329  * @_ep: usb endpoint
1330  *
1331  * Returns number of bytes in OUT fifos. Broken for IN fifos.
1332  */
1333 static int pxa_ep_fifo_status(struct usb_ep *_ep)
1334 {
1335 	struct pxa_ep		*ep;
1336 	struct udc_usb_ep	*udc_usb_ep;
1337 
1338 	if (!_ep)
1339 		return -ENODEV;
1340 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1341 	ep = udc_usb_ep->pxa_ep;
1342 	if (!ep || is_ep0(ep))
1343 		return -ENODEV;
1344 
1345 	if (ep->dir_in)
1346 		return -EOPNOTSUPP;
1347 	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
1348 		return 0;
1349 	else
1350 		return ep_count_bytes_remain(ep) + 1;
1351 }
1352 
1353 /**
1354  * pxa_ep_fifo_flush - Flushes one endpoint
1355  * @_ep: usb endpoint
1356  *
1357  * Discards all data in one endpoint(IN or OUT), except control endpoint.
1358  */
1359 static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1360 {
1361 	struct pxa_ep		*ep;
1362 	struct udc_usb_ep	*udc_usb_ep;
1363 	unsigned long		flags;
1364 
1365 	if (!_ep)
1366 		return;
1367 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1368 	ep = udc_usb_ep->pxa_ep;
1369 	if (!ep || is_ep0(ep))
1370 		return;
1371 
1372 	spin_lock_irqsave(&ep->lock, flags);
1373 
1374 	if (unlikely(!list_empty(&ep->queue)))
1375 		ep_dbg(ep, "called while queue list not empty\n");
1376 	ep_dbg(ep, "called\n");
1377 
1378 	/* for OUT, just read and discard the FIFO contents. */
1379 	if (!ep->dir_in) {
1380 		while (!ep_is_empty(ep))
1381 			udc_ep_readl(ep, UDCDR);
1382 	} else {
1383 		/* most IN status is the same, but ISO can't stall */
1384 		ep_write_UDCCSR(ep,
1385 				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
1386 				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
1387 	}
1388 
1389 	spin_unlock_irqrestore(&ep->lock, flags);
1390 }
1391 
1392 /**
1393  * pxa_ep_enable - Enables usb endpoint
1394  * @_ep: usb endpoint
1395  * @desc: usb endpoint descriptor
1396  *
1397  * Nothing much to do here, as ep configuration is done once and for all
1398  * before udc is enabled. After udc enable, no physical endpoint configuration
1399  * can be changed.
1400  * Function makes sanity checks and flushes the endpoint.
1401  */
1402 static int pxa_ep_enable(struct usb_ep *_ep,
1403 	const struct usb_endpoint_descriptor *desc)
1404 {
1405 	struct pxa_ep		*ep;
1406 	struct udc_usb_ep	*udc_usb_ep;
1407 	struct pxa_udc		*udc;
1408 
1409 	if (!_ep || !desc)
1410 		return -EINVAL;
1411 
1412 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1413 	if (udc_usb_ep->pxa_ep) {
1414 		ep = udc_usb_ep->pxa_ep;
1415 		ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
1416 			_ep->name);
1417 	} else {
1418 		ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
1419 	}
1420 
1421 	if (!ep || is_ep0(ep)) {
1422 		dev_err(udc_usb_ep->dev->dev,
1423 			"unable to match pxa_ep for ep %s\n",
1424 			_ep->name);
1425 		return -EINVAL;
1426 	}
1427 
1428 	if ((desc->bDescriptorType != USB_DT_ENDPOINT)
1429 			|| (ep->type != usb_endpoint_type(desc))) {
1430 		ep_err(ep, "type mismatch\n");
1431 		return -EINVAL;
1432 	}
1433 
1434 	if (ep->fifo_size < usb_endpoint_maxp(desc)) {
1435 		ep_err(ep, "bad maxpacket\n");
1436 		return -ERANGE;
1437 	}
1438 
1439 	udc_usb_ep->pxa_ep = ep;
1440 	udc = ep->dev;
1441 
1442 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
1443 		ep_err(ep, "bogus device state\n");
1444 		return -ESHUTDOWN;
1445 	}
1446 
1447 	ep->enabled = 1;
1448 
1449 	/* flush fifo (mostly for OUT buffers) */
1450 	pxa_ep_fifo_flush(_ep);
1451 
1452 	ep_dbg(ep, "enabled\n");
1453 	return 0;
1454 }
1455 
1456 /**
1457  * pxa_ep_disable - Disable usb endpoint
1458  * @_ep: usb endpoint
1459  *
1460  * Same as for pxa_ep_enable, no physical endpoint configuration can be
1461  * changed.
1462  * Function flushes the endpoint and related requests.
1463  */
1464 static int pxa_ep_disable(struct usb_ep *_ep)
1465 {
1466 	struct pxa_ep		*ep;
1467 	struct udc_usb_ep	*udc_usb_ep;
1468 
1469 	if (!_ep)
1470 		return -EINVAL;
1471 
1472 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1473 	ep = udc_usb_ep->pxa_ep;
1474 	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1475 		return -EINVAL;
1476 
1477 	ep->enabled = 0;
1478 	nuke(ep, -ESHUTDOWN);
1479 
1480 	pxa_ep_fifo_flush(_ep);
1481 	udc_usb_ep->pxa_ep = NULL;
1482 
1483 	ep_dbg(ep, "disabled\n");
1484 	return 0;
1485 }
1486 
1487 static struct usb_ep_ops pxa_ep_ops = {
1488 	.enable		= pxa_ep_enable,
1489 	.disable	= pxa_ep_disable,
1490 
1491 	.alloc_request	= pxa_ep_alloc_request,
1492 	.free_request	= pxa_ep_free_request,
1493 
1494 	.queue		= pxa_ep_queue,
1495 	.dequeue	= pxa_ep_dequeue,
1496 
1497 	.set_halt	= pxa_ep_set_halt,
1498 	.fifo_status	= pxa_ep_fifo_status,
1499 	.fifo_flush	= pxa_ep_fifo_flush,
1500 };
1501 
1502 /**
1503  * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
1504  * @udc: udc device
1505  * @on: 0 if disconnect pullup resistor, 1 otherwise
1506  * Context: any
1507  *
1508  * Handle D+ pullup resistor, make the device visible to the usb bus, and
1509  * declare it as a full speed usb device
1510  */
1511 static void dplus_pullup(struct pxa_udc *udc, int on)
1512 {
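	/*
	 * Prefer the gpio descriptor when one was provided ; otherwise fall
	 * back to the board specific udc_command() callback.
	 */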
1513 	if (udc->gpiod) {
1514 		gpiod_set_value(udc->gpiod, on);
1515 	} else if (udc->udc_command) {
1516 		if (on)
1517 			udc->udc_command(PXA2XX_UDC_CMD_CONNECT);
1518 		else
1519 			udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
1520 	}
1521 	udc->pullup_on = on;
1522 }
1523 
1524 /**
1525  * pxa_udc_get_frame - Returns usb frame number
1526  * @_gadget: usb gadget
1527  */
1528 static int pxa_udc_get_frame(struct usb_gadget *_gadget)
1529 {
1530 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1531 
1532 	return (udc_readl(udc, UDCFNR) & 0x7ff);
1533 }
1534 
1535 /**
1536  * pxa_udc_wakeup - Force udc device out of suspend
1537  * @_gadget: usb gadget
1538  *
1539  * Returns 0 if successful, error code otherwise
1540  */
1541 static int pxa_udc_wakeup(struct usb_gadget *_gadget)
1542 {
1543 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1544 
1545 	/* host may not have enabled remote wakeup */
1546 	if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
1547 		return -EHOSTUNREACH;
1548 	udc_set_mask_UDCCR(udc, UDCCR_UDR);
1549 	return 0;
1550 }
1551 
1552 static void udc_enable(struct pxa_udc *udc);
1553 static void udc_disable(struct pxa_udc *udc);
1554 
1555 /**
1556  * should_enable_udc - Tells if UDC should be enabled
1557  * @udc: udc device
1558  * Context: any
1559  *
 * The UDC should be enabled if :
 *  - the pullup resistor is connected
1563  *  - and a gadget driver is bound
1564  *  - and vbus is sensed (or no vbus sense is available)
1565  *
1566  * Returns 1 if UDC should be enabled, 0 otherwise
1567  */
1568 static int should_enable_udc(struct pxa_udc *udc)
1569 {
1570 	int put_on;
1571 
1572 	put_on = ((udc->pullup_on) && (udc->driver));
1573 	put_on &= ((udc->vbus_sensed) || (IS_ERR_OR_NULL(udc->transceiver)));
1574 	return put_on;
1575 }
1576 
1577 /**
1578  * should_disable_udc - Tells if UDC should be disabled
1579  * @udc: udc device
1580  * Context: any
1581  *
1582  * The UDC should be disabled if :
1583  *  - the pullup resistor is not connected
1584  *  - or no gadget driver is bound
 *  - or no vbus is sensed (when vbus sensing is available)
1586  *
1587  * Returns 1 if UDC should be disabled
1588  */
1589 static int should_disable_udc(struct pxa_udc *udc)
1590 {
1591 	int put_off;
1592 
1593 	put_off = ((!udc->pullup_on) || (!udc->driver));
1594 	put_off |= ((!udc->vbus_sensed) && (!IS_ERR_OR_NULL(udc->transceiver)));
1595 	return put_off;
1596 }
1597 
1598 /**
1599  * pxa_udc_pullup - Offer manual D+ pullup control
1600  * @_gadget: usb gadget using the control
1601  * @is_active: 0 if disconnect, else connect D+ pullup resistor
1602  * Context: !in_interrupt()
1603  *
1604  * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
1605  */
1606 static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
1607 {
1608 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1609 
1610 	if (!udc->gpiod && !udc->udc_command)
1611 		return -EOPNOTSUPP;
1612 
1613 	dplus_pullup(udc, is_active);
1614 
1615 	if (should_enable_udc(udc))
1616 		udc_enable(udc);
1617 	if (should_disable_udc(udc))
1618 		udc_disable(udc);
1619 	return 0;
1620 }
1621 
1625 /**
1626  * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
1627  * @_gadget: usb gadget
1628  * @is_active: 0 if should disable the udc, 1 if should enable
1629  *
 * Enables the udc, and optionally activates D+ pullup resistor. Or disables the
1631  * udc, and deactivates D+ pullup resistor.
1632  *
1633  * Returns 0
1634  */
1635 static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1636 {
1637 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1638 
1639 	udc->vbus_sensed = is_active;
1640 	if (should_enable_udc(udc))
1641 		udc_enable(udc);
1642 	if (should_disable_udc(udc))
1643 		udc_disable(udc);
1644 
1645 	return 0;
1646 }
1647 
1648 /**
1649  * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
1650  * @_gadget: usb gadget
1651  * @mA: current drawn
1652  *
1653  * Context: !in_interrupt()
1654  *
1655  * Called after a configuration was chosen by a USB host, to inform how much
1656  * current can be drawn by the device from VBus line.
1657  *
1658  * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
1659  */
1660 static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1661 {
1662 	struct pxa_udc *udc;
1663 
1664 	udc = to_gadget_udc(_gadget);
1665 	if (!IS_ERR_OR_NULL(udc->transceiver))
1666 		return usb_phy_set_power(udc->transceiver, mA);
1667 	return -EOPNOTSUPP;
1668 }
1669 
1670 static int pxa27x_udc_start(struct usb_gadget *g,
1671 		struct usb_gadget_driver *driver);
1672 static int pxa27x_udc_stop(struct usb_gadget *g);
1673 
1674 static const struct usb_gadget_ops pxa_udc_ops = {
1675 	.get_frame	= pxa_udc_get_frame,
1676 	.wakeup		= pxa_udc_wakeup,
1677 	.pullup		= pxa_udc_pullup,
1678 	.vbus_session	= pxa_udc_vbus_session,
1679 	.vbus_draw	= pxa_udc_vbus_draw,
1680 	.udc_start	= pxa27x_udc_start,
1681 	.udc_stop	= pxa27x_udc_stop,
1682 };
1683 
1684 /**
1685  * udc_disable - disable udc device controller
1686  * @udc: udc device
1687  * Context: any
1688  *
1689  * Disables the udc device : disables clocks, udc interrupts, control endpoint
1690  * interrupts.
1691  */
1692 static void udc_disable(struct pxa_udc *udc)
1693 {
1694 	if (!udc->enabled)
1695 		return;
1696 
1697 	udc_writel(udc, UDCICR0, 0);
1698 	udc_writel(udc, UDCICR1, 0);
1699 
1700 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1701 
1702 	ep0_idle(udc);
1703 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1704 	clk_disable(udc->clk);
1705 
1706 	udc->enabled = 0;
1707 }
1708 
1709 /**
1710  * udc_init_data - Initialize udc device data structures
1711  * @dev: udc device
1712  *
1713  * Initializes gadget endpoint list, endpoints locks. No action is taken
1714  * on the hardware.
1715  */
1716 static void udc_init_data(struct pxa_udc *dev)
1717 {
1718 	int i;
1719 	struct pxa_ep *ep;
1720 
1721 	/* device/ep0 records init */
1722 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1723 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1724 	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
1725 	ep0_idle(dev);
1726 
1727 	/* PXA endpoints init */
1728 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
1729 		ep = &dev->pxa_ep[i];
1730 
1731 		ep->enabled = is_ep0(ep);
1732 		INIT_LIST_HEAD(&ep->queue);
1733 		spin_lock_init(&ep->lock);
1734 	}
1735 
1736 	/* USB endpoints init */
1737 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
1738 		list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
1739 				&dev->gadget.ep_list);
1740 		usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep,
1741 					   dev->udc_usb_ep[i].usb_ep.maxpacket);
1742 	}
1743 }
1744 
1745 /**
1746  * udc_enable - Enables the udc device
1747  * @dev: udc device
1748  *
1749  * Enables the udc device : enables clocks, udc interrupts, control endpoint
1750  * interrupts, sets usb as UDC client and setups endpoints.
1751  */
1752 static void udc_enable(struct pxa_udc *udc)
1753 {
1754 	if (udc->enabled)
1755 		return;
1756 
1757 	clk_enable(udc->clk);
1758 	udc_writel(udc, UDCICR0, 0);
1759 	udc_writel(udc, UDCICR1, 0);
1760 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1761 
1762 	ep0_idle(udc);
1763 	udc->gadget.speed = USB_SPEED_FULL;
1764 	memset(&udc->stats, 0, sizeof(udc->stats));
1765 
1766 	pxa_eps_setup(udc);
1767 	udc_set_mask_UDCCR(udc, UDCCR_UDE);
1768 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
1769 	udelay(2);
1770 	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
1771 		dev_err(udc->dev, "Configuration errors, udc disabled\n");
1772 
1773 	/*
1774 	 * Caller must be able to sleep in order to cope with startup transients
1775 	 */
1776 	msleep(100);
1777 
1778 	/* enable suspend/resume and reset irqs */
1779 	udc_writel(udc, UDCICR1,
1780 			UDCICR1_IECC | UDCICR1_IERU
1781 			| UDCICR1_IESU | UDCICR1_IERS);
1782 
1783 	/* enable ep0 irqs */
1784 	pio_irq_enable(&udc->pxa_ep[0]);
1785 
1786 	udc->enabled = 1;
1787 }
1788 
1789 /**
 * pxa27x_udc_start - Register gadget driver
 * @g: usb gadget
 * @driver: gadget driver
1793  *
1794  * When a driver is successfully registered, it will receive control requests
1795  * including set_configuration(), which enables non-control requests.  Then
1796  * usb traffic follows until a disconnect is reported.  Then a host may connect
1797  * again, or the driver might get unbound.
1798  *
1799  * Note that the udc is not automatically enabled. Check function
1800  * should_enable_udc().
1801  *
1802  * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
1803  */
1804 static int pxa27x_udc_start(struct usb_gadget *g,
1805 		struct usb_gadget_driver *driver)
1806 {
1807 	struct pxa_udc *udc = to_pxa(g);
1808 	int retval;
1809 
1810 	/* first hook up the driver ... */
1811 	udc->driver = driver;
1812 
1813 	if (!IS_ERR_OR_NULL(udc->transceiver)) {
1814 		retval = otg_set_peripheral(udc->transceiver->otg,
1815 						&udc->gadget);
1816 		if (retval) {
1817 			dev_err(udc->dev, "can't bind to transceiver\n");
1818 			goto fail;
1819 		}
1820 	}
1821 
1822 	if (should_enable_udc(udc))
1823 		udc_enable(udc);
1824 	return 0;
1825 
1826 fail:
1827 	udc->driver = NULL;
1828 	return retval;
1829 }
1830 
1831 /**
1832  * stop_activity - Stops udc endpoints
1833  * @udc: udc device
1834  * @driver: gadget driver
1835  *
1836  * Disables all udc endpoints (even control endpoint), report disconnect to
1837  * the gadget user.
1838  */
1839 static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
1840 {
1841 	int i;
1842 
1843 	/* don't disconnect drivers more than once */
1844 	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1845 		driver = NULL;
1846 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1847 
1848 	for (i = 0; i < NR_USB_ENDPOINTS; i++)
1849 		pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
1850 }
1851 
1852 /**
1853  * pxa27x_udc_stop - Unregister the gadget driver
 * @g: usb gadget
1855  *
1856  * Returns 0 if no error, -ENODEV, -EINVAL otherwise
1857  */
1858 static int pxa27x_udc_stop(struct usb_gadget *g)
1859 {
1860 	struct pxa_udc *udc = to_pxa(g);
1861 
1862 	stop_activity(udc, NULL);
1863 	udc_disable(udc);
1864 
1865 	udc->driver = NULL;
1866 
1867 	if (!IS_ERR_OR_NULL(udc->transceiver))
1868 		return otg_set_peripheral(udc->transceiver->otg, NULL);
1869 	return 0;
1870 }
1871 
1872 /**
1873  * handle_ep0_ctrl_req - handle control endpoint control request
1874  * @udc: udc device
1875  * @req: control request
1876  */
1877 static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1878 				struct pxa27x_request *req)
1879 {
1880 	struct pxa_ep *ep = &udc->pxa_ep[0];
1881 	union {
1882 		struct usb_ctrlrequest	r;
1883 		u32			word[2];
1884 	} u;
1885 	int i;
1886 	int have_extrabytes = 0;
1887 	unsigned long flags;
1888 
1889 	nuke(ep, -EPROTO);
1890 	spin_lock_irqsave(&ep->lock, flags);
1891 
1892 	/*
	 * The PXA320 manual, in its section about back-to-back setup
	 * packets, describes this situation.  The solution is to set OPC to
	 * get rid of the status packet, and then continue with the setup
	 * packet. Generalize to pxa27x CPUs.
1897 	 */
1898 	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
1899 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1900 
1901 	/* read SETUP packet */
1902 	for (i = 0; i < 2; i++) {
1903 		if (unlikely(ep_is_empty(ep)))
1904 			goto stall;
1905 		u.word[i] = udc_ep_readl(ep, UDCDR);
1906 	}
1907 
1908 	have_extrabytes = !ep_is_empty(ep);
1909 	while (!ep_is_empty(ep)) {
1910 		i = udc_ep_readl(ep, UDCDR);
1911 		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
1912 	}
1913 
1914 	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1915 		u.r.bRequestType, u.r.bRequest,
1916 		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
1917 		le16_to_cpu(u.r.wLength));
1918 	if (unlikely(have_extrabytes))
1919 		goto stall;
1920 
1921 	if (u.r.bRequestType & USB_DIR_IN)
1922 		set_ep0state(udc, IN_DATA_STAGE);
1923 	else
1924 		set_ep0state(udc, OUT_DATA_STAGE);
1925 
1926 	/* Tell UDC to enter Data Stage */
1927 	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1928 
1929 	spin_unlock_irqrestore(&ep->lock, flags);
1930 	i = udc->driver->setup(&udc->gadget, &u.r);
1931 	spin_lock_irqsave(&ep->lock, flags);
1932 	if (i < 0)
1933 		goto stall;
1934 out:
1935 	spin_unlock_irqrestore(&ep->lock, flags);
1936 	return;
1937 stall:
1938 	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
1939 		udc_ep_readl(ep, UDCCSR), i);
1940 	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
1941 	set_ep0state(udc, STALL);
1942 	goto out;
1943 }
1944 
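/*
 * A rough sketch of the ep0 automaton driven by handle_ep0() below, as far as
 * it can be derived from the code (WAIT_ACK_SET_CONF_INTERF is entered from
 * pxa27x_change_configuration()/pxa27x_change_interface() instead):
 *
 *   WAIT_FOR_SETUP --- SETUP token (UDCCSR0_SA) --> SETUP_STAGE
 *   SETUP_STAGE ------ IN request  ---------------> IN_DATA_STAGE
 *   SETUP_STAGE ------ OUT request ---------------> OUT_DATA_STAGE
 *   IN_DATA_STAGE ---- last packet written -------> IN_STATUS_STAGE
 *   OUT_DATA_STAGE --- last packet read ----------> OUT_STATUS_STAGE
 *   *_STATUS_STAGE --- status handled ------------> WAIT_FOR_SETUP
 *   SETUP_STAGE ------ gadget setup() error ------> STALL --> WAIT_FOR_SETUP
 */
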
1945 /**
1946  * handle_ep0 - Handle control endpoint data transfers
1947  * @udc: udc device
1948  * @fifo_irq: 1 if triggered by fifo service type irq
1949  * @opc_irq: 1 if triggered by output packet complete type irq
1950  *
1951  * Context : when in_interrupt() or with ep->lock held
1952  *
1953  * Tries to transfer all pending request data into the endpoint and/or
1954  * transfer all pending data in the endpoint into usb requests.
1955  * Handles states of ep0 automata.
1956  *
1957  * PXA27x hardware handles several standard usb control requests without
1958  * driver notification.  The requests fully handled by hardware are :
1959  *  SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
1960  *  GET_STATUS
1961  * The requests handled by hardware, but with irq notification are :
1962  *  SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
1963  * The remaining standard requests really handled by handle_ep0 are :
1964  *  GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
1965  * Requests standardized outside of USB 2.0 chapter 9 are handled more
1966  * uniformly, by gadget drivers.
1967  *
1968  * The control endpoint state machine is _not_ USB spec compliant, it's even
1969  * hardly compliant with Intel PXA270 developers guide.
 * The key points which informed this state machine are:
1971  *   - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
1972  *     software.
1973  *   - on every OUT packet received, UDCCSR0_OPC is raised and held until
1974  *     cleared by software.
1975  *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
1976  *     before reading ep0.
1977  *     This is true only for PXA27x. This is not true anymore for PXA3xx family
1978  *     (check Back-to-Back setup packet in developers guide).
1979  *   - irq can be called on a "packet complete" event (opc_irq=1), while
1980  *     UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
1981  *     from experimentation).
1982  *   - as UDCCSR0_SA can be activated while in irq handling, and clearing
1983  *     UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
1984  *     => we never actually read the "status stage" packet of an IN data stage
1985  *     => this is not documented in Intel documentation
 *   - the hardware has no idea of a STATUS STAGE, it only handles the SETUP
 *     STAGE and DATA STAGE. The driver adds a STATUS STAGE to send the last
 *     zero length packet in OUT_STATUS_STAGE.
 *   - special attention was needed for IN_STATUS_STAGE. If a packet complete
 *     event is detected, we terminate the status stage without acknowledging
 *     the packet (so as not to risk losing a potential SETUP packet)
1992  */
1993 static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
1994 {
1995 	u32			udccsr0;
1996 	struct pxa_ep		*ep = &udc->pxa_ep[0];
1997 	struct pxa27x_request	*req = NULL;
1998 	int			completed = 0;
1999 
2000 	if (!list_empty(&ep->queue))
2001 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
2002 
2003 	udccsr0 = udc_ep_readl(ep, UDCCSR);
2004 	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
2005 		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
2006 		(fifo_irq << 1 | opc_irq));
2007 
2008 	if (udccsr0 & UDCCSR0_SST) {
2009 		ep_dbg(ep, "clearing stall status\n");
2010 		nuke(ep, -EPIPE);
2011 		ep_write_UDCCSR(ep, UDCCSR0_SST);
2012 		ep0_idle(udc);
2013 	}
2014 
2015 	if (udccsr0 & UDCCSR0_SA) {
2016 		nuke(ep, 0);
2017 		set_ep0state(udc, SETUP_STAGE);
2018 	}
2019 
2020 	switch (udc->ep0state) {
2021 	case WAIT_FOR_SETUP:
2022 		/*
2023 		 * Hardware bug : beware, we cannot clear OPC, since we would
2024 		 * miss a potential OPC irq for a setup packet.
2025 		 * So, we only do ... nothing, and hope for a next irq with
2026 		 * UDCCSR0_SA set.
2027 		 */
2028 		break;
2029 	case SETUP_STAGE:
2030 		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
2031 		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
2032 			handle_ep0_ctrl_req(udc, req);
2033 		break;
2034 	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
2035 		if (epout_has_pkt(ep))
2036 			ep_write_UDCCSR(ep, UDCCSR0_OPC);
2037 		if (req && !ep_is_full(ep))
2038 			completed = write_ep0_fifo(ep, req);
2039 		if (completed)
2040 			ep0_end_in_req(ep, req, NULL);
2041 		break;
2042 	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
2043 		if (epout_has_pkt(ep) && req)
2044 			completed = read_ep0_fifo(ep, req);
2045 		if (completed)
2046 			ep0_end_out_req(ep, req, NULL);
2047 		break;
2048 	case STALL:
2049 		ep_write_UDCCSR(ep, UDCCSR0_FST);
2050 		break;
2051 	case IN_STATUS_STAGE:
2052 		/*
2053 		 * Hardware bug : beware, we cannot clear OPC, since we would
		 * miss a potential packet complete (OPC) irq for a setup packet.
2055 		 * So, we only put the ep0 into WAIT_FOR_SETUP state.
2056 		 */
2057 		if (opc_irq)
2058 			ep0_idle(udc);
2059 		break;
2060 	case OUT_STATUS_STAGE:
2061 	case WAIT_ACK_SET_CONF_INTERF:
2062 		ep_warn(ep, "should never get in %s state here!!!\n",
2063 				EP0_STNAME(ep->dev));
2064 		ep0_idle(udc);
2065 		break;
2066 	}
2067 }
2068 
2069 /**
 * handle_ep - Handle endpoint data transfers
2071  * @ep: pxa physical endpoint
2072  *
2073  * Tries to transfer all pending request data into the endpoint and/or
2074  * transfer all pending data in the endpoint into usb requests.
2075  *
2076  * Is always called when in_interrupt() and with ep->lock released.
2077  */
2078 static void handle_ep(struct pxa_ep *ep)
2079 {
2080 	struct pxa27x_request	*req;
2081 	int completed;
2082 	u32 udccsr;
2083 	int is_in = ep->dir_in;
2084 	int loop = 0;
2085 	unsigned long		flags;
2086 
2087 	spin_lock_irqsave(&ep->lock, flags);
2088 	if (ep->in_handle_ep)
2089 		goto recursion_detected;
2090 	ep->in_handle_ep = 1;
2091 
2092 	do {
2093 		completed = 0;
2094 		udccsr = udc_ep_readl(ep, UDCCSR);
2095 
2096 		if (likely(!list_empty(&ep->queue)))
2097 			req = list_entry(ep->queue.next,
2098 					struct pxa27x_request, queue);
2099 		else
2100 			req = NULL;
2101 
2102 		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
2103 				req, udccsr, loop++);
2104 
2105 		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
2106 			udc_ep_writel(ep, UDCCSR,
2107 					udccsr & (UDCCSR_SST | UDCCSR_TRN));
2108 		if (!req)
2109 			break;
2110 
2111 		if (unlikely(is_in)) {
2112 			if (likely(!ep_is_full(ep)))
2113 				completed = write_fifo(ep, req);
2114 		} else {
2115 			if (likely(epout_has_pkt(ep)))
2116 				completed = read_fifo(ep, req);
2117 		}
2118 
2119 		if (completed) {
2120 			if (is_in)
2121 				ep_end_in_req(ep, req, &flags);
2122 			else
2123 				ep_end_out_req(ep, req, &flags);
2124 		}
2125 	} while (completed);
2126 
2127 	ep->in_handle_ep = 0;
2128 recursion_detected:
2129 	spin_unlock_irqrestore(&ep->lock, flags);
2130 }
2131 
2132 /**
2133  * pxa27x_change_configuration - Handle SET_CONF usb request notification
2134  * @udc: udc device
2135  * @config: usb configuration
2136  *
 * Posts the request to the upper level.
 * Doesn't use any pxa specific hardware configuration capabilities.
2139  */
2140 static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
2141 {
	struct usb_ctrlrequest req;
2143 
2144 	dev_dbg(udc->dev, "config=%d\n", config);
2145 
2146 	udc->config = config;
2147 	udc->last_interface = 0;
2148 	udc->last_alternate = 0;
2149 
2150 	req.bRequestType = 0;
2151 	req.bRequest = USB_REQ_SET_CONFIGURATION;
2152 	req.wValue = config;
2153 	req.wIndex = 0;
2154 	req.wLength = 0;
2155 
2156 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2157 	udc->driver->setup(&udc->gadget, &req);
2158 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2159 }
2160 
2161 /**
2162  * pxa27x_change_interface - Handle SET_INTERF usb request notification
2163  * @udc: udc device
2164  * @iface: interface number
2165  * @alt: alternate setting number
2166  *
 * Posts the request to the upper level.
 * Doesn't use any pxa specific hardware configuration capabilities.
2169  */
2170 static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
2171 {
2172 	struct usb_ctrlrequest  req;
2173 
2174 	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
2175 
2176 	udc->last_interface = iface;
2177 	udc->last_alternate = alt;
2178 
2179 	req.bRequestType = USB_RECIP_INTERFACE;
2180 	req.bRequest = USB_REQ_SET_INTERFACE;
2181 	req.wValue = alt;
2182 	req.wIndex = iface;
2183 	req.wLength = 0;
2184 
2185 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2186 	udc->driver->setup(&udc->gadget, &req);
2187 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2188 }
2189 
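/*
 * Endpoint interrupt status appears packed two bits per endpoint: UDCISR0
 * carries endpoints 0-15 and UDCISR1 endpoints 16-23, each pair holding a
 * fifo error and a packet complete flag.  irq_handle_data() below walks both
 * registers with the ">>= 2" loops and acknowledges each endpoint by writing
 * its two bits back through UDCISR_INT() before servicing it.
 */
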
2190 /*
2191  * irq_handle_data - Handle data transfer
 * @irq: IRQ number
 * @udc: pxa_udc device structure
 *
 * Called from the irq handler, transfers data between the endpoint fifos and
 * the request queues.
2196  */
2197 static void irq_handle_data(int irq, struct pxa_udc *udc)
2198 {
2199 	int i;
2200 	struct pxa_ep *ep;
2201 	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
2202 	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
2203 
2204 	if (udcisr0 & UDCISR_INT_MASK) {
2205 		udc->pxa_ep[0].stats.irqs++;
2206 		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
2207 		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
2208 				!!(udcisr0 & UDCICR_PKTCOMPL));
2209 	}
2210 
2211 	udcisr0 >>= 2;
2212 	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
2213 		if (!(udcisr0 & UDCISR_INT_MASK))
2214 			continue;
2215 
2216 		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2217 
2218 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2219 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2220 			ep = &udc->pxa_ep[i];
2221 			ep->stats.irqs++;
2222 			handle_ep(ep);
2223 		}
2224 	}
2225 
2226 	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
2227 		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
2228 		if (!(udcisr1 & UDCISR_INT_MASK))
2229 			continue;
2230 
2231 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2232 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2233 			ep = &udc->pxa_ep[i];
2234 			ep->stats.irqs++;
2235 			handle_ep(ep);
2236 		}
2237 	}
2238 
2239 }
2240 
2241 /**
2242  * irq_udc_suspend - Handle IRQ "UDC Suspend"
2243  * @udc: udc device
2244  */
2245 static void irq_udc_suspend(struct pxa_udc *udc)
2246 {
2247 	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
2248 	udc->stats.irqs_suspend++;
2249 
2250 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2251 			&& udc->driver && udc->driver->suspend)
2252 		udc->driver->suspend(&udc->gadget);
2253 	ep0_idle(udc);
2254 }
2255 
2256 /**
 * irq_udc_resume - Handle IRQ "UDC Resume"
 * @udc: udc device
 */
2260 static void irq_udc_resume(struct pxa_udc *udc)
2261 {
2262 	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
2263 	udc->stats.irqs_resume++;
2264 
2265 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2266 			&& udc->driver && udc->driver->resume)
2267 		udc->driver->resume(&udc->gadget);
2268 }
2269 
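/*
 * On a configuration change the UDC latches the configuration, interface and
 * alternate setting it acknowledged into the UDCCR ACN/AIN/AAISN fields;
 * irq_udc_reconfig() below reads them back, forwards the equivalent
 * SET_CONFIGURATION/SET_INTERFACE requests to the gadget driver, and finally
 * sets UDCCR_SMAC so that, going by the register field names, the controller
 * switches its endpoint memory to the newly active configuration.
 */
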
2270 /**
2271  * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
2272  * @udc: udc device
2273  */
2274 static void irq_udc_reconfig(struct pxa_udc *udc)
2275 {
2276 	unsigned config, interface, alternate, config_change;
2277 	u32 udccr = udc_readl(udc, UDCCR);
2278 
2279 	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
2280 	udc->stats.irqs_reconfig++;
2281 
2282 	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
2283 	config_change = (config != udc->config);
2284 	pxa27x_change_configuration(udc, config);
2285 
2286 	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
2287 	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
2288 	pxa27x_change_interface(udc, interface, alternate);
2289 
2290 	if (config_change)
2291 		update_pxa_ep_matches(udc);
2292 	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
2293 }
2294 
2295 /**
2296  * irq_udc_reset - Handle IRQ "UDC Reset"
2297  * @udc: udc device
2298  */
2299 static void irq_udc_reset(struct pxa_udc *udc)
2300 {
2301 	u32 udccr = udc_readl(udc, UDCCR);
2302 	struct pxa_ep *ep = &udc->pxa_ep[0];
2303 
2304 	dev_info(udc->dev, "USB reset\n");
2305 	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
2306 	udc->stats.irqs_reset++;
2307 
2308 	if ((udccr & UDCCR_UDA) == 0) {
2309 		dev_dbg(udc->dev, "USB reset start\n");
2310 		stop_activity(udc, udc->driver);
2311 	}
2312 	udc->gadget.speed = USB_SPEED_FULL;
	memset(&udc->stats, 0, sizeof(udc->stats));
2314 
2315 	nuke(ep, -EPROTO);
2316 	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
2317 	ep0_idle(udc);
2318 }
2319 
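/*
 * Besides the per-endpoint bits, the top of UDCISR1 (the 0xf8000000 mask used
 * in pxa_udc_irq() below) carries the "special" events: suspend (IRSU),
 * resume (IRRU), configuration change (IRCC) and reset (IRRS), each dispatched
 * to its irq_udc_*() handler above.
 */
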
2320 /**
2321  * pxa_udc_irq - Main irq handler
2322  * @irq: irq number
2323  * @_dev: udc device
2324  *
2325  * Handles all udc interrupts
2326  */
2327 static irqreturn_t pxa_udc_irq(int irq, void *_dev)
2328 {
2329 	struct pxa_udc *udc = _dev;
2330 	u32 udcisr0 = udc_readl(udc, UDCISR0);
2331 	u32 udcisr1 = udc_readl(udc, UDCISR1);
2332 	u32 udccr = udc_readl(udc, UDCCR);
2333 	u32 udcisr1_spec;
2334 
2335 	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
2336 		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
2337 
2338 	udcisr1_spec = udcisr1 & 0xf8000000;
2339 	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
2340 		irq_udc_suspend(udc);
2341 	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
2342 		irq_udc_resume(udc);
2343 	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
2344 		irq_udc_reconfig(udc);
2345 	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
2346 		irq_udc_reset(udc);
2347 
2348 	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
2349 		irq_handle_data(irq, udc);
2350 
2351 	return IRQ_HANDLED;
2352 }
2353 
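/*
 * Static endpoint table.  Each PXA_EP_*() entry below ties one physical fifo
 * (first argument) to a fixed usb endpoint address and, going by the macro
 * parameters, to the configuration/interface/alternate setting it serves (see
 * pxa27x_udc.h for the exact layout).  This is why the same usb endpoint
 * address appears several times: once per gadget family listed in the
 * comments.
 */
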
2354 static struct pxa_udc memory = {
2355 	.gadget = {
2356 		.ops		= &pxa_udc_ops,
2357 		.ep0		= &memory.udc_usb_ep[0].usb_ep,
2358 		.name		= driver_name,
2359 		.dev = {
2360 			.init_name	= "gadget",
2361 		},
2362 	},
2363 
2364 	.udc_usb_ep = {
2365 		USB_EP_CTRL,
2366 		USB_EP_OUT_BULK(1),
2367 		USB_EP_IN_BULK(2),
2368 		USB_EP_IN_ISO(3),
2369 		USB_EP_OUT_ISO(4),
2370 		USB_EP_IN_INT(5),
2371 	},
2372 
2373 	.pxa_ep = {
2374 		PXA_EP_CTRL,
2375 		/* Endpoints for gadget zero */
2376 		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
2377 		PXA_EP_IN_BULK(2,  2, 3, 0, 0),
2378 		/* Endpoints for ether gadget, file storage gadget */
2379 		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
2380 		PXA_EP_IN_BULK(4,  2, 1, 0, 0),
2381 		PXA_EP_IN_ISO(5,   3, 1, 0, 0),
2382 		PXA_EP_OUT_ISO(6,  4, 1, 0, 0),
2383 		PXA_EP_IN_INT(7,   5, 1, 0, 0),
2384 		/* Endpoints for RNDIS, serial */
2385 		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
2386 		PXA_EP_IN_BULK(9,  2, 2, 0, 0),
2387 		PXA_EP_IN_INT(10,  5, 2, 0, 0),
2388 		/*
		 * All the following endpoints are only here for completeness.
		 * They will never work, as multiple interfaces are really
		 * broken on the pxa.
		 */
2393 		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
2394 		PXA_EP_IN_BULK(12,  2, 2, 1, 0),
2395 		/* Endpoint for CDC Ether */
2396 		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
2397 		PXA_EP_IN_BULK(14,  2, 1, 1, 1),
2398 	}
2399 };
2400 
2401 #if defined(CONFIG_OF)
static const struct of_device_id udc_pxa_dt_ids[] = {
2403 	{ .compatible = "marvell,pxa270-udc" },
2404 	{}
2405 };
2406 MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
2407 #endif
2408 
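/*
 * The D+ pullup can be driven either through legacy platform data (a gpio
 * number plus an optional udc_command callback in pxa2xx_udc_mach_info) or,
 * on device tree platforms matched by the table above, through a gpio
 * descriptor looked up directly from the device; pxa_udc_probe() below
 * handles both cases.
 */
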
2409 /**
 * pxa_udc_probe - probes the udc device
 * @pdev: platform device
 *
 * Performs basic init: ioremaps the udc registers, gets the udc clock,
 * requests the irq, creates debugfs files and registers the gadget udc.
2415  */
2416 static int pxa_udc_probe(struct platform_device *pdev)
2417 {
2418 	struct resource *regs;
2419 	struct pxa_udc *udc = &memory;
2420 	int retval = 0, gpio;
2421 	struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
2422 	unsigned long gpio_flags;
2423 
2424 	if (mach) {
2425 		gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
2426 		gpio = mach->gpio_pullup;
2427 		if (gpio_is_valid(gpio)) {
2428 			retval = devm_gpio_request_one(&pdev->dev, gpio,
2429 						       gpio_flags,
2430 						       "USB D+ pullup");
2431 			if (retval)
2432 				return retval;
2433 			udc->gpiod = gpio_to_desc(mach->gpio_pullup);
2434 		}
2435 		udc->udc_command = mach->udc_command;
2436 	} else {
		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
2438 	}
2439 
2440 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2441 	udc->regs = devm_ioremap_resource(&pdev->dev, regs);
2442 	if (IS_ERR(udc->regs))
2443 		return PTR_ERR(udc->regs);
2444 	udc->irq = platform_get_irq(pdev, 0);
2445 	if (udc->irq < 0)
2446 		return udc->irq;
2447 
2448 	udc->dev = &pdev->dev;
2449 	udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
2450 
2451 	if (IS_ERR(udc->gpiod)) {
2452 		dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
2453 			PTR_ERR(udc->gpiod));
2454 		return PTR_ERR(udc->gpiod);
2455 	}
2456 	if (udc->gpiod)
2457 		gpiod_direction_output(udc->gpiod, 0);
2458 
2459 	udc->clk = devm_clk_get(&pdev->dev, NULL);
2460 	if (IS_ERR(udc->clk))
2461 		return PTR_ERR(udc->clk);
2462 
2463 	retval = clk_prepare(udc->clk);
2464 	if (retval)
2465 		return retval;
2466 
2467 	udc->vbus_sensed = 0;
2468 
2469 	the_controller = udc;
2470 	platform_set_drvdata(pdev, udc);
2471 	udc_init_data(udc);
2472 
2473 	/* irq setup after old hardware state is cleaned up */
2474 	retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
2475 				  IRQF_SHARED, driver_name, udc);
2476 	if (retval != 0) {
2477 		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
2478 			driver_name, udc->irq, retval);
2479 		goto err;
2480 	}
2481 
2482 	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2483 	if (retval)
2484 		goto err;
2485 
2486 	pxa_init_debugfs(udc);
2487 	if (should_enable_udc(udc))
2488 		udc_enable(udc);
2489 	return 0;
2490 err:
2491 	clk_unprepare(udc->clk);
2492 	return retval;
2493 }
2494 
2495 /**
2496  * pxa_udc_remove - removes the udc device driver
2497  * @_dev: platform device
2498  */
2499 static int pxa_udc_remove(struct platform_device *_dev)
2500 {
2501 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2502 
2503 	usb_del_gadget_udc(&udc->gadget);
2504 	pxa_cleanup_debugfs(udc);
2505 
2506 	usb_put_phy(udc->transceiver);
2507 
2508 	udc->transceiver = NULL;
2509 	the_controller = NULL;
2510 	clk_unprepare(udc->clk);
2511 
2512 	return 0;
2513 }
2514 
2515 static void pxa_udc_shutdown(struct platform_device *_dev)
2516 {
2517 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2518 
2519 	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
2520 		udc_disable(udc);
2521 }
2522 
2523 #ifdef CONFIG_PXA27x
2524 extern void pxa27x_clear_otgph(void);
2525 #else
2526 #define pxa27x_clear_otgph()   do {} while (0)
2527 #endif
2528 
2529 #ifdef CONFIG_PM
2530 /**
2531  * pxa_udc_suspend - Suspend udc device
2532  * @_dev: platform device
2533  * @state: suspend state
2534  *
 * Suspends the udc: saves the ep0 control/status register (UDCCSR0), drops the
 * D+ pullup, then disables the udc device.
2537  */
2538 static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2539 {
2540 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2541 	struct pxa_ep *ep;
2542 
2543 	ep = &udc->pxa_ep[0];
2544 	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
2545 
2546 	udc_disable(udc);
2547 	udc->pullup_resume = udc->pullup_on;
2548 	dplus_pullup(udc, 0);
2549 
2550 	return 0;
2551 }
2552 
2553 /**
2554  * pxa_udc_resume - Resume udc device
2555  * @_dev: platform device
2556  *
 * Resumes the udc: restores the saved ep0 control/status register (UDCCSR0),
 * restores the D+ pullup state, then re-enables the udc device if needed.
2559  */
2560 static int pxa_udc_resume(struct platform_device *_dev)
2561 {
2562 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2563 	struct pxa_ep *ep;
2564 
2565 	ep = &udc->pxa_ep[0];
2566 	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
2567 
2568 	dplus_pullup(udc, udc->pullup_resume);
2569 	if (should_enable_udc(udc))
2570 		udc_enable(udc);
2571 	/*
2572 	 * We do not handle OTG yet.
2573 	 *
	 * The OTGPH bit is set when sleep mode is entered.
	 * It indicates that the OTG pad is retaining its state.
	 * Upon exit from sleep mode and before clearing OTGPH,
	 * software must configure the USB OTG pad, UDC, and UHC
	 * to the state they were in before entering sleep mode.
2579 	 */
2580 	pxa27x_clear_otgph();
2581 
2582 	return 0;
2583 }
2584 #endif
2585 
2586 /* work with hotplug and coldplug */
2587 MODULE_ALIAS("platform:pxa27x-udc");
2588 
2589 static struct platform_driver udc_driver = {
2590 	.driver		= {
2591 		.name	= "pxa27x-udc",
2592 		.of_match_table = of_match_ptr(udc_pxa_dt_ids),
2593 	},
2594 	.probe		= pxa_udc_probe,
2595 	.remove		= pxa_udc_remove,
2596 	.shutdown	= pxa_udc_shutdown,
2597 #ifdef CONFIG_PM
2598 	.suspend	= pxa_udc_suspend,
2599 	.resume		= pxa_udc_resume
2600 #endif
2601 };
2602 
2603 module_platform_driver(udc_driver);
2604 
2605 MODULE_DESCRIPTION(DRIVER_DESC);
2606 MODULE_AUTHOR("Robert Jarzmik");
2607 MODULE_LICENSE("GPL");
2608