// SPDX-License-Identifier: GPL-2.0+
/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (https://www.amd.com)
 * Author: Thomas Dahlmann
 */

/*
 * This file contains the core driver implementation for the UDC that is
 * based on the Synopsys device controller IP (different from the HS OTG IP),
 * which is either connected through the PCI bus or integrated into SoC
 * platforms.
 */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"Synopsys USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "amd5536udc.h"

static void udc_tasklet_disconnect(unsigned long);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/*
 * set_rde -- used to control enabling of RX DMA. The problem is
 * that the UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;
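
/*
 * Illustrative sketch (not part of the driver): a typical set_rde
 * sequence when OUT data sits in the FIFO but no request is queued,
 * using the states documented above and the RDE timer below:
 *
 *	set_rde = 1;
 *	udc_timer.expires = jiffies + HZ / UDC_RDE_TIMER_DIV;
 *	add_timer(&udc_timer);
 *	// timer fires, FIFO still filled -> set_rde = 2, timer re-armed
 *	// timer fires again, set_rde == 2 -> RDE bit set, set_rde = -1
 */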

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect);

/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer WITHOUT desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer WITH desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(dev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);
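
/*
 * Worked example (illustrative sketch, not part of the driver): this
 * assumes AMD_BIT(x) expands to a single-bit mask (1 << x) as defined
 * in amd5536udc.h. Masking only the SOF and SC device interrupts would
 * then OR two single-bit masks into one register write:
 *
 *	u32 msk = AMD_BIT(UDC_DEVINT_SOF) | AMD_BIT(UDC_DEVINT_SC);
 *	writel(msk, &dev->regs->irqmsk);
 */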

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}
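
/*
 * Illustrative sketch (not part of the driver): AMD_GETBITS() and
 * AMD_ADDBITS() are assumed to follow the usual mask/offset pattern
 * from amd5536udc.h, extracting resp. replacing a named bitfield.
 * Doubling the IN fifo size of ep i would then read:
 *
 *	tmp = readl(&dev->ep[i].regs->bufin_framenum);
 *	entries = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
 *	tmp = AMD_ADDBITS(tmp, entries * 2, UDC_EPIN_BUFF_SIZE);
 *	writel(tmp, &dev->ep[i].regs->bufin_framenum);
 */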

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << num;
		ep->naking = 1;
	} else {
		cnak_pending &= ~(1 << num);
	}
}
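
/*
 * Usage sketch (illustrative only): callers clear NAK by writing CNAK
 * and then record via UDC_QUEUE_CNAK() whether the clear took effect,
 * so a pending clear can be retried later from cnak_pending:
 *
 *	tmp = readl(&ep->regs->ctl);
 *	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 *	writel(tmp, &ep->regs->ctl);
 *	ep->naking = 0;
 *	UDC_QUEUE_CNAK(ep, ep->num);
 */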

/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8 udc_csr_epix;
	unsigned		maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					  / UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);
	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep	*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* Frees pci pool descriptors of a DMA chain */
static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	struct udc_data_dma *td = req->td_data;
	unsigned int i;

	dma_addr_t addr_next = 0x00;
	dma_addr_t addr = (dma_addr_t)td->next;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	for (i = 1; i < req->chain_len; i++) {
		td = phys_to_virt(addr);
		addr_next = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr_next;
	}
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep	*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		dma_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
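
/*
 * Worked example (illustrative only, assuming UDC_DWORD_BYTES == 4):
 * a 7-byte payload is pushed as one 32-bit FIFO write followed by
 * three single-byte writes from the partially filled last dword:
 *
 *	bytes = 7;
 *	// dword loop: i = 0     -> writel(buf[0], ep->txfifo)
 *	// byte loop:  j = 0..2  -> writeb((u8)(buf[1] >> (j * 8)), ...)
 */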

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i << 2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i << 2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* number of received bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
	     bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = dma_pool_alloc(ep->dev->data_requests,
					    gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						    ep->ep.maxpacket,
						    UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
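
/*
 * Worked example (illustrative only): a 1500-byte OUT request on a
 * 512-byte bulk endpoint needs
 *
 *	len = 1500 / 512 + 1 = 3	(i.e. DIV_ROUND_UP(1500, 512))
 *
 * descriptors; the first one is req->td_data itself, so only two new
 * descriptors are taken from the dma_pool when the chain must grow.
 */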

/* Create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;
}
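
/*
 * Usage sketch (illustrative only): the L (last) bit terminates every
 * chain, so walking to its end mirrors udc_get_last_dma_desc():
 *
 *	for (td = req->td_data;
 *	     td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L));
 *	     td = phys_to_virt(td->next))
 *		;	// td now points at the last descriptor
 */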

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* number of received bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* number of received bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}

/* Enables RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable RX DMA during descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}
	} else if (ep->dma) {
		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {
		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
EXPORT_SYMBOL_GPL(empty_req_queue);

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc		*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};

/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* Inits UDC context */
void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
EXPORT_SYMBOL_GPL(udc_basic_init);

/* Inits registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	/* Return if already connected */
	if (dev->connected)
		return;

	dev_info(dev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event, resets the UDC and performs
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	/* Return if already disconnected */
	if (!dev->connected)
		return;

	dev_info(dev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */
	tasklet_schedule(&disconnect_tasklet);
}

/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = udc;
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}

	/* disable ep0 */
	ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	/* Don't do this for Broadcom UDC since this is a reserved
	 * bit.
	 */
	if (dev->chiprev != UDC_BCM_REV) {
		spin_lock_irqsave(&udc_irq_spinlock, flags);
		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
		readl(&dev->regs->cfg);
		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
	}
}
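
/*
 * Note (illustrative only): the readl() of &dev->regs->cfg right after
 * the soft-reset write above follows the common MMIO idiom of reading
 * a register back to flush a posted write before the lock is dropped:
 *
 *	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 *	readl(&dev->regs->cfg);	// flush posted write
 */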
1727 
1728 /* RDE timer callback to set RDE bit */
1729 static void udc_timer_function(struct timer_list *unused)
1730 {
1731 	u32 tmp;
1732 
1733 	spin_lock_irq(&udc_irq_spinlock);
1734 
1735 	if (set_rde > 0) {
1736 		/*
1737 		 * open the fifo if fifo was filled on last timer call
1738 		 * conditionally
1739 		 */
1740 		if (set_rde > 1) {
1741 			/* set RDE to receive setup data */
1742 			tmp = readl(&udc->regs->ctl);
1743 			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1744 			writel(tmp, &udc->regs->ctl);
1745 			set_rde = -1;
1746 		} else if (readl(&udc->regs->sts)
1747 				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1748 			/*
1749 			 * if fifo empty setup polling, do not just
1750 			 * open the fifo
1751 			 */
1752 			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1753 			if (!stop_timer)
1754 				add_timer(&udc_timer);
1755 		} else {
1756 			/*
1757 			 * fifo contains data now, setup timer for opening
1758 			 * the fifo when timer expires to be able to receive
1759 			 * setup packets, when data packets gets queued by
1760 			 * gadget layer then timer will forced to expire with
1761 			 * set_rde=0 (RDE is set in udc_queue())
1762 			 */
1763 			set_rde++;
1764 			/* debug: lhadmot_timer_start = 221070 */
1765 			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1766 			if (!stop_timer)
1767 				add_timer(&udc_timer);
1768 		}
1769 
1770 	} else
1771 		set_rde = -1; /* RDE was set by udc_queue() */
1772 	spin_unlock_irq(&udc_irq_spinlock);
1773 	if (stop_timer)
1774 		complete(&on_exit);
1775 
1776 }
1777 
1778 /* Handle halt state, used in stall poll timer */
1779 static void udc_handle_halt_state(struct udc_ep *ep)
1780 {
1781 	u32 tmp;
1782 	/* set stall as long not halted */
1783 	if (ep->halted == 1) {
1784 		tmp = readl(&ep->regs->ctl);
1785 		/* STALL cleared ? */
1786 		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1787 			/*
1788 			 * FIXME: MSC spec requires that stall remains
1789 			 * even on receivng of CLEAR_FEATURE HALT. So
1790 			 * we would set STALL again here to be compliant.
1791 			 * But with current mass storage drivers this does
1792 			 * not work (would produce endless host retries).
1793 			 * So we clear halt on CLEAR_FEATURE.
1794 			 *
1795 			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1796 			tmp |= AMD_BIT(UDC_EPCTL_S);
1797 			writel(tmp, &ep->regs->ctl);*/
1798 
1799 			/* clear NAK by writing CNAK */
1800 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1801 			writel(tmp, &ep->regs->ctl);
1802 			ep->halted = 0;
1803 			UDC_QUEUE_CNAK(ep, ep->num);
1804 		}
1805 	}
1806 }
1807 
1808 /* Stall timer callback to poll S bit and set it again after */
1809 static void udc_pollstall_timer_function(struct timer_list *unused)
1810 {
1811 	struct udc_ep *ep;
1812 	int halted = 0;
1813 
1814 	spin_lock_irq(&udc_stall_spinlock);
1815 	/*
1816 	 * only one IN and OUT endpoints are handled
1817 	 * IN poll stall
1818 	 */
1819 	ep = &udc->ep[UDC_EPIN_IX];
1820 	udc_handle_halt_state(ep);
1821 	if (ep->halted)
1822 		halted = 1;
1823 	/* OUT poll stall */
1824 	ep = &udc->ep[UDC_EPOUT_IX];
1825 	udc_handle_halt_state(ep);
1826 	if (ep->halted)
1827 		halted = 1;
1828 
1829 	/* setup timer again when still halted */
1830 	if (!stop_pollstall_timer && halted) {
1831 		udc_pollstall_timer.expires = jiffies +
1832 					HZ * UDC_POLLSTALL_TIMER_USECONDS
1833 					/ (1000 * 1000);
1834 		add_timer(&udc_pollstall_timer);
1835 	}
1836 	spin_unlock_irq(&udc_stall_spinlock);
1837 
1838 	if (stop_pollstall_timer)
1839 		complete(&on_pollstall_exit);
1840 }
1841 
1842 /* Inits endpoint 0 so that SETUP packets are processed */
1843 static void activate_control_endpoints(struct udc *dev)
1844 {
1845 	u32 tmp;
1846 
1847 	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
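		/* mark the single ep0 OUT descriptor as last in the chain */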
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
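			/*
			 * an expiry in the past makes the pending timer
			 * fire at once; set_rde == 0 keeps its handler
			 * from enabling RDE
			 */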
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}

/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}

/* Called by gadget driver to register itself */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct udc *dev = to_amd5536_udc(g);
	u32 tmp;

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to the gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}

/* shutdown requests and disconnect from gadget */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* empty queues and init hardware */
	udc_basic_init(dev);

	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}

/* Called by gadget driver to unregister itself */
static int amd5536_udc_stop(struct usb_gadget *g)
{
	struct udc *dev = to_amd5536_udc(g);
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	return 0;
}

/* Clear pending NAK bits */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check epin's */
	DBG(dev, "CNAK pending queue processing\n");
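	/*
	 * cnak_pending is maintained by the UDC_QUEUE_CNAK() macro and
	 * is assumed here to hold a bitmask of endpoints whose CNAK
	 * write had to be deferred; retry each flagged endpoint now
	 */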
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}

/* Enable RX DMA after a setup packet */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * enable RX DMA immediately only when no data endpoint
		 * is enabled or data is already queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * set up a timer for enabling RDE (so as not to
			 * enable RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}

/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t		ret_val = IRQ_NONE;
	u32			tmp;
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned int		count;
	struct udc_data_dma	*td = NULL;
	unsigned		dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
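			/*
			 * a BNA caused by our own transfer cancel is
			 * expected; otherwise remember it so the desc.
			 * can be recovered from the BNA dummy later
			 */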
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* number of bytes received */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* number of bytes received */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may already have been started by
				 * udc_queue() called from the gadget
				 * driver's completion routine. This happens
				 * when the queue holds only one request.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty, to allow ep0 packets to
				 * pass through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be re-enabled for each desc in PPBDU
			 * mode and must be enabled for PPBNDU mode in case
			 * of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* process CNAKs only when the rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}

/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned len;

	ep = &dev->ep[ep_ix];

	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(dev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(dev->dev,
			"HE ep%din occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set - completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td)
					req->req.actual = req->req.length;
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further requests available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;
	}
	/*
	 * status reg has IN bit set and TDC not set: if TDC was handled,
	 * IN must not be handled as well (UDC defect?)
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;

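					/*
					 * hand the chain to the UDC: write
					 * its address, mark the first desc.
					 * HOST READY, then set poll demand
					 * to start the transfer
					 */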
					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* disable interrupt */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;
}

/* Interrupt handler for Control OUT traffic */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep	*ep;
	struct udc_ep	*ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of OUT data: SETUP or a zero-byte DATA packet */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
					UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * set up a timer for enabling RDE (so as not to
			 * enable RXFIFO DMA for the data stage too early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}

		/*
		 * mass storage reset must be processed here because the
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and one OUT endpoint are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;

		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* DATA packet: control-write data or a 0-byte status packet */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* fetch OUT data; often just a zero-length packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {
			/* number of bytes received */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* OUT data in fifo mode is broken, force 0-byte path */
			count = 0;

			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* process CNAKs only when the rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

finished:
	return ret_val;
}

/* Interrupt handler for Control IN traffic */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
				&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ? */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;

		if (ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);

				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);

					/* set poll demand bit */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);

					/* all bytes will be transferred */
					req->req.actual = req->req.length;

					/* complete req */
					complete_req(ep, req, 0);

				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);

					/* length bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;

					req->req.actual += len;
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/* complete req */
						complete_req(ep, req, 0);
					}
				}
			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}

	return ret_val;
}

/* Interrupt handler for global device events */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

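	/* dev_irq was read and already acknowledged by udc_irq() */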
	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0, sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* forward the faked SET_CONFIGURATION to the gadget driver */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0, sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
				dev->cur_alt, dev->cur_intf);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* forward the faked SET_INTERFACE to the gadget driver */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		dev_info(dev->dev, "Connect: %s\n",
			 usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}
	}

	return ret_val;
}

/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoints:
		 * iterate over the remaining ep irq bits
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* OUT ep irq? (OUT ep indices follow the IN ones) */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}
	}

	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}

	spin_unlock(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL_GPL(udc_irq);

/* Tears down the gadget device on release */
void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);

	kfree(dev);
}
EXPORT_SYMBOL_GPL(gadget_release);

/* Cleanup on device remove */
void udc_remove(struct udc *dev)
{
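	/*
	 * tear-down handshake: once the stop flags are set, a firing
	 * timer callback is expected to signal its completion instead
	 * of re-arming itself (see udc_pollstall_timer_function), so
	 * wait only when a timer is still pending
	 */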
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	del_timer_sync(&udc_pollstall_timer);
	udc = NULL;
}
EXPORT_SYMBOL_GPL(udc_remove);

/* free all the dma pools */
void free_dma_pools(struct udc *dev)
{
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
		      dev->ep[UDC_EP0OUT_IX].td_phys);
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	dma_pool_destroy(dev->stp_requests);
	dma_pool_destroy(dev->data_requests);
}
EXPORT_SYMBOL_GPL(free_dma_pools);

/* create dma pools on init */
int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma	*td_stp;
	struct udc_data_dma	*td_data;
	int retval;

	/* make the DMA mode settings consistent */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}
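	/*
	 * resulting DMA mode matrix:
	 *   use_dma_ppb == 1: packet-per-buffer mode, DU as requested
	 *   use_dma_ppb == 0: buffer-fill mode, DU forced off
	 */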

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", dev->dev,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		return -ENOMEM;
	}

	/*
	 * EP0 IN dma regs = dev control regs; a non-NULL ep->dma
	 * also marks the endpoint as DMA capable
	 */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", dev->dev,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto err_create_dma_pool;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (!td_stp) {
		retval = -ENOMEM;
		goto err_alloc_dma;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* dma desc for ep0-out data (e.g. zero-length packets) */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (!td_data) {
		retval = -ENOMEM;
		goto err_alloc_phys;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

err_alloc_phys:
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
err_alloc_dma:
	dma_pool_destroy(dev->stp_requests);
	dev->stp_requests = NULL;
err_create_dma_pool:
	dma_pool_destroy(dev->data_requests);
	dev->data_requests = NULL;
	return retval;
}
EXPORT_SYMBOL_GPL(init_dma_pools);

/* general probe */
int udc_probe(struct udc *dev)
{
	char		tmp[128];
	u32		reg;
	int		retval;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.name = name;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(dev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof(tmp), "%d", dev->irq);

	/* print this device info for AMD chips only */
	if (dev->chiprev == UDC_HSA0_REV ||
	    dev->chiprev == UDC_HSB1_REV) {
		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
			 tmp, dev->phys_addr, dev->chiprev,
			 (dev->chiprev == UDC_HSA0_REV) ?
			 "A0" : "B1");
		strcpy(tmp, UDC_DRIVER_VERSION_STRING);
		if (dev->chiprev == UDC_HSA0_REV) {
			dev_err(dev->dev, "chip revision is A0; too old\n");
			retval = -ENODEV;
			goto finished;
		}
		dev_info(dev->dev,
			 "driver version: %s (for Geode5536 B1)\n", tmp);
	}

	udc = dev;

	retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
					    gadget_release);
	if (retval)
		goto finished;

	/* timer init */
	timer_setup(&udc_timer, udc_timer_function, 0);
	timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);

	/* set SD */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}
EXPORT_SYMBOL_GPL(udc_probe);
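
/*
 * Usage sketch (illustrative only): how bus-glue code is expected to
 * wire up the entry points exported above. This is a simplified,
 * hypothetical platform probe: my_glue_probe() and my_setup_mmio()
 * are made-up names, and resource mapping plus most error handling
 * are elided; see the real PCI/platform glue drivers for details.
 *
 *	static int my_glue_probe(struct platform_device *pdev)
 *	{
 *		struct udc *dev;
 *		int ret;
 *
 *		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *		if (!dev)
 *			return -ENOMEM;
 *		spin_lock_init(&dev->lock);
 *		dev->dev = &pdev->dev;
 *		my_setup_mmio(pdev, dev);
 *		ret = init_dma_pools(dev);
 *		if (ret)
 *			return ret;
 *		ret = request_irq(dev->irq, udc_irq, IRQF_SHARED, name, dev);
 *		if (ret) {
 *			free_dma_pools(dev);
 *			return ret;
 *		}
 *		return udc_probe(dev);
 *	}
 */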

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");