// SPDX-License-Identifier: GPL-2.0+
/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (https://www.amd.com)
 * Author: Thomas Dahlmann
 */

/*
 * This file does the core driver implementation for the UDC that is based
 * on Synopsys device controller IP (different than HS OTG IP) that is either
 * connected through PCI bus or integrated to SoC platforms.
 */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"Synopsys USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "amd5536udc.h"

static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(dev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF)|
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

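	/*
	 * All IN endpoints share one TX FIFO RAM: this ep's FIFO begins
	 * where the buffers of the preceding IN endpoints end.
	 */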
	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

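/*
 * Check whether a previously written CNAK actually cleared the NAK bit;
 * if not, remember the endpoint in cnak_pending so the clear can be
 * retried later.
 */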
static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8 udc_csr_epix;
	unsigned		maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
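		/* (OUT endpoints are offset by UDC_CSR_EP_OUT_IX_OFS below) */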
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					  / UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR	*/
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep	*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* frees pci pool descriptors of a DMA chain */
static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	struct udc_data_dma *td = req->td_data;
	unsigned int i;

	dma_addr_t addr_next = 0x00;
	dma_addr_t addr = (dma_addr_t)td->next;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	for (i = 1; i < req->chain_len; i++) {
		td = phys_to_virt(addr);
		addr_next = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr_next;
	}
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep	*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		dma_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
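	/*
	 * The BNA (buffer not available) dummy is a single descriptor
	 * that links back to itself, so RX DMA always has a valid
	 * descriptor to land on when no real request is queued.
	 */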
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set buffer status to DMA done */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write to the confirm register */
	writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* number of received bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
	     bytes, buf_len);
	dma_addr = DMA_DONT_USE;
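	/*
	 * Packet-per-buffer mode: one descriptor per buf_len-sized slice
	 * of the request buffer.  The first descriptor (req->td_data) is
	 * always reused; further descriptors come from the DMA pool only
	 * when the chain has to grow beyond its previous length.
	 */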

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = dma_pool_alloc(ep->dev->data_requests,
					    gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						    ep->ep.maxpacket,
						    UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

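	/* drop the lock: the completion handler may re-queue requests */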
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* number of received bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* number of received bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable RX DMA while updating the descriptor */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;
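				/*
				 * RDE stays off while the descriptor is
				 * written; udc_set_rde() below re-enables
				 * RX DMA once the request is queued.
				 */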

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma is not used here for OUT endpoints: in PPB
		 * modes the DMA chain has to be created when the
		 * transfer is started, so it cannot be prepared now
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
EXPORT_SYMBOL_GPL(empty_req_queue);

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
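					/*
					 * desc. untouched by DMA yet:
					 * park DMA on the self-linked
					 * BNA dummy descriptor instead
					 */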
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

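	/* pulse the RES bit: assert resume signalling, then clear it */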
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc		*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};

/* Setups endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);
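	/* only these three endpoints are put on the gadget's ep_list */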

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* Inits UDC context */
void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
EXPORT_SYMBOL_GPL(udc_basic_init);

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
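	/* ep0 is exposed via gadget.ep0 only, not via the gadget ep_list */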

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	/* Return if already connected */
	if (dev->connected)
		return;

	dev_info(dev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	u32 tmp;

	/* Return if already disconnected */
	if (!dev->connected)
		return;

	dev_info(dev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}

	/* disable ep0 */
	ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	/* Don't do this for Broadcom UDC since this is a reserved
	 * bit.
	 */
	if (dev->chiprev != UDC_BCM_REV) {
		spin_lock_irqsave(&udc_irq_spinlock, flags);
		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
		readl(&dev->regs->cfg);
		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
	}
}

/* RDE timer callback to set RDE bit */
static void udc_timer_function(struct timer_list *unused)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * conditionally open the fifo if it was already filled
		 * on the last timer call
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets get queued by
			 * the gadget layer the timer will be forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);
}

/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* watch the STALL bit as long as the ep is marked halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/* Stall timer callback to poll S bit and set it again after */
static void udc_pollstall_timer_function(struct timer_list *unused)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and one OUT endpoint are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}

/* Inits endpoint 0 so that SETUP packets are processed */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
1877 		/* write dma desc address */
1878 		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1879 			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
1880 		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1881 			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
1882 		/* stop RDE timer */
1883 		if (timer_pending(&udc_timer)) {
1884 			set_rde = 0;
1885 			mod_timer(&udc_timer, jiffies - 1);
1886 		}
1887 		/* stop pollstall timer */
1888 		if (timer_pending(&udc_pollstall_timer))
1889 			mod_timer(&udc_pollstall_timer, jiffies - 1);
1890 		/* enable DMA */
1891 		tmp = readl(&dev->regs->ctl);
1892 		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1893 				| AMD_BIT(UDC_DEVCTL_RDE)
1894 				| AMD_BIT(UDC_DEVCTL_TDE);
1895 		if (use_dma_bufferfill_mode)
1896 			tmp |= AMD_BIT(UDC_DEVCTL_BF);
1897 		else if (use_dma_ppb_du)
1898 			tmp |= AMD_BIT(UDC_DEVCTL_DU);
1899 		writel(tmp, &dev->regs->ctl);
1900 	}
1901 
1902 	/* clear NAK by writing CNAK for EP0IN */
1903 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1904 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1905 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1906 	dev->ep[UDC_EP0IN_IX].naking = 0;
1907 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1908 
1909 	/* clear NAK by writing CNAK for EP0OUT */
1910 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1911 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1912 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1913 	dev->ep[UDC_EP0OUT_IX].naking = 0;
1914 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1915 }
1916 
1917 /* Make endpoint 0 ready for control traffic */
1918 static int setup_ep0(struct udc *dev)
1919 {
1920 	activate_control_endpoints(dev);
1921 	/* enable ep0 interrupts */
1922 	udc_enable_ep0_interrupts(dev);
1923 	/* enable device setup interrupts */
1924 	udc_enable_dev_setup_interrupts(dev);
1925 
1926 	return 0;
1927 }
1928 
1929 /* Called by gadget driver to register itself */
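/*
 * Prepares ep0 for traffic, clears the soft disconnect (SD) bit and
 * signals connect so the host can start enumerating the freshly
 * bound gadget driver.
 */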
1930 static int amd5536_udc_start(struct usb_gadget *g,
1931 		struct usb_gadget_driver *driver)
1932 {
1933 	struct udc *dev = to_amd5536_udc(g);
1934 	u32 tmp;
1935 
1936 	dev->driver = driver;
1937 
1938 	/* Some gadget drivers use both ep0 directions.
1939 	 * NOTE: to gadget driver, ep0 is just one endpoint...
1940 	 */
1941 	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1942 		dev->ep[UDC_EP0IN_IX].ep.driver_data;
1943 
1944 	/* get ready for ep0 traffic */
1945 	setup_ep0(dev);
1946 
1947 	/* clear SD */
1948 	tmp = readl(&dev->regs->ctl);
1949 	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1950 	writel(tmp, &dev->regs->ctl);
1951 
1952 	usb_connect(dev);
1953 
1954 	return 0;
1955 }
1956 
1957 /* shutdown requests and disconnect from gadget */
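/*
 * Called with dev->lock held; completing the queued requests
 * temporarily drops the lock (hence the sparse annotations below).
 */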
1958 static void
1959 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
1960 __releases(dev->lock)
1961 __acquires(dev->lock)
1962 {
1963 	int tmp;
1964 
1965 	/* empty queues and init hardware */
1966 	udc_basic_init(dev);
1967 
1968 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
1969 		empty_req_queue(&dev->ep[tmp]);
1970 
1971 	udc_setup_endpoints(dev);
1972 }
1973 
1974 /* Called by gadget driver to unregister itself */
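/*
 * Masks interrupts, empties all request queues and re-enters soft
 * disconnect (SD) so the host sees the device disappear.
 */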
1975 static int amd5536_udc_stop(struct usb_gadget *g)
1976 {
1977 	struct udc *dev = to_amd5536_udc(g);
1978 	unsigned long flags;
1979 	u32 tmp;
1980 
1981 	spin_lock_irqsave(&dev->lock, flags);
1982 	udc_mask_unused_interrupts(dev);
1983 	shutdown(dev, NULL);
1984 	spin_unlock_irqrestore(&dev->lock, flags);
1985 
1986 	dev->driver = NULL;
1987 
1988 	/* set SD */
1989 	tmp = readl(&dev->regs->ctl);
1990 	tmp |= AMD_BIT(UDC_DEVCTL_SD);
1991 	writel(tmp, &dev->regs->ctl);
1992 
1993 	return 0;
1994 }
1995 
1996 /* Clear pending NAK bits */
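/*
 * cnak_pending marks endpoints whose CNAK write could not take effect
 * immediately (see the UDC_QUEUE_CNAK macro); retry the CNAK write
 * for each used IN endpoint and for ep0-out.
 */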
1997 static void udc_process_cnak_queue(struct udc *dev)
1998 {
1999 	u32 tmp;
2000 	u32 reg;
2001 
2002 	/* check epin's */
2003 	DBG(dev, "CNAK pending queue processing\n");
2004 	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2005 		if (cnak_pending & (1 << tmp)) {
2006 			DBG(dev, "CNAK pending for ep%d\n", tmp);
2007 			/* clear NAK by writing CNAK */
2008 			reg = readl(&dev->ep[tmp].regs->ctl);
2009 			reg |= AMD_BIT(UDC_EPCTL_CNAK);
2010 			writel(reg, &dev->ep[tmp].regs->ctl);
2011 			dev->ep[tmp].naking = 0;
2012 			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2013 		}
2014 	}
2015 	/* ...	and ep0out */
2016 	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2017 		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2018 		/* clear NAK by writing CNAK */
2019 		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2020 		reg |= AMD_BIT(UDC_EPCTL_CNAK);
2021 		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2022 		dev->ep[UDC_EP0OUT_IX].naking = 0;
2023 		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2024 				dev->ep[UDC_EP0OUT_IX].num);
2025 	}
2026 }
2027 
2028 /* Enabling RX DMA after setup packet */
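/*
 * RDE is global for all OUT endpoints, so it is only switched on
 * right away when no data endpoint is enabled or data is already
 * queued; otherwise the RDE timer enables it later to avoid opening
 * the RX FIFO for data endpoints prematurely.
 */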
2029 static void udc_ep0_set_rde(struct udc *dev)
2030 {
2031 	if (use_dma) {
2032 		/*
2033 		 * only enable RX DMA when no data endpoint is enabled
2034 		 * or data is already queued
2035 		 */
2036 		if (!dev->data_ep_enabled || dev->data_ep_queued) {
2037 			udc_set_rde(dev);
2038 		} else {
2039 			/*
2040 			 * set up timer for enabling RDE (so that RX FIFO
2041 			 * DMA for data endpoints is not enabled too early)
2042 			 */
2043 			if (set_rde != 0 && !timer_pending(&udc_timer)) {
2044 				udc_timer.expires =
2045 					jiffies + HZ/UDC_RDE_TIMER_DIV;
2046 				set_rde = 1;
2047 				if (!stop_timer)
2048 					add_timer(&udc_timer);
2049 			}
2050 		}
2051 	}
2052 }
2053 
2054 
2055 /* Interrupt handler for data OUT traffic */
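/*
 * Handles BNA (buffer not available) and HE (host error) events
 * first, then either reads the rx fifo (slave mode) or evaluates the
 * DMA descriptor status, completes the current request and prepares
 * DMA for the next queued one.
 */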
2056 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2057 {
2058 	irqreturn_t		ret_val = IRQ_NONE;
2059 	u32			tmp;
2060 	struct udc_ep		*ep;
2061 	struct udc_request	*req;
2062 	unsigned int		count;
2063 	struct udc_data_dma	*td = NULL;
2064 	unsigned		dma_done;
2065 
2066 	VDBG(dev, "ep%d irq\n", ep_ix);
2067 	ep = &dev->ep[ep_ix];
2068 
2069 	tmp = readl(&ep->regs->sts);
2070 	if (use_dma) {
2071 		/* BNA event ? */
2072 		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2073 			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2074 					ep->num, readl(&ep->regs->desptr));
2075 			/* clear BNA */
2076 			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2077 			if (!ep->cancel_transfer)
2078 				ep->bna_occurred = 1;
2079 			else
2080 				ep->cancel_transfer = 0;
2081 			ret_val = IRQ_HANDLED;
2082 			goto finished;
2083 		}
2084 	}
2085 	/* HE event ? */
2086 	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2087 		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
2088 
2089 		/* clear HE */
2090 		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2091 		ret_val = IRQ_HANDLED;
2092 		goto finished;
2093 	}
2094 
2095 	if (!list_empty(&ep->queue)) {
2096 
2097 		/* next request */
2098 		req = list_entry(ep->queue.next,
2099 			struct udc_request, queue);
2100 	} else {
2101 		req = NULL;
2102 		udc_rxfifo_pending = 1;
2103 	}
2104 	VDBG(dev, "req = %p\n", req);
2105 	/* fifo mode */
2106 	if (!use_dma) {
2107 
2108 		/* read fifo */
2109 		if (req && udc_rxfifo_read(ep, req)) {
2110 			ret_val = IRQ_HANDLED;
2111 
2112 			/* finish */
2113 			complete_req(ep, req, 0);
2114 			/* next request */
2115 			if (!list_empty(&ep->queue) && !ep->halted) {
2116 				req = list_entry(ep->queue.next,
2117 					struct udc_request, queue);
2118 			} else
2119 				req = NULL;
2120 		}
2121 
2122 	/* DMA */
2123 	} else if (!ep->cancel_transfer && req) {
2124 		ret_val = IRQ_HANDLED;
2125 
2126 		/* check for DMA done */
2127 		if (!use_dma_ppb) {
2128 			dma_done = AMD_GETBITS(req->td_data->status,
2129 						UDC_DMA_OUT_STS_BS);
2130 		/* packet per buffer mode - rx bytes */
2131 		} else {
2132 			/*
2133 			 * if BNA occurred then recover desc. from
2134 			 * BNA dummy desc.
2135 			 */
2136 			if (ep->bna_occurred) {
2137 				VDBG(dev, "Recover desc. from BNA dummy\n");
2138 				memcpy(req->td_data, ep->bna_dummy_req->td_data,
2139 						sizeof(struct udc_data_dma));
2140 				ep->bna_occurred = 0;
2141 				udc_init_bna_dummy(ep->req);
2142 			}
2143 			td = udc_get_last_dma_desc(req);
2144 			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2145 		}
2146 		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2147 			/* buffer fill mode - rx bytes */
2148 			if (!use_dma_ppb) {
2149 				/* number of received bytes */
2150 				count = AMD_GETBITS(req->td_data->status,
2151 						UDC_DMA_OUT_STS_RXBYTES);
2152 				VDBG(dev, "rx bytes=%u\n", count);
2153 			/* packet per buffer mode - rx bytes */
2154 			} else {
2155 				VDBG(dev, "req->td_data=%p\n", req->td_data);
2156 				VDBG(dev, "last desc = %p\n", td);
2157 				/* number of received bytes */
2158 				if (use_dma_ppb_du) {
2159 					/* every desc. counts bytes */
2160 					count = udc_get_ppbdu_rxbytes(req);
2161 				} else {
2162 					/* last desc. counts bytes */
2163 					count = AMD_GETBITS(td->status,
2164 						UDC_DMA_OUT_STS_RXBYTES);
2165 					if (!count && req->req.length
2166 						== UDC_DMA_MAXPACKET) {
2167 						/*
2168 						 * on 64k packets the RXBYTES
2169 						 * field is zero
2170 						 */
2171 						count = UDC_DMA_MAXPACKET;
2172 					}
2173 				}
2174 				VDBG(dev, "last desc rx bytes=%u\n", count);
2175 			}
2176 
2177 			tmp = req->req.length - req->req.actual;
2178 			if (count > tmp) {
2179 				if ((tmp % ep->ep.maxpacket) != 0) {
2180 					DBG(dev, "%s: rx %db, space=%db\n",
2181 						ep->ep.name, count, tmp);
2182 					req->req.status = -EOVERFLOW;
2183 				}
2184 				count = tmp;
2185 			}
2186 			req->req.actual += count;
2187 			req->dma_going = 0;
2188 			/* complete request */
2189 			complete_req(ep, req, 0);
2190 
2191 			/* next request */
2192 			if (!list_empty(&ep->queue) && !ep->halted) {
2193 				req = list_entry(ep->queue.next,
2194 					struct udc_request,
2195 					queue);
2196 				/*
2197 				 * DMA may be already started by udc_queue()
2198 				 * called by the gadget driver's completion
2199 				 * routine. This happens when the queue
2200 				 * holds one request only.
2201 				 */
2202 				if (req->dma_going == 0) {
2203 					/* next dma */
2204 					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2205 						goto finished;
2206 					/* write desc pointer */
2207 					writel(req->td_phys,
2208 						&ep->regs->desptr);
2209 					req->dma_going = 1;
2210 					/* enable DMA */
2211 					udc_set_rde(dev);
2212 				}
2213 			} else {
2214 				/*
2215 				 * implant BNA dummy descriptor to allow
2216 				 * RXFIFO opening by RDE
2217 				 */
2218 				if (ep->bna_dummy_req) {
2219 					/* write desc pointer */
2220 					writel(ep->bna_dummy_req->td_phys,
2221 						&ep->regs->desptr);
2222 					ep->bna_occurred = 0;
2223 				}
2224 
2225 				/*
2226 				 * schedule timer for setting RDE if queue
2227 				 * remains empty to allow ep0 packets to
2228 				 * pass through
2229 				 */
2230 				if (set_rde != 0
2231 						&& !timer_pending(&udc_timer)) {
2232 					udc_timer.expires =
2233 						jiffies
2234 						+ HZ*UDC_RDE_TIMER_SECONDS;
2235 					set_rde = 1;
2236 					if (!stop_timer)
2237 						add_timer(&udc_timer);
2238 				}
2239 				if (ep->num != UDC_EP0OUT_IX)
2240 					dev->data_ep_queued = 0;
2241 			}
2242 
2243 		} else {
2244 			/*
2245 			 * RX DMA must be re-enabled for each desc in PPBDU mode
2246 			 * and must be enabled in PPBNDU mode in case of BNA
2247 			 */
2248 			udc_set_rde(dev);
2249 		}
2250 
2251 	} else if (ep->cancel_transfer) {
2252 		ret_val = IRQ_HANDLED;
2253 		ep->cancel_transfer = 0;
2254 	}
2255 
2256 	/* check pending CNAKS */
2257 	if (cnak_pending) {
2258 		/* process CNAK only when the rx fifo is empty */
2259 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2260 			udc_process_cnak_queue(dev);
2261 	}
2262 
2263 	/* clear OUT bits in ep status */
2264 	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2265 finished:
2266 	return ret_val;
2267 }
2268 
2269 /* Interrupt handler for data IN traffic */
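/*
 * BNA and HE are error events; TDC signals completion of a DMA
 * transfer, while the IN bit signals an IN token from the host, which
 * triggers a tx fifo write (slave mode) or arms the next DMA
 * descriptor and sets the poll demand bit.
 */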
2270 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2271 {
2272 	irqreturn_t ret_val = IRQ_NONE;
2273 	u32 tmp;
2274 	u32 epsts;
2275 	struct udc_ep *ep;
2276 	struct udc_request *req;
2277 	struct udc_data_dma *td;
2278 	unsigned len;
2279 
2280 	ep = &dev->ep[ep_ix];
2281 
2282 	epsts = readl(&ep->regs->sts);
2283 	if (use_dma) {
2284 		/* BNA ? */
2285 		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2286 			dev_err(dev->dev,
2287 				"BNA ep%din occurred - DESPTR = %08lx\n",
2288 				ep->num,
2289 				(unsigned long) readl(&ep->regs->desptr));
2290 
2291 			/* clear BNA */
2292 			writel(epsts, &ep->regs->sts);
2293 			ret_val = IRQ_HANDLED;
2294 			goto finished;
2295 		}
2296 	}
2297 	/* HE event ? */
2298 	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2299 		dev_err(dev->dev,
2300 			"HE ep%din occurred - DESPTR = %08lx\n",
2301 			ep->num, (unsigned long) readl(&ep->regs->desptr));
2302 
2303 		/* clear HE */
2304 		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2305 		ret_val = IRQ_HANDLED;
2306 		goto finished;
2307 	}
2308 
2309 	/* DMA completion */
2310 	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2311 		VDBG(dev, "TDC set - completion\n");
2312 		ret_val = IRQ_HANDLED;
2313 		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2314 			req = list_entry(ep->queue.next,
2315 					struct udc_request, queue);
2316 			/*
2317 			 * length bytes transferred
2318 			 * check dma done of last desc. in PPBDU mode
2319 			 */
2320 			if (use_dma_ppb_du) {
2321 				td = udc_get_last_dma_desc(req);
2322 				if (td)
2323 					req->req.actual = req->req.length;
2324 			} else {
2325 				/* assume all bytes transferred */
2326 				req->req.actual = req->req.length;
2327 			}
2328 
2329 			if (req->req.actual == req->req.length) {
2330 				/* complete req */
2331 				complete_req(ep, req, 0);
2332 				req->dma_going = 0;
2333 				/* further request available ? */
2334 				if (list_empty(&ep->queue)) {
2335 					/* disable interrupt */
2336 					tmp = readl(&dev->regs->ep_irqmsk);
2337 					tmp |= AMD_BIT(ep->num);
2338 					writel(tmp, &dev->regs->ep_irqmsk);
2339 				}
2340 			}
2341 		}
2342 		ep->cancel_transfer = 0;
2343 
2344 	}
2345 	/*
2346 	 * status reg has IN bit set and TDC not set: if TDC was handled,
2347 	 * IN must not be handled as well (UDC defect?)
2348 	 */
2349 	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2350 			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2351 		ret_val = IRQ_HANDLED;
2352 		if (!list_empty(&ep->queue)) {
2353 			/* next request */
2354 			req = list_entry(ep->queue.next,
2355 					struct udc_request, queue);
2356 			/* FIFO mode */
2357 			if (!use_dma) {
2358 				/* write fifo */
2359 				udc_txfifo_write(ep, &req->req);
2360 				len = req->req.length - req->req.actual;
2361 				if (len > ep->ep.maxpacket)
2362 					len = ep->ep.maxpacket;
2363 				req->req.actual += len;
2364 				if (req->req.actual == req->req.length
2365 					|| (len != ep->ep.maxpacket)) {
2366 					/* complete req */
2367 					complete_req(ep, req, 0);
2368 				}
2369 			/* DMA */
2370 			} else if (req && !req->dma_going) {
2371 				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2372 					req, req->td_data);
2373 				if (req->td_data) {
2374 
2375 					req->dma_going = 1;
2376 
2377 					/*
2378 					 * unset L bit of first desc.
2379 					 * for chain
2380 					 */
2381 					if (use_dma_ppb && req->req.length >
2382 							ep->ep.maxpacket) {
2383 						req->td_data->status &=
2384 							AMD_CLEAR_BIT(
2385 							UDC_DMA_IN_STS_L);
2386 					}
2387 
2388 					/* write desc pointer */
2389 					writel(req->td_phys, &ep->regs->desptr);
2390 
2391 					/* set HOST READY */
2392 					req->td_data->status =
2393 						AMD_ADDBITS(
2394 						req->td_data->status,
2395 						UDC_DMA_IN_STS_BS_HOST_READY,
2396 						UDC_DMA_IN_STS_BS);
2397 
2398 					/* set poll demand bit */
2399 					tmp = readl(&ep->regs->ctl);
2400 					tmp |= AMD_BIT(UDC_EPCTL_P);
2401 					writel(tmp, &ep->regs->ctl);
2402 				}
2403 			}
2404 
2405 		} else if (!use_dma && ep->in) {
2406 			/* disable interrupt */
2407 			tmp = readl(
2408 				&dev->regs->ep_irqmsk);
2409 			tmp |= AMD_BIT(ep->num);
2410 			writel(tmp,
2411 				&dev->regs->ep_irqmsk);
2412 		}
2413 	}
2414 	/* clear status bits */
2415 	writel(epsts, &ep->regs->sts);
2416 
2417 finished:
2418 	return ret_val;
2419 
2420 }
2421 
2422 /* Interrupt handler for Control OUT traffic */
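/*
 * SETUP packets are fetched from the setup DMA descriptor (or the rx
 * fifo in slave mode) and handed to the gadget driver's setup()
 * callback with the lock dropped; the ep0 NAK/STALL state is then
 * programmed from its return value. The 0-byte DATA packets of
 * control transfers are handled here as well.
 */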
2423 static irqreturn_t udc_control_out_isr(struct udc *dev)
2424 __releases(dev->lock)
2425 __acquires(dev->lock)
2426 {
2427 	irqreturn_t ret_val = IRQ_NONE;
2428 	u32 tmp;
2429 	int setup_supported;
2430 	u32 count;
2431 	int set = 0;
2432 	struct udc_ep	*ep;
2433 	struct udc_ep	*ep_tmp;
2434 
2435 	ep = &dev->ep[UDC_EP0OUT_IX];
2436 
2437 	/* clear irq */
2438 	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2439 
2440 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2441 	/* check BNA and clear if set */
2442 	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2443 		VDBG(dev, "ep0: BNA set\n");
2444 		writel(AMD_BIT(UDC_EPSTS_BNA),
2445 			&dev->ep[UDC_EP0OUT_IX].regs->sts);
2446 		ep->bna_occurred = 1;
2447 		ret_val = IRQ_HANDLED;
2448 		goto finished;
2449 	}
2450 
2451 	/* type of data: SETUP packet or 0-byte DATA packet */
2452 	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2453 	VDBG(dev, "data_typ = %x\n", tmp);
2454 
2455 	/* setup data */
2456 	if (tmp == UDC_EPSTS_OUT_SETUP) {
2457 		ret_val = IRQ_HANDLED;
2458 
2459 		ep->dev->stall_ep0in = 0;
2460 		dev->waiting_zlp_ack_ep0in = 0;
2461 
2462 		/* set NAK for EP0_IN */
2463 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2464 		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2465 		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2466 		dev->ep[UDC_EP0IN_IX].naking = 1;
2467 		/* get setup data */
2468 		if (use_dma) {
2469 
2470 			/* clear OUT bits in ep status */
2471 			writel(UDC_EPSTS_OUT_CLEAR,
2472 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
2473 
2474 			setup_data.data[0] =
2475 				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2476 			setup_data.data[1] =
2477 				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2478 			/* set HOST READY */
2479 			dev->ep[UDC_EP0OUT_IX].td_stp->status =
2480 					UDC_DMA_STP_STS_BS_HOST_READY;
2481 		} else {
2482 			/* read fifo */
2483 			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2484 		}
2485 
2486 		/* determine direction of control data */
2487 		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2488 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2489 			/* enable RDE */
2490 			udc_ep0_set_rde(dev);
2491 			set = 0;
2492 		} else {
2493 			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2494 			/*
2495 			 * implant BNA dummy descriptor to allow RXFIFO opening
2496 			 * by RDE
2497 			 */
2498 			if (ep->bna_dummy_req) {
2499 				/* write desc pointer */
2500 				writel(ep->bna_dummy_req->td_phys,
2501 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
2502 				ep->bna_occurred = 0;
2503 			}
2504 
2505 			set = 1;
2506 			dev->ep[UDC_EP0OUT_IX].naking = 1;
2507 			/*
2508 			 * set up timer for enabling RDE (so that RX FIFO
2509 			 * DMA for data is not enabled too early)
2510 			 */
2511 			set_rde = 1;
2512 			if (!timer_pending(&udc_timer)) {
2513 				udc_timer.expires = jiffies +
2514 							HZ/UDC_RDE_TIMER_DIV;
2515 				if (!stop_timer)
2516 					add_timer(&udc_timer);
2517 			}
2518 		}
2519 
2520 		/*
2521 		 * mass storage reset must be processed here because
2522 		 * next packet may be a CLEAR_FEATURE HALT which would not
2523 		 * clear the stall bit when no STALL handshake was received
2524 		 * before (autostall can cause this)
2525 		 */
2526 		if (setup_data.data[0] == UDC_MSCRES_DWORD0
2527 				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
2528 			DBG(dev, "MSC Reset\n");
2529 			/*
2530 			 * clear stall bits
2531 			 * only one IN and one OUT endpoint are handled
2532 			 */
2533 			ep_tmp = &udc->ep[UDC_EPIN_IX];
2534 			udc_set_halt(&ep_tmp->ep, 0);
2535 			ep_tmp = &udc->ep[UDC_EPOUT_IX];
2536 			udc_set_halt(&ep_tmp->ep, 0);
2537 		}
2538 
2539 		/* call gadget with setup data received */
2540 		spin_unlock(&dev->lock);
2541 		setup_supported = dev->driver->setup(&dev->gadget,
2542 						&setup_data.request);
2543 		spin_lock(&dev->lock);
2544 
2545 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2546 		/* ep0 in returns data (not zlp) on IN phase */
2547 		if (setup_supported >= 0 && setup_supported <
2548 				UDC_EP0IN_MAXPACKET) {
2549 			/* clear NAK by writing CNAK in EP0_IN */
2550 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2551 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2552 			dev->ep[UDC_EP0IN_IX].naking = 0;
2553 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2554 
2555 		/* if unsupported request then stall */
2556 		} else if (setup_supported < 0) {
2557 			tmp |= AMD_BIT(UDC_EPCTL_S);
2558 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2559 		} else
2560 			dev->waiting_zlp_ack_ep0in = 1;
2561 
2562 
2563 		/* clear NAK by writing CNAK in EP0_OUT */
2564 		if (!set) {
2565 			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2566 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2567 			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2568 			dev->ep[UDC_EP0OUT_IX].naking = 0;
2569 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2570 		}
2571 
2572 		if (!use_dma) {
2573 			/* clear OUT bits in ep status */
2574 			writel(UDC_EPSTS_OUT_CLEAR,
2575 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
2576 		}
2577 
2578 	/* 0-byte data packet */
2579 	} else if (tmp == UDC_EPSTS_OUT_DATA) {
2580 		/* clear OUT bits in ep status */
2581 		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2582 
2583 		/* get setup data: only a 0-byte packet expected */
2584 		if (use_dma) {
2585 			/* no req if 0 packet, just reactivate */
2586 			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2587 				VDBG(dev, "ZLP\n");
2588 
2589 				/* set HOST READY */
2590 				dev->ep[UDC_EP0OUT_IX].td->status =
2591 					AMD_ADDBITS(
2592 					dev->ep[UDC_EP0OUT_IX].td->status,
2593 					UDC_DMA_OUT_STS_BS_HOST_READY,
2594 					UDC_DMA_OUT_STS_BS);
2595 				/* enable RDE */
2596 				udc_ep0_set_rde(dev);
2597 				ret_val = IRQ_HANDLED;
2598 
2599 			} else {
2600 				/* control write */
2601 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2602 				/* re-program desc. pointer for possible ZLPs */
2603 				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2604 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
2605 				/* enable RDE */
2606 				udc_ep0_set_rde(dev);
2607 			}
2608 		} else {
2609 
2610 			/* number of received bytes */
2611 			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2612 			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2613 			/* OUT data in fifo mode does not work, force count to 0 */
2614 			count = 0;
2615 
2616 			/* 0-byte packet or real data? */
2617 			if (count != 0) {
2618 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2619 			} else {
2620 				/* dummy read confirm */
2621 				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2622 				ret_val = IRQ_HANDLED;
2623 			}
2624 		}
2625 	}
2626 
2627 	/* check pending CNAKS */
2628 	if (cnak_pending) {
2629 		/* process CNAK only when the rx fifo is empty */
2630 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2631 			udc_process_cnak_queue(dev);
2632 	}
2633 
2634 finished:
2635 	return ret_val;
2636 }
2637 
2638 /* Interrupt handler for Control IN traffic */
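/*
 * On TDC the DMA transfer is done and only the status bit needs
 * clearing; on IN the next queued request is started, either via DMA
 * descriptor plus poll demand bit or by writing the tx fifo directly,
 * unless a stall of ep0-in was requested.
 */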
2639 static irqreturn_t udc_control_in_isr(struct udc *dev)
2640 {
2641 	irqreturn_t ret_val = IRQ_NONE;
2642 	u32 tmp;
2643 	struct udc_ep *ep;
2644 	struct udc_request *req;
2645 	unsigned len;
2646 
2647 	ep = &dev->ep[UDC_EP0IN_IX];
2648 
2649 	/* clear irq */
2650 	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2651 
2652 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2653 	/* DMA completion */
2654 	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2655 		VDBG(dev, "isr: TDC clear\n");
2656 		ret_val = IRQ_HANDLED;
2657 
2658 		/* clear TDC bit */
2659 		writel(AMD_BIT(UDC_EPSTS_TDC),
2660 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2661 
2662 	/* status reg has IN bit set ? */
2663 	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2664 		ret_val = IRQ_HANDLED;
2665 
2666 		if (ep->dma) {
2667 			/* clear IN bit */
2668 			writel(AMD_BIT(UDC_EPSTS_IN),
2669 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2670 		}
2671 		if (dev->stall_ep0in) {
2672 			DBG(dev, "stall ep0in\n");
2673 			/* halt ep0in */
2674 			tmp = readl(&ep->regs->ctl);
2675 			tmp |= AMD_BIT(UDC_EPCTL_S);
2676 			writel(tmp, &ep->regs->ctl);
2677 		} else {
2678 			if (!list_empty(&ep->queue)) {
2679 				/* next request */
2680 				req = list_entry(ep->queue.next,
2681 						struct udc_request, queue);
2682 
2683 				if (ep->dma) {
2684 					/* write desc pointer */
2685 					writel(req->td_phys, &ep->regs->desptr);
2686 					/* set HOST READY */
2687 					req->td_data->status =
2688 						AMD_ADDBITS(
2689 						req->td_data->status,
2690 						UDC_DMA_STP_STS_BS_HOST_READY,
2691 						UDC_DMA_STP_STS_BS);
2692 
2693 					/* set poll demand bit */
2694 					tmp =
2695 					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2696 					tmp |= AMD_BIT(UDC_EPCTL_P);
2697 					writel(tmp,
2698 					&dev->ep[UDC_EP0IN_IX].regs->ctl);
2699 
2700 					/* all bytes will be transferred */
2701 					req->req.actual = req->req.length;
2702 
2703 					/* complete req */
2704 					complete_req(ep, req, 0);
2705 
2706 				} else {
2707 					/* write fifo */
2708 					udc_txfifo_write(ep, &req->req);
2709 
2710 					/* length bytes transferred */
2711 					len = req->req.length - req->req.actual;
2712 					if (len > ep->ep.maxpacket)
2713 						len = ep->ep.maxpacket;
2714 
2715 					req->req.actual += len;
2716 					if (req->req.actual == req->req.length
2717 						|| (len != ep->ep.maxpacket)) {
2718 						/* complete req */
2719 						complete_req(ep, req, 0);
2720 					}
2721 				}
2722 
2723 			}
2724 		}
2725 		ep->halted = 0;
2726 		dev->stall_ep0in = 0;
2727 		if (!ep->dma) {
2728 			/* clear IN bit */
2729 			writel(AMD_BIT(UDC_EPSTS_IN),
2730 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2731 		}
2732 	}
2733 
2734 	return ret_val;
2735 }
2736 
2737 
2738 /* Interrupt handler for global device events */
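/*
 * SET_CONFIG and SET_INTERFACE are turned into usb requests for the
 * gadget driver and mirrored into the UDC CSR NE registers; USB reset
 * re-initializes the controller, and suspend, enum (speed detection)
 * and session valid changes are forwarded as well.
 */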
2739 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2740 __releases(dev->lock)
2741 __acquires(dev->lock)
2742 {
2743 	irqreturn_t ret_val = IRQ_NONE;
2744 	u32 tmp;
2745 	u32 cfg;
2746 	struct udc_ep *ep;
2747 	u16 i;
2748 	u8 udc_csr_epix;
2749 
2750 	/* SET_CONFIG irq ? */
2751 	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2752 		ret_val = IRQ_HANDLED;
2753 
2754 		/* read config value */
2755 		tmp = readl(&dev->regs->sts);
2756 		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2757 		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2758 		dev->cur_config = cfg;
2759 		dev->set_cfg_not_acked = 1;
2760 
2761 		/* make usb request for gadget driver */
2762 		memset(&setup_data, 0, sizeof(union udc_setup_data));
2763 		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2764 		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2765 
2766 		/* program the NE registers */
2767 		for (i = 0; i < UDC_EP_NUM; i++) {
2768 			ep = &dev->ep[i];
2769 			if (ep->in) {
2770 
2771 				/* ep ix in UDC CSR register space */
2772 				udc_csr_epix = ep->num;
2773 
2774 
2775 			/* OUT ep */
2776 			} else {
2777 				/* ep ix in UDC CSR register space */
2778 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2779 			}
2780 
2781 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
2782 			/* ep cfg */
2783 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2784 						UDC_CSR_NE_CFG);
2785 			/* write reg */
2786 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
2787 
2788 			/* clear stall bits */
2789 			ep->halted = 0;
2790 			tmp = readl(&ep->regs->ctl);
2791 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2792 			writel(tmp, &ep->regs->ctl);
2793 		}
2794 		/* call gadget zero with setup data received */
2795 		spin_unlock(&dev->lock);
2796 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2797 		spin_lock(&dev->lock);
2798 
2799 	} /* SET_INTERFACE ? */
2800 	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2801 		ret_val = IRQ_HANDLED;
2802 
2803 		dev->set_cfg_not_acked = 1;
2804 		/* read interface and alt setting values */
2805 		tmp = readl(&dev->regs->sts);
2806 		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2807 		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2808 
2809 		/* make usb request for gadget driver */
2810 		memset(&setup_data, 0, sizeof(union udc_setup_data));
2811 		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2812 		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2813 		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2814 		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2815 
2816 		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2817 				dev->cur_alt, dev->cur_intf);
2818 
2819 		/* program the NE registers */
2820 		for (i = 0; i < UDC_EP_NUM; i++) {
2821 			ep = &dev->ep[i];
2822 			if (ep->in) {
2823 
2824 				/* ep ix in UDC CSR register space */
2825 				udc_csr_epix = ep->num;
2826 
2827 
2828 			/* OUT ep */
2829 			} else {
2830 				/* ep ix in UDC CSR register space */
2831 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2832 			}
2833 
2834 			/* UDC CSR reg */
2835 			/* set ep values */
2836 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
2837 			/* ep interface */
2838 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2839 						UDC_CSR_NE_INTF);
2840 			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2841 			/* ep alt */
2842 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2843 						UDC_CSR_NE_ALT);
2844 			/* write reg */
2845 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
2846 
2847 			/* clear stall bits */
2848 			ep->halted = 0;
2849 			tmp = readl(&ep->regs->ctl);
2850 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2851 			writel(tmp, &ep->regs->ctl);
2852 		}
2853 
2854 		/* call gadget zero with setup data received */
2855 		spin_unlock(&dev->lock);
2856 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2857 		spin_lock(&dev->lock);
2858 
2859 	} /* USB reset */
2860 	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2861 		DBG(dev, "USB Reset interrupt\n");
2862 		ret_val = IRQ_HANDLED;
2863 
2864 		/* allow soft reset when suspend occurs */
2865 		soft_reset_occured = 0;
2866 
2867 		dev->waiting_zlp_ack_ep0in = 0;
2868 		dev->set_cfg_not_acked = 0;
2869 
2870 		/* mask not needed interrupts */
2871 		udc_mask_unused_interrupts(dev);
2872 
2873 		/* call gadget to resume and reset configs etc. */
2874 		spin_unlock(&dev->lock);
2875 		if (dev->sys_suspended && dev->driver->resume) {
2876 			dev->driver->resume(&dev->gadget);
2877 			dev->sys_suspended = 0;
2878 		}
2879 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2880 		spin_lock(&dev->lock);
2881 
2882 		/* disable ep0 to empty req queue */
2883 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2884 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2885 
2886 		/* soft reset when rxfifo not empty */
2887 		tmp = readl(&dev->regs->sts);
2888 		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2889 				&& !soft_reset_after_usbreset_occured) {
2890 			udc_soft_reset(dev);
2891 			soft_reset_after_usbreset_occured++;
2892 		}
2893 
2894 		/*
2895 		 * DMA reset to kill potential old DMA hw hang,
2896 		 * POLL bit is already reset by ep_init() through
2897 		 * disconnect()
2898 		 */
2899 		DBG(dev, "DMA machine reset\n");
2900 		tmp = readl(&dev->regs->cfg);
2901 		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2902 		writel(tmp, &dev->regs->cfg);
2903 
2904 		/* put into initial config */
2905 		udc_basic_init(dev);
2906 
2907 		/* enable device setup interrupts */
2908 		udc_enable_dev_setup_interrupts(dev);
2909 
2910 		/* enable suspend interrupt */
2911 		tmp = readl(&dev->regs->irqmsk);
2912 		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2913 		writel(tmp, &dev->regs->irqmsk);
2914 
2915 	} /* USB suspend */
2916 	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2917 		DBG(dev, "USB Suspend interrupt\n");
2918 		ret_val = IRQ_HANDLED;
2919 		if (dev->driver->suspend) {
2920 			spin_unlock(&dev->lock);
2921 			dev->sys_suspended = 1;
2922 			dev->driver->suspend(&dev->gadget);
2923 			spin_lock(&dev->lock);
2924 		}
2925 	} /* new speed ? */
2926 	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2927 		DBG(dev, "ENUM interrupt\n");
2928 		ret_val = IRQ_HANDLED;
2929 		soft_reset_after_usbreset_occured = 0;
2930 
2931 		/* disable ep0 to empty req queue */
2932 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2933 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2934 
2935 		/* link up all endpoints */
2936 		udc_setup_endpoints(dev);
2937 		dev_info(dev->dev, "Connect: %s\n",
2938 			 usb_speed_string(dev->gadget.speed));
2939 
2940 		/* init ep 0 */
2941 		activate_control_endpoints(dev);
2942 
2943 		/* enable ep0 interrupts */
2944 		udc_enable_ep0_interrupts(dev);
2945 	}
2946 	/* session valid change interrupt */
2947 	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
2948 		DBG(dev, "USB SVC interrupt\n");
2949 		ret_val = IRQ_HANDLED;
2950 
2951 		/* check that session is not valid to detect disconnect */
2952 		tmp = readl(&dev->regs->sts);
2953 		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
2954 			/* disable suspend interrupt */
2955 			tmp = readl(&dev->regs->irqmsk);
2956 			tmp |= AMD_BIT(UDC_DEVINT_US);
2957 			writel(tmp, &dev->regs->irqmsk);
2958 			DBG(dev, "USB Disconnect (session valid low)\n");
2959 			/* cleanup on disconnect */
2960 			usb_disconnect(udc);
2961 		}
2962 
2963 	}
2964 
2965 	return ret_val;
2966 }
2967 
2968 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
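/*
 * Demultiplexes the shared interrupt: ep0 in/out events first, then
 * the data endpoints (indexes above UDC_EPIN_NUM are OUT endpoints),
 * and finally the device-global interrupt status.
 */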
2969 irqreturn_t udc_irq(int irq, void *pdev)
2970 {
2971 	struct udc *dev = pdev;
2972 	u32 reg;
2973 	u16 i;
2974 	u32 ep_irq;
2975 	irqreturn_t ret_val = IRQ_NONE;
2976 
2977 	spin_lock(&dev->lock);
2978 
2979 	/* check for ep irq */
2980 	reg = readl(&dev->regs->ep_irqsts);
2981 	if (reg) {
2982 		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
2983 			ret_val |= udc_control_out_isr(dev);
2984 		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
2985 			ret_val |= udc_control_in_isr(dev);
2986 
2987 		/*
2988 		 * data endpoint
2989 		 * iterate ep's
2990 		 */
2991 		for (i = 1; i < UDC_EP_NUM; i++) {
2992 			ep_irq = 1 << i;
2993 			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
2994 				continue;
2995 
2996 			/* clear irq status */
2997 			writel(ep_irq, &dev->regs->ep_irqsts);
2998 
2999 			/* irq for out ep ? */
3000 			if (i > UDC_EPIN_NUM)
3001 				ret_val |= udc_data_out_isr(dev, i);
3002 			else
3003 				ret_val |= udc_data_in_isr(dev, i);
3004 		}
3005 
3006 	}
3007 
3008 
3009 	/* check for dev irq */
3010 	reg = readl(&dev->regs->irqsts);
3011 	if (reg) {
3012 		/* clear irq */
3013 		writel(reg, &dev->regs->irqsts);
3014 		ret_val |= udc_dev_isr(dev, reg);
3015 	}
3016 
3017 
3018 	spin_unlock(&dev->lock);
3019 	return ret_val;
3020 }
3021 EXPORT_SYMBOL_GPL(udc_irq);
3022 
3023 /* Tears down device */
3024 void gadget_release(struct device *pdev)
3025 {
3026 	struct amd5536udc *dev = dev_get_drvdata(pdev);
3027 	kfree(dev);
3028 }
3029 EXPORT_SYMBOL_GPL(gadget_release);
3030 
3031 /* Cleanup on device remove */
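/*
 * Stops both timers and, if they are still pending, waits for their
 * exit completions so no timer callback can run after the device
 * object is gone.
 */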
3032 void udc_remove(struct udc *dev)
3033 {
3034 	/* remove timer */
3035 	stop_timer++;
3036 	if (timer_pending(&udc_timer))
3037 		wait_for_completion(&on_exit);
3038 	del_timer_sync(&udc_timer);
3039 	/* remove pollstall timer */
3040 	stop_pollstall_timer++;
3041 	if (timer_pending(&udc_pollstall_timer))
3042 		wait_for_completion(&on_pollstall_exit);
3043 	del_timer_sync(&udc_pollstall_timer);
3044 	udc = NULL;
3045 }
3046 EXPORT_SYMBOL_GPL(udc_remove);
3047 
3048 /* free all the dma pools */
3049 void free_dma_pools(struct udc *dev)
3050 {
3051 	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3052 		      dev->ep[UDC_EP0OUT_IX].td_phys);
3053 	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3054 		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3055 	dma_pool_destroy(dev->stp_requests);
3056 	dma_pool_destroy(dev->data_requests);
3057 }
3058 EXPORT_SYMBOL_GPL(free_dma_pools);
3059 
3060 /* create dma pools on init */
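/*
 * Creates one pool for data descriptors and one for setup
 * descriptors, pre-allocates the ep0-out setup and data descriptors
 * from them, and releases everything again on any failure.
 */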
3061 int init_dma_pools(struct udc *dev)
3062 {
3063 	struct udc_stp_dma	*td_stp;
3064 	struct udc_data_dma	*td_data;
3065 	int retval;
3066 
3067 	/* ensure consistent DMA mode settings */
3068 	if (use_dma_ppb) {
3069 		use_dma_bufferfill_mode = 0;
3070 	} else {
3071 		use_dma_ppb_du = 0;
3072 		use_dma_bufferfill_mode = 1;
3073 	}
3074 
3075 	/* DMA setup */
3076 	dev->data_requests = dma_pool_create("data_requests", dev->dev,
3077 		sizeof(struct udc_data_dma), 0, 0);
3078 	if (!dev->data_requests) {
3079 		DBG(dev, "can't get request data pool\n");
3080 		return -ENOMEM;
3081 	}
3082 
3083 	/* EP0 in dma regs = dev control regs */
3084 	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3085 
3086 	/* dma desc for setup data */
3087 	dev->stp_requests = dma_pool_create("setup requests", dev->dev,
3088 		sizeof(struct udc_stp_dma), 0, 0);
3089 	if (!dev->stp_requests) {
3090 		DBG(dev, "can't get stp request pool\n");
3091 		retval = -ENOMEM;
3092 		goto err_create_dma_pool;
3093 	}
3094 	/* setup */
3095 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3096 				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3097 	if (!td_stp) {
3098 		retval = -ENOMEM;
3099 		goto err_alloc_dma;
3100 	}
3101 	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3102 
3103 	/* data: 0 packets !? */
3104 	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3105 				&dev->ep[UDC_EP0OUT_IX].td_phys);
3106 	if (!td_data) {
3107 		retval = -ENOMEM;
3108 		goto err_alloc_phys;
3109 	}
3110 	dev->ep[UDC_EP0OUT_IX].td = td_data;
3111 	return 0;
3112 
3113 err_alloc_phys:
3114 	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3115 		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3116 err_alloc_dma:
3117 	dma_pool_destroy(dev->stp_requests);
3118 	dev->stp_requests = NULL;
3119 err_create_dma_pool:
3120 	dma_pool_destroy(dev->data_requests);
3121 	dev->data_requests = NULL;
3122 	return retval;
3123 }
3124 EXPORT_SYMBOL_GPL(init_dma_pools);
3125 
3126 /* general probe */
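/*
 * Bus-independent part of probing: initializes the gadget struct and
 * the controller registers, registers the gadget device, sets up both
 * timers and enters soft disconnect until a gadget driver binds.
 */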
3127 int udc_probe(struct udc *dev)
3128 {
3129 	char		tmp[128];
3130 	u32		reg;
3131 	int		retval;
3132 
3133 	/* device struct setup */
3134 	dev->gadget.ops = &udc_ops;
3135 
3136 	dev_set_name(&dev->gadget.dev, "gadget");
3137 	dev->gadget.name = name;
3138 	dev->gadget.max_speed = USB_SPEED_HIGH;
3139 
3140 	/* init registers, interrupts, ... */
3141 	startup_registers(dev);
3142 
3143 	dev_info(dev->dev, "%s\n", mod_desc);
3144 
3145 	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3146 
3147 	/* Print this device info for AMD chips only */
3148 	if (dev->chiprev == UDC_HSA0_REV ||
3149 	    dev->chiprev == UDC_HSB1_REV) {
3150 		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3151 			 tmp, dev->phys_addr, dev->chiprev,
3152 			 (dev->chiprev == UDC_HSA0_REV) ?
3153 			 "A0" : "B1");
3154 		strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3155 		if (dev->chiprev == UDC_HSA0_REV) {
3156 			dev_err(dev->dev, "chip revision is A0; too old\n");
3157 			retval = -ENODEV;
3158 			goto finished;
3159 		}
3160 		dev_info(dev->dev,
3161 			 "driver version: %s(for Geode5536 B1)\n", tmp);
3162 	}
3163 
3164 	udc = dev;
3165 
3166 	retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
3167 					    gadget_release);
3168 	if (retval)
3169 		goto finished;
3170 
3171 	/* timer init */
3172 	timer_setup(&udc_timer, udc_timer_function, 0);
3173 	timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);
3174 
3175 	/* set SD */
3176 	reg = readl(&dev->regs->ctl);
3177 	reg |= AMD_BIT(UDC_DEVCTL_SD);
3178 	writel(reg, &dev->regs->ctl);
3179 
3180 	/* print dev register info */
3181 	print_regs(dev);
3182 
3183 	return 0;
3184 
3185 finished:
3186 	return retval;
3187 }
3188 EXPORT_SYMBOL_GPL(udc_probe);
3189 
3190 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3191 MODULE_AUTHOR("Thomas Dahlmann");
3192 MODULE_LICENSE("GPL");
3193