1 /*
2  * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3  * Author: Chao Xie <chao.xie@marvell.com>
4  *	   Neil Zhang <zhangwm@marvell.com>
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dmapool.h>
16 #include <linux/kernel.h>
17 #include <linux/delay.h>
18 #include <linux/ioport.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/timer.h>
24 #include <linux/list.h>
25 #include <linux/interrupt.h>
26 #include <linux/moduleparam.h>
27 #include <linux/device.h>
28 #include <linux/usb/ch9.h>
29 #include <linux/usb/gadget.h>
30 #include <linux/usb/otg.h>
31 #include <linux/pm.h>
32 #include <linux/io.h>
33 #include <linux/irq.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <linux/platform_data/mv_usb.h>
37 #include <asm/unaligned.h>
38 
39 #include "mv_udc.h"
40 
41 #define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
42 #define DRIVER_VERSION		"8 Nov 2010"
43 
44 #define ep_dir(ep)	(((ep)->ep_num == 0) ? \
45 				((ep)->udc->ep0_dir) : ((ep)->direction))
46 
47 /* timeout value -- usec */
48 #define RESET_TIMEOUT		10000
49 #define FLUSH_TIMEOUT		10000
50 #define EPSTATUS_TIMEOUT	10000
51 #define PRIME_TIMEOUT		10000
52 #define READSAFE_TIMEOUT	1000
53 
54 #define LOOPS_USEC_SHIFT	1
55 #define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
56 #define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
57 
58 static DECLARE_COMPLETION(release_done);
59 
60 static const char driver_name[] = "mv_udc";
61 static const char driver_desc[] = DRIVER_DESC;
62 
63 static void nuke(struct mv_ep *ep, int status);
64 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
65 
66 /* for endpoint 0 operations */
67 static const struct usb_endpoint_descriptor mv_ep0_desc = {
68 	.bLength =		USB_DT_ENDPOINT_SIZE,
69 	.bDescriptorType =	USB_DT_ENDPOINT,
70 	.bEndpointAddress =	0,
71 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
72 	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
73 };
74 
75 static void ep0_reset(struct mv_udc *udc)
76 {
77 	struct mv_ep *ep;
78 	u32 epctrlx;
79 	int i = 0;
80 
81 	/* ep0 in and out */
82 	for (i = 0; i < 2; i++) {
83 		ep = &udc->eps[i];
84 		ep->udc = udc;
85 
86 		/* ep0 dQH */
87 		ep->dqh = &udc->ep_dqh[i];
88 
89 		/* configure ep0 endpoint capabilities in dQH */
90 		ep->dqh->max_packet_length =
91 			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
92 			| EP_QUEUE_HEAD_IOS;
93 
94 		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
95 
96 		epctrlx = readl(&udc->op_regs->epctrlx[0]);
97 		if (i) {	/* TX */
98 			epctrlx |= EPCTRL_TX_ENABLE
99 				| (USB_ENDPOINT_XFER_CONTROL
100 					<< EPCTRL_TX_EP_TYPE_SHIFT);
101 
102 		} else {	/* RX */
103 			epctrlx |= EPCTRL_RX_ENABLE
104 				| (USB_ENDPOINT_XFER_CONTROL
105 					<< EPCTRL_RX_EP_TYPE_SHIFT);
106 		}
107 
108 		writel(epctrlx, &udc->op_regs->epctrlx[0]);
109 	}
110 }
111 
112 /* protocol ep0 stall, will automatically be cleared on new transaction */
113 static void ep0_stall(struct mv_udc *udc)
114 {
115 	u32	epctrlx;
116 
117 	/* set TX and RX to stall */
118 	epctrlx = readl(&udc->op_regs->epctrlx[0]);
119 	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
120 	writel(epctrlx, &udc->op_regs->epctrlx[0]);
121 
122 	/* update ep0 state */
123 	udc->ep0_state = WAIT_FOR_SETUP;
124 	udc->ep0_dir = EP_DIR_OUT;
125 }
126 
127 static int process_ep_req(struct mv_udc *udc, int index,
128 	struct mv_req *curr_req)
129 {
130 	struct mv_dtd	*curr_dtd;
131 	struct mv_dqh	*curr_dqh;
132 	int td_complete, actual, remaining_length;
133 	int i, direction;
134 	int retval = 0;
135 	u32 errors;
136 	u32 bit_pos;
137 
138 	curr_dqh = &udc->ep_dqh[index];
139 	direction = index % 2;
140 
141 	curr_dtd = curr_req->head;
142 	td_complete = 0;
143 	actual = curr_req->req.length;
144 
145 	for (i = 0; i < curr_req->dtd_count; i++) {
146 		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
147 			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
148 				udc->eps[index].name);
149 			return 1;
150 		}
151 
152 		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
153 		if (!errors) {
154 			remaining_length =
155 				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
156 					>> DTD_LENGTH_BIT_POS;
157 			actual -= remaining_length;
158 
159 			if (remaining_length) {
160 				if (direction) {
161 					dev_dbg(&udc->dev->dev,
162 						"TX dTD remains data\n");
163 					retval = -EPROTO;
164 					break;
165 				} else
166 					break;
167 			}
168 		} else {
169 			dev_info(&udc->dev->dev,
170 				"complete_tr error: ep=%d %s: error = 0x%x\n",
171 				index >> 1, direction ? "SEND" : "RECV",
172 				errors);
173 			if (errors & DTD_STATUS_HALTED) {
174 				/* Clear the errors and Halt condition */
175 				curr_dqh->size_ioc_int_sts &= ~errors;
176 				retval = -EPIPE;
177 			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
178 				retval = -EPROTO;
179 			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
180 				retval = -EILSEQ;
181 			}
182 		}
183 		if (i != curr_req->dtd_count - 1)
184 			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
185 	}
186 	if (retval)
187 		return retval;
188 
189 	if (direction == EP_DIR_OUT)
190 		bit_pos = 1 << curr_req->ep->ep_num;
191 	else
192 		bit_pos = 1 << (16 + curr_req->ep->ep_num);
193 
194 	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
195 		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
196 			while (readl(&udc->op_regs->epstatus) & bit_pos)
197 				udelay(1);
198 			break;
199 		}
200 		udelay(1);
201 	}
202 
203 	curr_req->req.actual = actual;
204 
205 	return 0;
206 }
207 
208 /*
209  * done() - retire a request; caller blocked irqs
210  * @status : request status to be set, only works when
211  * request is still in progress.
212  */
213 static void done(struct mv_ep *ep, struct mv_req *req, int status)
214 	__releases(&ep->udc->lock)
215 	__acquires(&ep->udc->lock)
216 {
217 	struct mv_udc *udc = NULL;
218 	unsigned char stopped = ep->stopped;
219 	struct mv_dtd *curr_td, *next_td;
220 	int j;
221 
222 	udc = (struct mv_udc *)ep->udc;
223 	/* Removed the req from fsl_ep->queue */
224 	list_del_init(&req->queue);
225 
226 	/* req.status should be set as -EINPROGRESS in ep_queue() */
227 	if (req->req.status == -EINPROGRESS)
228 		req->req.status = status;
229 	else
230 		status = req->req.status;
231 
232 	/* Free dtd for the request */
233 	next_td = req->head;
234 	for (j = 0; j < req->dtd_count; j++) {
235 		curr_td = next_td;
236 		if (j != req->dtd_count - 1)
237 			next_td = curr_td->next_dtd_virt;
238 		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
239 	}
240 
241 	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
242 
243 	if (status && (status != -ESHUTDOWN))
244 		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
245 			ep->ep.name, &req->req, status,
246 			req->req.actual, req->req.length);
247 
248 	ep->stopped = 1;
249 
250 	spin_unlock(&ep->udc->lock);
251 
252 	usb_gadget_giveback_request(&ep->ep, &req->req);
253 
254 	spin_lock(&ep->udc->lock);
255 	ep->stopped = stopped;
256 }
257 
258 static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
259 {
260 	struct mv_udc *udc;
261 	struct mv_dqh *dqh;
262 	u32 bit_pos, direction;
263 	u32 usbcmd, epstatus;
264 	unsigned int loops;
265 	int retval = 0;
266 
267 	udc = ep->udc;
268 	direction = ep_dir(ep);
269 	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
270 	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
271 
272 	/* check if the pipe is empty */
273 	if (!(list_empty(&ep->queue))) {
274 		struct mv_req *lastreq;
275 		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
276 		lastreq->tail->dtd_next =
277 			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
278 
279 		wmb();
280 
281 		if (readl(&udc->op_regs->epprime) & bit_pos)
282 			goto done;
283 
284 		loops = LOOPS(READSAFE_TIMEOUT);
285 		while (1) {
286 			/* start with setting the semaphores */
287 			usbcmd = readl(&udc->op_regs->usbcmd);
288 			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
289 			writel(usbcmd, &udc->op_regs->usbcmd);
290 
291 			/* read the endpoint status */
292 			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
293 
294 			/*
295 			 * Reread the ATDTW semaphore bit to check if it is
296 			 * cleared. When hardware see a hazard, it will clear
297 			 * the bit or else we remain set to 1 and we can
298 			 * proceed with priming of endpoint if not already
299 			 * primed.
300 			 */
301 			if (readl(&udc->op_regs->usbcmd)
302 				& USBCMD_ATDTW_TRIPWIRE_SET)
303 				break;
304 
305 			loops--;
306 			if (loops == 0) {
307 				dev_err(&udc->dev->dev,
308 					"Timeout for ATDTW_TRIPWIRE...\n");
309 				retval = -ETIME;
310 				goto done;
311 			}
312 			udelay(LOOPS_USEC);
313 		}
314 
315 		/* Clear the semaphore */
316 		usbcmd = readl(&udc->op_regs->usbcmd);
317 		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
318 		writel(usbcmd, &udc->op_regs->usbcmd);
319 
320 		if (epstatus)
321 			goto done;
322 	}
323 
324 	/* Write dQH next pointer and terminate bit to 0 */
325 	dqh->next_dtd_ptr = req->head->td_dma
326 				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
327 
328 	/* clear active and halt bit, in case set from a previous error */
329 	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
330 
331 	/* Ensure that updates to the QH will occur before priming. */
332 	wmb();
333 
334 	/* Prime the Endpoint */
335 	writel(bit_pos, &udc->op_regs->epprime);
336 
337 done:
338 	return retval;
339 }
340 
341 static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
342 		dma_addr_t *dma, int *is_last)
343 {
344 	struct mv_dtd *dtd;
345 	struct mv_udc *udc;
346 	struct mv_dqh *dqh;
347 	u32 temp, mult = 0;
348 
349 	/* how big will this transfer be? */
350 	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
351 		dqh = req->ep->dqh;
352 		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
353 				& 0x3;
354 		*length = min(req->req.length - req->req.actual,
355 				(unsigned)(mult * req->ep->ep.maxpacket));
356 	} else
357 		*length = min(req->req.length - req->req.actual,
358 				(unsigned)EP_MAX_LENGTH_TRANSFER);
359 
360 	udc = req->ep->udc;
361 
362 	/*
363 	 * Be careful that no _GFP_HIGHMEM is set,
364 	 * or we can not use dma_to_virt
365 	 */
366 	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
367 	if (dtd == NULL)
368 		return dtd;
369 
370 	dtd->td_dma = *dma;
371 	/* initialize buffer page pointers */
372 	temp = (u32)(req->req.dma + req->req.actual);
373 	dtd->buff_ptr0 = cpu_to_le32(temp);
374 	temp &= ~0xFFF;
375 	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
376 	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
377 	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
378 	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
379 
380 	req->req.actual += *length;
381 
382 	/* zlp is needed if req->req.zero is set */
383 	if (req->req.zero) {
384 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
385 			*is_last = 1;
386 		else
387 			*is_last = 0;
388 	} else if (req->req.length == req->req.actual)
389 		*is_last = 1;
390 	else
391 		*is_last = 0;
392 
393 	/* Fill in the transfer size; set active bit */
394 	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
395 
396 	/* Enable interrupt for the last dtd of a request */
397 	if (*is_last && !req->req.no_interrupt)
398 		temp |= DTD_IOC;
399 
400 	temp |= mult << 10;
401 
402 	dtd->size_ioc_sts = temp;
403 
404 	mb();
405 
406 	return dtd;
407 }
408 
409 /* generate dTD linked list for a request */
410 static int req_to_dtd(struct mv_req *req)
411 {
412 	unsigned count;
413 	int is_last, is_first = 1;
414 	struct mv_dtd *dtd, *last_dtd = NULL;
415 	struct mv_udc *udc;
416 	dma_addr_t dma;
417 
418 	udc = req->ep->udc;
419 
420 	do {
421 		dtd = build_dtd(req, &count, &dma, &is_last);
422 		if (dtd == NULL)
423 			return -ENOMEM;
424 
425 		if (is_first) {
426 			is_first = 0;
427 			req->head = dtd;
428 		} else {
429 			last_dtd->dtd_next = dma;
430 			last_dtd->next_dtd_virt = dtd;
431 		}
432 		last_dtd = dtd;
433 		req->dtd_count++;
434 	} while (!is_last);
435 
436 	/* set terminate bit to 1 for the last dTD */
437 	dtd->dtd_next = DTD_NEXT_TERMINATE;
438 
439 	req->tail = dtd;
440 
441 	return 0;
442 }
443 
444 static int mv_ep_enable(struct usb_ep *_ep,
445 		const struct usb_endpoint_descriptor *desc)
446 {
447 	struct mv_udc *udc;
448 	struct mv_ep *ep;
449 	struct mv_dqh *dqh;
450 	u16 max = 0;
451 	u32 bit_pos, epctrlx, direction;
452 	unsigned char zlt = 0, ios = 0, mult = 0;
453 	unsigned long flags;
454 
455 	ep = container_of(_ep, struct mv_ep, ep);
456 	udc = ep->udc;
457 
458 	if (!_ep || !desc
459 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
460 		return -EINVAL;
461 
462 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
463 		return -ESHUTDOWN;
464 
465 	direction = ep_dir(ep);
466 	max = usb_endpoint_maxp(desc);
467 
468 	/*
469 	 * disable HW zero length termination select
470 	 * driver handles zero length packet through req->req.zero
471 	 */
472 	zlt = 1;
473 
474 	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
475 
476 	/* Check if the Endpoint is Primed */
477 	if ((readl(&udc->op_regs->epprime) & bit_pos)
478 		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
479 		dev_info(&udc->dev->dev,
480 			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
481 			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
482 			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
483 			(unsigned)readl(&udc->op_regs->epprime),
484 			(unsigned)readl(&udc->op_regs->epstatus),
485 			(unsigned)bit_pos);
486 		goto en_done;
487 	}
488 	/* Set the max packet length, interrupt on Setup and Mult fields */
489 	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
490 	case USB_ENDPOINT_XFER_BULK:
491 		zlt = 1;
492 		mult = 0;
493 		break;
494 	case USB_ENDPOINT_XFER_CONTROL:
495 		ios = 1;
496 	case USB_ENDPOINT_XFER_INT:
497 		mult = 0;
498 		break;
499 	case USB_ENDPOINT_XFER_ISOC:
500 		/* Calculate transactions needed for high bandwidth iso */
501 		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
502 		max = max & 0x7ff;	/* bit 0~10 */
503 		/* 3 transactions at most */
504 		if (mult > 3)
505 			goto en_done;
506 		break;
507 	default:
508 		goto en_done;
509 	}
510 
511 	spin_lock_irqsave(&udc->lock, flags);
512 	/* Get the endpoint queue head address */
513 	dqh = ep->dqh;
514 	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
515 		| (mult << EP_QUEUE_HEAD_MULT_POS)
516 		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
517 		| (ios ? EP_QUEUE_HEAD_IOS : 0);
518 	dqh->next_dtd_ptr = 1;
519 	dqh->size_ioc_int_sts = 0;
520 
521 	ep->ep.maxpacket = max;
522 	ep->ep.desc = desc;
523 	ep->stopped = 0;
524 
525 	/* Enable the endpoint for Rx or Tx and set the endpoint type */
526 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
527 	if (direction == EP_DIR_IN) {
528 		epctrlx &= ~EPCTRL_TX_ALL_MASK;
529 		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
530 			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
531 				<< EPCTRL_TX_EP_TYPE_SHIFT);
532 	} else {
533 		epctrlx &= ~EPCTRL_RX_ALL_MASK;
534 		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
535 			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
536 				<< EPCTRL_RX_EP_TYPE_SHIFT);
537 	}
538 	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
539 
540 	/*
541 	 * Implement Guideline (GL# USB-7) The unused endpoint type must
542 	 * be programmed to bulk.
543 	 */
544 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
545 	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
546 		epctrlx |= (USB_ENDPOINT_XFER_BULK
547 				<< EPCTRL_RX_EP_TYPE_SHIFT);
548 		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
549 	}
550 
551 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
552 	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
553 		epctrlx |= (USB_ENDPOINT_XFER_BULK
554 				<< EPCTRL_TX_EP_TYPE_SHIFT);
555 		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
556 	}
557 
558 	spin_unlock_irqrestore(&udc->lock, flags);
559 
560 	return 0;
561 en_done:
562 	return -EINVAL;
563 }
564 
565 static int  mv_ep_disable(struct usb_ep *_ep)
566 {
567 	struct mv_udc *udc;
568 	struct mv_ep *ep;
569 	struct mv_dqh *dqh;
570 	u32 bit_pos, epctrlx, direction;
571 	unsigned long flags;
572 
573 	ep = container_of(_ep, struct mv_ep, ep);
574 	if ((_ep == NULL) || !ep->ep.desc)
575 		return -EINVAL;
576 
577 	udc = ep->udc;
578 
579 	/* Get the endpoint queue head address */
580 	dqh = ep->dqh;
581 
582 	spin_lock_irqsave(&udc->lock, flags);
583 
584 	direction = ep_dir(ep);
585 	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
586 
587 	/* Reset the max packet length and the interrupt on Setup */
588 	dqh->max_packet_length = 0;
589 
590 	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
591 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
592 	epctrlx &= ~((direction == EP_DIR_IN)
593 			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
594 			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
595 	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
596 
597 	/* nuke all pending requests (does flush) */
598 	nuke(ep, -ESHUTDOWN);
599 
600 	ep->ep.desc = NULL;
601 	ep->stopped = 1;
602 
603 	spin_unlock_irqrestore(&udc->lock, flags);
604 
605 	return 0;
606 }
607 
608 static struct usb_request *
609 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
610 {
611 	struct mv_req *req = NULL;
612 
613 	req = kzalloc(sizeof *req, gfp_flags);
614 	if (!req)
615 		return NULL;
616 
617 	req->req.dma = DMA_ADDR_INVALID;
618 	INIT_LIST_HEAD(&req->queue);
619 
620 	return &req->req;
621 }
622 
623 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
624 {
625 	struct mv_req *req = NULL;
626 
627 	req = container_of(_req, struct mv_req, req);
628 
629 	if (_req)
630 		kfree(req);
631 }
632 
633 static void mv_ep_fifo_flush(struct usb_ep *_ep)
634 {
635 	struct mv_udc *udc;
636 	u32 bit_pos, direction;
637 	struct mv_ep *ep;
638 	unsigned int loops;
639 
640 	if (!_ep)
641 		return;
642 
643 	ep = container_of(_ep, struct mv_ep, ep);
644 	if (!ep->ep.desc)
645 		return;
646 
647 	udc = ep->udc;
648 	direction = ep_dir(ep);
649 
650 	if (ep->ep_num == 0)
651 		bit_pos = (1 << 16) | 1;
652 	else if (direction == EP_DIR_OUT)
653 		bit_pos = 1 << ep->ep_num;
654 	else
655 		bit_pos = 1 << (16 + ep->ep_num);
656 
657 	loops = LOOPS(EPSTATUS_TIMEOUT);
658 	do {
659 		unsigned int inter_loops;
660 
661 		if (loops == 0) {
662 			dev_err(&udc->dev->dev,
663 				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
664 				(unsigned)readl(&udc->op_regs->epstatus),
665 				(unsigned)bit_pos);
666 			return;
667 		}
668 		/* Write 1 to the Flush register */
669 		writel(bit_pos, &udc->op_regs->epflush);
670 
671 		/* Wait until flushing completed */
672 		inter_loops = LOOPS(FLUSH_TIMEOUT);
673 		while (readl(&udc->op_regs->epflush)) {
674 			/*
675 			 * ENDPTFLUSH bit should be cleared to indicate this
676 			 * operation is complete
677 			 */
678 			if (inter_loops == 0) {
679 				dev_err(&udc->dev->dev,
680 					"TIMEOUT for ENDPTFLUSH=0x%x,"
681 					"bit_pos=0x%x\n",
682 					(unsigned)readl(&udc->op_regs->epflush),
683 					(unsigned)bit_pos);
684 				return;
685 			}
686 			inter_loops--;
687 			udelay(LOOPS_USEC);
688 		}
689 		loops--;
690 	} while (readl(&udc->op_regs->epstatus) & bit_pos);
691 }
692 
693 /* queues (submits) an I/O request to an endpoint */
694 static int
695 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
696 {
697 	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
698 	struct mv_req *req = container_of(_req, struct mv_req, req);
699 	struct mv_udc *udc = ep->udc;
700 	unsigned long flags;
701 	int retval;
702 
703 	/* catch various bogus parameters */
704 	if (!_req || !req->req.complete || !req->req.buf
705 			|| !list_empty(&req->queue)) {
706 		dev_err(&udc->dev->dev, "%s, bad params", __func__);
707 		return -EINVAL;
708 	}
709 	if (unlikely(!_ep || !ep->ep.desc)) {
710 		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
711 		return -EINVAL;
712 	}
713 
714 	udc = ep->udc;
715 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
716 		return -ESHUTDOWN;
717 
718 	req->ep = ep;
719 
720 	/* map virtual address to hardware */
721 	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
722 	if (retval)
723 		return retval;
724 
725 	req->req.status = -EINPROGRESS;
726 	req->req.actual = 0;
727 	req->dtd_count = 0;
728 
729 	spin_lock_irqsave(&udc->lock, flags);
730 
731 	/* build dtds and push them to device queue */
732 	if (!req_to_dtd(req)) {
733 		retval = queue_dtd(ep, req);
734 		if (retval) {
735 			spin_unlock_irqrestore(&udc->lock, flags);
736 			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
737 			goto err_unmap_dma;
738 		}
739 	} else {
740 		spin_unlock_irqrestore(&udc->lock, flags);
741 		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
742 		retval = -ENOMEM;
743 		goto err_unmap_dma;
744 	}
745 
746 	/* Update ep0 state */
747 	if (ep->ep_num == 0)
748 		udc->ep0_state = DATA_STATE_XMIT;
749 
750 	/* irq handler advances the queue */
751 	list_add_tail(&req->queue, &ep->queue);
752 	spin_unlock_irqrestore(&udc->lock, flags);
753 
754 	return 0;
755 
756 err_unmap_dma:
757 	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
758 
759 	return retval;
760 }
761 
762 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
763 {
764 	struct mv_dqh *dqh = ep->dqh;
765 	u32 bit_pos;
766 
767 	/* Write dQH next pointer and terminate bit to 0 */
768 	dqh->next_dtd_ptr = req->head->td_dma
769 		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
770 
771 	/* clear active and halt bit, in case set from a previous error */
772 	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
773 
774 	/* Ensure that updates to the QH will occure before priming. */
775 	wmb();
776 
777 	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
778 
779 	/* Prime the Endpoint */
780 	writel(bit_pos, &ep->udc->op_regs->epprime);
781 }
782 
783 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
784 static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
785 {
786 	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
787 	struct mv_req *req;
788 	struct mv_udc *udc = ep->udc;
789 	unsigned long flags;
790 	int stopped, ret = 0;
791 	u32 epctrlx;
792 
793 	if (!_ep || !_req)
794 		return -EINVAL;
795 
796 	spin_lock_irqsave(&ep->udc->lock, flags);
797 	stopped = ep->stopped;
798 
799 	/* Stop the ep before we deal with the queue */
800 	ep->stopped = 1;
801 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
802 	if (ep_dir(ep) == EP_DIR_IN)
803 		epctrlx &= ~EPCTRL_TX_ENABLE;
804 	else
805 		epctrlx &= ~EPCTRL_RX_ENABLE;
806 	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
807 
808 	/* make sure it's actually queued on this endpoint */
809 	list_for_each_entry(req, &ep->queue, queue) {
810 		if (&req->req == _req)
811 			break;
812 	}
813 	if (&req->req != _req) {
814 		ret = -EINVAL;
815 		goto out;
816 	}
817 
818 	/* The request is in progress, or completed but not dequeued */
819 	if (ep->queue.next == &req->queue) {
820 		_req->status = -ECONNRESET;
821 		mv_ep_fifo_flush(_ep);	/* flush current transfer */
822 
823 		/* The request isn't the last request in this ep queue */
824 		if (req->queue.next != &ep->queue) {
825 			struct mv_req *next_req;
826 
827 			next_req = list_entry(req->queue.next,
828 				struct mv_req, queue);
829 
830 			/* Point the QH to the first TD of next request */
831 			mv_prime_ep(ep, next_req);
832 		} else {
833 			struct mv_dqh *qh;
834 
835 			qh = ep->dqh;
836 			qh->next_dtd_ptr = 1;
837 			qh->size_ioc_int_sts = 0;
838 		}
839 
840 		/* The request hasn't been processed, patch up the TD chain */
841 	} else {
842 		struct mv_req *prev_req;
843 
844 		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
845 		writel(readl(&req->tail->dtd_next),
846 				&prev_req->tail->dtd_next);
847 
848 	}
849 
850 	done(ep, req, -ECONNRESET);
851 
852 	/* Enable EP */
853 out:
854 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
855 	if (ep_dir(ep) == EP_DIR_IN)
856 		epctrlx |= EPCTRL_TX_ENABLE;
857 	else
858 		epctrlx |= EPCTRL_RX_ENABLE;
859 	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
860 	ep->stopped = stopped;
861 
862 	spin_unlock_irqrestore(&ep->udc->lock, flags);
863 	return ret;
864 }
865 
866 static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
867 {
868 	u32 epctrlx;
869 
870 	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
871 
872 	if (stall) {
873 		if (direction == EP_DIR_IN)
874 			epctrlx |= EPCTRL_TX_EP_STALL;
875 		else
876 			epctrlx |= EPCTRL_RX_EP_STALL;
877 	} else {
878 		if (direction == EP_DIR_IN) {
879 			epctrlx &= ~EPCTRL_TX_EP_STALL;
880 			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
881 		} else {
882 			epctrlx &= ~EPCTRL_RX_EP_STALL;
883 			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
884 		}
885 	}
886 	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
887 }
888 
889 static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
890 {
891 	u32 epctrlx;
892 
893 	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
894 
895 	if (direction == EP_DIR_OUT)
896 		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
897 	else
898 		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
899 }
900 
901 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
902 {
903 	struct mv_ep *ep;
904 	unsigned long flags = 0;
905 	int status = 0;
906 	struct mv_udc *udc;
907 
908 	ep = container_of(_ep, struct mv_ep, ep);
909 	udc = ep->udc;
910 	if (!_ep || !ep->ep.desc) {
911 		status = -EINVAL;
912 		goto out;
913 	}
914 
915 	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
916 		status = -EOPNOTSUPP;
917 		goto out;
918 	}
919 
920 	/*
921 	 * Attempt to halt IN ep will fail if any transfer requests
922 	 * are still queue
923 	 */
924 	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
925 		status = -EAGAIN;
926 		goto out;
927 	}
928 
929 	spin_lock_irqsave(&ep->udc->lock, flags);
930 	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
931 	if (halt && wedge)
932 		ep->wedge = 1;
933 	else if (!halt)
934 		ep->wedge = 0;
935 	spin_unlock_irqrestore(&ep->udc->lock, flags);
936 
937 	if (ep->ep_num == 0) {
938 		udc->ep0_state = WAIT_FOR_SETUP;
939 		udc->ep0_dir = EP_DIR_OUT;
940 	}
941 out:
942 	return status;
943 }
944 
945 static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
946 {
947 	return mv_ep_set_halt_wedge(_ep, halt, 0);
948 }
949 
950 static int mv_ep_set_wedge(struct usb_ep *_ep)
951 {
952 	return mv_ep_set_halt_wedge(_ep, 1, 1);
953 }
954 
955 static struct usb_ep_ops mv_ep_ops = {
956 	.enable		= mv_ep_enable,
957 	.disable	= mv_ep_disable,
958 
959 	.alloc_request	= mv_alloc_request,
960 	.free_request	= mv_free_request,
961 
962 	.queue		= mv_ep_queue,
963 	.dequeue	= mv_ep_dequeue,
964 
965 	.set_wedge	= mv_ep_set_wedge,
966 	.set_halt	= mv_ep_set_halt,
967 	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
968 };
969 
970 static void udc_clock_enable(struct mv_udc *udc)
971 {
972 	clk_prepare_enable(udc->clk);
973 }
974 
975 static void udc_clock_disable(struct mv_udc *udc)
976 {
977 	clk_disable_unprepare(udc->clk);
978 }
979 
980 static void udc_stop(struct mv_udc *udc)
981 {
982 	u32 tmp;
983 
984 	/* Disable interrupts */
985 	tmp = readl(&udc->op_regs->usbintr);
986 	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
987 		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
988 	writel(tmp, &udc->op_regs->usbintr);
989 
990 	udc->stopped = 1;
991 
992 	/* Reset the Run the bit in the command register to stop VUSB */
993 	tmp = readl(&udc->op_regs->usbcmd);
994 	tmp &= ~USBCMD_RUN_STOP;
995 	writel(tmp, &udc->op_regs->usbcmd);
996 }
997 
998 static void udc_start(struct mv_udc *udc)
999 {
1000 	u32 usbintr;
1001 
1002 	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1003 		| USBINTR_PORT_CHANGE_DETECT_EN
1004 		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1005 	/* Enable interrupts */
1006 	writel(usbintr, &udc->op_regs->usbintr);
1007 
1008 	udc->stopped = 0;
1009 
1010 	/* Set the Run bit in the command register */
1011 	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1012 }
1013 
1014 static int udc_reset(struct mv_udc *udc)
1015 {
1016 	unsigned int loops;
1017 	u32 tmp, portsc;
1018 
1019 	/* Stop the controller */
1020 	tmp = readl(&udc->op_regs->usbcmd);
1021 	tmp &= ~USBCMD_RUN_STOP;
1022 	writel(tmp, &udc->op_regs->usbcmd);
1023 
1024 	/* Reset the controller to get default values */
1025 	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1026 
1027 	/* wait for reset to complete */
1028 	loops = LOOPS(RESET_TIMEOUT);
1029 	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1030 		if (loops == 0) {
1031 			dev_err(&udc->dev->dev,
1032 				"Wait for RESET completed TIMEOUT\n");
1033 			return -ETIMEDOUT;
1034 		}
1035 		loops--;
1036 		udelay(LOOPS_USEC);
1037 	}
1038 
1039 	/* set controller to device mode */
1040 	tmp = readl(&udc->op_regs->usbmode);
1041 	tmp |= USBMODE_CTRL_MODE_DEVICE;
1042 
1043 	/* turn setup lockout off, require setup tripwire in usbcmd */
1044 	tmp |= USBMODE_SETUP_LOCK_OFF;
1045 
1046 	writel(tmp, &udc->op_regs->usbmode);
1047 
1048 	writel(0x0, &udc->op_regs->epsetupstat);
1049 
1050 	/* Configure the Endpoint List Address */
1051 	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1052 		&udc->op_regs->eplistaddr);
1053 
1054 	portsc = readl(&udc->op_regs->portsc[0]);
1055 	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1056 		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1057 
1058 	if (udc->force_fs)
1059 		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1060 	else
1061 		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1062 
1063 	writel(portsc, &udc->op_regs->portsc[0]);
1064 
1065 	tmp = readl(&udc->op_regs->epctrlx[0]);
1066 	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1067 	writel(tmp, &udc->op_regs->epctrlx[0]);
1068 
1069 	return 0;
1070 }
1071 
1072 static int mv_udc_enable_internal(struct mv_udc *udc)
1073 {
1074 	int retval;
1075 
1076 	if (udc->active)
1077 		return 0;
1078 
1079 	dev_dbg(&udc->dev->dev, "enable udc\n");
1080 	udc_clock_enable(udc);
1081 	if (udc->pdata->phy_init) {
1082 		retval = udc->pdata->phy_init(udc->phy_regs);
1083 		if (retval) {
1084 			dev_err(&udc->dev->dev,
1085 				"init phy error %d\n", retval);
1086 			udc_clock_disable(udc);
1087 			return retval;
1088 		}
1089 	}
1090 	udc->active = 1;
1091 
1092 	return 0;
1093 }
1094 
1095 static int mv_udc_enable(struct mv_udc *udc)
1096 {
1097 	if (udc->clock_gating)
1098 		return mv_udc_enable_internal(udc);
1099 
1100 	return 0;
1101 }
1102 
1103 static void mv_udc_disable_internal(struct mv_udc *udc)
1104 {
1105 	if (udc->active) {
1106 		dev_dbg(&udc->dev->dev, "disable udc\n");
1107 		if (udc->pdata->phy_deinit)
1108 			udc->pdata->phy_deinit(udc->phy_regs);
1109 		udc_clock_disable(udc);
1110 		udc->active = 0;
1111 	}
1112 }
1113 
1114 static void mv_udc_disable(struct mv_udc *udc)
1115 {
1116 	if (udc->clock_gating)
1117 		mv_udc_disable_internal(udc);
1118 }
1119 
1120 static int mv_udc_get_frame(struct usb_gadget *gadget)
1121 {
1122 	struct mv_udc *udc;
1123 	u16	retval;
1124 
1125 	if (!gadget)
1126 		return -ENODEV;
1127 
1128 	udc = container_of(gadget, struct mv_udc, gadget);
1129 
1130 	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1131 
1132 	return retval;
1133 }
1134 
1135 /* Tries to wake up the host connected to this gadget */
1136 static int mv_udc_wakeup(struct usb_gadget *gadget)
1137 {
1138 	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1139 	u32 portsc;
1140 
1141 	/* Remote wakeup feature not enabled by host */
1142 	if (!udc->remote_wakeup)
1143 		return -ENOTSUPP;
1144 
1145 	portsc = readl(&udc->op_regs->portsc);
1146 	/* not suspended? */
1147 	if (!(portsc & PORTSCX_PORT_SUSPEND))
1148 		return 0;
1149 	/* trigger force resume */
1150 	portsc |= PORTSCX_PORT_FORCE_RESUME;
1151 	writel(portsc, &udc->op_regs->portsc[0]);
1152 	return 0;
1153 }
1154 
1155 static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
1156 {
1157 	struct mv_udc *udc;
1158 	unsigned long flags;
1159 	int retval = 0;
1160 
1161 	udc = container_of(gadget, struct mv_udc, gadget);
1162 	spin_lock_irqsave(&udc->lock, flags);
1163 
1164 	udc->vbus_active = (is_active != 0);
1165 
1166 	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1167 		__func__, udc->softconnect, udc->vbus_active);
1168 
1169 	if (udc->driver && udc->softconnect && udc->vbus_active) {
1170 		retval = mv_udc_enable(udc);
1171 		if (retval == 0) {
1172 			/* Clock is disabled, need re-init registers */
1173 			udc_reset(udc);
1174 			ep0_reset(udc);
1175 			udc_start(udc);
1176 		}
1177 	} else if (udc->driver && udc->softconnect) {
1178 		if (!udc->active)
1179 			goto out;
1180 
1181 		/* stop all the transfer in queue*/
1182 		stop_activity(udc, udc->driver);
1183 		udc_stop(udc);
1184 		mv_udc_disable(udc);
1185 	}
1186 
1187 out:
1188 	spin_unlock_irqrestore(&udc->lock, flags);
1189 	return retval;
1190 }
1191 
1192 static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1193 {
1194 	struct mv_udc *udc;
1195 	unsigned long flags;
1196 	int retval = 0;
1197 
1198 	udc = container_of(gadget, struct mv_udc, gadget);
1199 	spin_lock_irqsave(&udc->lock, flags);
1200 
1201 	udc->softconnect = (is_on != 0);
1202 
1203 	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1204 			__func__, udc->softconnect, udc->vbus_active);
1205 
1206 	if (udc->driver && udc->softconnect && udc->vbus_active) {
1207 		retval = mv_udc_enable(udc);
1208 		if (retval == 0) {
1209 			/* Clock is disabled, need re-init registers */
1210 			udc_reset(udc);
1211 			ep0_reset(udc);
1212 			udc_start(udc);
1213 		}
1214 	} else if (udc->driver && udc->vbus_active) {
1215 		/* stop all the transfer in queue*/
1216 		stop_activity(udc, udc->driver);
1217 		udc_stop(udc);
1218 		mv_udc_disable(udc);
1219 	}
1220 
1221 	spin_unlock_irqrestore(&udc->lock, flags);
1222 	return retval;
1223 }
1224 
1225 static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
1226 static int mv_udc_stop(struct usb_gadget *);
1227 /* device controller usb_gadget_ops structure */
1228 static const struct usb_gadget_ops mv_ops = {
1229 
1230 	/* returns the current frame number */
1231 	.get_frame	= mv_udc_get_frame,
1232 
1233 	/* tries to wake up the host connected to this gadget */
1234 	.wakeup		= mv_udc_wakeup,
1235 
1236 	/* notify controller that VBUS is powered or not */
1237 	.vbus_session	= mv_udc_vbus_session,
1238 
1239 	/* D+ pullup, software-controlled connect/disconnect to USB host */
1240 	.pullup		= mv_udc_pullup,
1241 	.udc_start	= mv_udc_start,
1242 	.udc_stop	= mv_udc_stop,
1243 };
1244 
1245 static int eps_init(struct mv_udc *udc)
1246 {
1247 	struct mv_ep	*ep;
1248 	char name[14];
1249 	int i;
1250 
1251 	/* initialize ep0 */
1252 	ep = &udc->eps[0];
1253 	ep->udc = udc;
1254 	strncpy(ep->name, "ep0", sizeof(ep->name));
1255 	ep->ep.name = ep->name;
1256 	ep->ep.ops = &mv_ep_ops;
1257 	ep->wedge = 0;
1258 	ep->stopped = 0;
1259 	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
1260 	ep->ep.caps.type_control = true;
1261 	ep->ep.caps.dir_in = true;
1262 	ep->ep.caps.dir_out = true;
1263 	ep->ep_num = 0;
1264 	ep->ep.desc = &mv_ep0_desc;
1265 	INIT_LIST_HEAD(&ep->queue);
1266 
1267 	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1268 
1269 	/* initialize other endpoints */
1270 	for (i = 2; i < udc->max_eps * 2; i++) {
1271 		ep = &udc->eps[i];
1272 		if (i % 2) {
1273 			snprintf(name, sizeof(name), "ep%din", i / 2);
1274 			ep->direction = EP_DIR_IN;
1275 			ep->ep.caps.dir_in = true;
1276 		} else {
1277 			snprintf(name, sizeof(name), "ep%dout", i / 2);
1278 			ep->direction = EP_DIR_OUT;
1279 			ep->ep.caps.dir_out = true;
1280 		}
1281 		ep->udc = udc;
1282 		strncpy(ep->name, name, sizeof(ep->name));
1283 		ep->ep.name = ep->name;
1284 
1285 		ep->ep.caps.type_iso = true;
1286 		ep->ep.caps.type_bulk = true;
1287 		ep->ep.caps.type_int = true;
1288 
1289 		ep->ep.ops = &mv_ep_ops;
1290 		ep->stopped = 0;
1291 		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1292 		ep->ep_num = i / 2;
1293 
1294 		INIT_LIST_HEAD(&ep->queue);
1295 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1296 
1297 		ep->dqh = &udc->ep_dqh[i];
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 /* delete all endpoint requests, called with spinlock held */
1304 static void nuke(struct mv_ep *ep, int status)
1305 {
1306 	/* called with spinlock held */
1307 	ep->stopped = 1;
1308 
1309 	/* endpoint fifo flush */
1310 	mv_ep_fifo_flush(&ep->ep);
1311 
1312 	while (!list_empty(&ep->queue)) {
1313 		struct mv_req *req = NULL;
1314 		req = list_entry(ep->queue.next, struct mv_req, queue);
1315 		done(ep, req, status);
1316 	}
1317 }
1318 
1319 static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
1320 {
1321 	struct mv_ep	*ep;
1322 
1323 	nuke(&udc->eps[0], -ESHUTDOWN);
1324 
1325 	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1326 		nuke(ep, -ESHUTDOWN);
1327 	}
1328 
1329 	/* report reset; the driver is already quiesced */
1330 	if (driver) {
1331 		spin_unlock(&udc->lock);
1332 		usb_gadget_udc_reset(&udc->gadget, driver);
1333 		spin_lock(&udc->lock);
1334 	}
1335 }
1336 /* stop all USB activities */
1337 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1338 {
1339 	struct mv_ep	*ep;
1340 
1341 	nuke(&udc->eps[0], -ESHUTDOWN);
1342 
1343 	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1344 		nuke(ep, -ESHUTDOWN);
1345 	}
1346 
1347 	/* report disconnect; the driver is already quiesced */
1348 	if (driver) {
1349 		spin_unlock(&udc->lock);
1350 		driver->disconnect(&udc->gadget);
1351 		spin_lock(&udc->lock);
1352 	}
1353 }
1354 
1355 static int mv_udc_start(struct usb_gadget *gadget,
1356 		struct usb_gadget_driver *driver)
1357 {
1358 	struct mv_udc *udc;
1359 	int retval = 0;
1360 	unsigned long flags;
1361 
1362 	udc = container_of(gadget, struct mv_udc, gadget);
1363 
1364 	if (udc->driver)
1365 		return -EBUSY;
1366 
1367 	spin_lock_irqsave(&udc->lock, flags);
1368 
1369 	/* hook up the driver ... */
1370 	driver->driver.bus = NULL;
1371 	udc->driver = driver;
1372 
1373 	udc->usb_state = USB_STATE_ATTACHED;
1374 	udc->ep0_state = WAIT_FOR_SETUP;
1375 	udc->ep0_dir = EP_DIR_OUT;
1376 
1377 	spin_unlock_irqrestore(&udc->lock, flags);
1378 
1379 	if (udc->transceiver) {
1380 		retval = otg_set_peripheral(udc->transceiver->otg,
1381 					&udc->gadget);
1382 		if (retval) {
1383 			dev_err(&udc->dev->dev,
1384 				"unable to register peripheral to otg\n");
1385 			udc->driver = NULL;
1386 			return retval;
1387 		}
1388 	}
1389 
1390 	/* When boot with cable attached, there will be no vbus irq occurred */
1391 	if (udc->qwork)
1392 		queue_work(udc->qwork, &udc->vbus_work);
1393 
1394 	return 0;
1395 }
1396 
1397 static int mv_udc_stop(struct usb_gadget *gadget)
1398 {
1399 	struct mv_udc *udc;
1400 	unsigned long flags;
1401 
1402 	udc = container_of(gadget, struct mv_udc, gadget);
1403 
1404 	spin_lock_irqsave(&udc->lock, flags);
1405 
1406 	mv_udc_enable(udc);
1407 	udc_stop(udc);
1408 
1409 	/* stop all usb activities */
1410 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1411 	stop_activity(udc, NULL);
1412 	mv_udc_disable(udc);
1413 
1414 	spin_unlock_irqrestore(&udc->lock, flags);
1415 
1416 	/* unbind gadget driver */
1417 	udc->driver = NULL;
1418 
1419 	return 0;
1420 }
1421 
1422 static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1423 {
1424 	u32 portsc;
1425 
1426 	portsc = readl(&udc->op_regs->portsc[0]);
1427 	portsc |= mode << 16;
1428 	writel(portsc, &udc->op_regs->portsc[0]);
1429 }
1430 
1431 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1432 {
1433 	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1434 	struct mv_req *req = container_of(_req, struct mv_req, req);
1435 	struct mv_udc *udc;
1436 	unsigned long flags;
1437 
1438 	udc = mvep->udc;
1439 
1440 	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1441 
1442 	spin_lock_irqsave(&udc->lock, flags);
1443 	if (req->test_mode) {
1444 		mv_set_ptc(udc, req->test_mode);
1445 		req->test_mode = 0;
1446 	}
1447 	spin_unlock_irqrestore(&udc->lock, flags);
1448 }
1449 
1450 static int
1451 udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1452 {
1453 	int retval = 0;
1454 	struct mv_req *req;
1455 	struct mv_ep *ep;
1456 
1457 	ep = &udc->eps[0];
1458 	udc->ep0_dir = direction;
1459 	udc->ep0_state = WAIT_FOR_OUT_STATUS;
1460 
1461 	req = udc->status_req;
1462 
1463 	/* fill in the reqest structure */
1464 	if (empty == false) {
1465 		*((u16 *) req->req.buf) = cpu_to_le16(status);
1466 		req->req.length = 2;
1467 	} else
1468 		req->req.length = 0;
1469 
1470 	req->ep = ep;
1471 	req->req.status = -EINPROGRESS;
1472 	req->req.actual = 0;
1473 	if (udc->test_mode) {
1474 		req->req.complete = prime_status_complete;
1475 		req->test_mode = udc->test_mode;
1476 		udc->test_mode = 0;
1477 	} else
1478 		req->req.complete = NULL;
1479 	req->dtd_count = 0;
1480 
1481 	if (req->req.dma == DMA_ADDR_INVALID) {
1482 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1483 				req->req.buf, req->req.length,
1484 				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1485 		req->mapped = 1;
1486 	}
1487 
1488 	/* prime the data phase */
1489 	if (!req_to_dtd(req)) {
1490 		retval = queue_dtd(ep, req);
1491 		if (retval) {
1492 			dev_err(&udc->dev->dev,
1493 				"Failed to queue dtd when prime status\n");
1494 			goto out;
1495 		}
1496 	} else{	/* no mem */
1497 		retval = -ENOMEM;
1498 		dev_err(&udc->dev->dev,
1499 			"Failed to dma_pool_alloc when prime status\n");
1500 		goto out;
1501 	}
1502 
1503 	list_add_tail(&req->queue, &ep->queue);
1504 
1505 	return 0;
1506 out:
1507 	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
1508 
1509 	return retval;
1510 }
1511 
1512 static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1513 {
1514 	if (index <= TEST_FORCE_EN) {
1515 		udc->test_mode = index;
1516 		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1517 			ep0_stall(udc);
1518 	} else
1519 		dev_err(&udc->dev->dev,
1520 			"This test mode(%d) is not supported\n", index);
1521 }
1522 
1523 static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1524 {
1525 	udc->dev_addr = (u8)setup->wValue;
1526 
1527 	/* update usb state */
1528 	udc->usb_state = USB_STATE_ADDRESS;
1529 
1530 	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1531 		ep0_stall(udc);
1532 }
1533 
1534 static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1535 	struct usb_ctrlrequest *setup)
1536 {
1537 	u16 status = 0;
1538 	int retval;
1539 
1540 	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1541 		!= (USB_DIR_IN | USB_TYPE_STANDARD))
1542 		return;
1543 
1544 	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1545 		status = 1 << USB_DEVICE_SELF_POWERED;
1546 		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1547 	} else if ((setup->bRequestType & USB_RECIP_MASK)
1548 			== USB_RECIP_INTERFACE) {
1549 		/* get interface status */
1550 		status = 0;
1551 	} else if ((setup->bRequestType & USB_RECIP_MASK)
1552 			== USB_RECIP_ENDPOINT) {
1553 		u8 ep_num, direction;
1554 
1555 		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1556 		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1557 				? EP_DIR_IN : EP_DIR_OUT;
1558 		status = ep_is_stall(udc, ep_num, direction)
1559 				<< USB_ENDPOINT_HALT;
1560 	}
1561 
1562 	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1563 	if (retval)
1564 		ep0_stall(udc);
1565 	else
1566 		udc->ep0_state = DATA_STATE_XMIT;
1567 }
1568 
1569 static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1570 {
1571 	u8 ep_num;
1572 	u8 direction;
1573 	struct mv_ep *ep;
1574 
1575 	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1576 		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1577 		switch (setup->wValue) {
1578 		case USB_DEVICE_REMOTE_WAKEUP:
1579 			udc->remote_wakeup = 0;
1580 			break;
1581 		default:
1582 			goto out;
1583 		}
1584 	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1585 		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1586 		switch (setup->wValue) {
1587 		case USB_ENDPOINT_HALT:
1588 			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1589 			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1590 				? EP_DIR_IN : EP_DIR_OUT;
1591 			if (setup->wValue != 0 || setup->wLength != 0
1592 				|| ep_num > udc->max_eps)
1593 				goto out;
1594 			ep = &udc->eps[ep_num * 2 + direction];
1595 			if (ep->wedge == 1)
1596 				break;
1597 			spin_unlock(&udc->lock);
1598 			ep_set_stall(udc, ep_num, direction, 0);
1599 			spin_lock(&udc->lock);
1600 			break;
1601 		default:
1602 			goto out;
1603 		}
1604 	} else
1605 		goto out;
1606 
1607 	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1608 		ep0_stall(udc);
1609 out:
1610 	return;
1611 }
1612 
1613 static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1614 {
1615 	u8 ep_num;
1616 	u8 direction;
1617 
1618 	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1619 		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1620 		switch (setup->wValue) {
1621 		case USB_DEVICE_REMOTE_WAKEUP:
1622 			udc->remote_wakeup = 1;
1623 			break;
1624 		case USB_DEVICE_TEST_MODE:
1625 			if (setup->wIndex & 0xFF
1626 				||  udc->gadget.speed != USB_SPEED_HIGH)
1627 				ep0_stall(udc);
1628 
1629 			if (udc->usb_state != USB_STATE_CONFIGURED
1630 				&& udc->usb_state != USB_STATE_ADDRESS
1631 				&& udc->usb_state != USB_STATE_DEFAULT)
1632 				ep0_stall(udc);
1633 
1634 			mv_udc_testmode(udc, (setup->wIndex >> 8));
1635 			goto out;
1636 		default:
1637 			goto out;
1638 		}
1639 	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1640 		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1641 		switch (setup->wValue) {
1642 		case USB_ENDPOINT_HALT:
1643 			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1644 			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1645 				? EP_DIR_IN : EP_DIR_OUT;
1646 			if (setup->wValue != 0 || setup->wLength != 0
1647 				|| ep_num > udc->max_eps)
1648 				goto out;
1649 			spin_unlock(&udc->lock);
1650 			ep_set_stall(udc, ep_num, direction, 1);
1651 			spin_lock(&udc->lock);
1652 			break;
1653 		default:
1654 			goto out;
1655 		}
1656 	} else
1657 		goto out;
1658 
1659 	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1660 		ep0_stall(udc);
1661 out:
1662 	return;
1663 }
1664 
1665 static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
1666 	struct usb_ctrlrequest *setup)
1667 	__releases(&ep->udc->lock)
1668 	__acquires(&ep->udc->lock)
1669 {
1670 	bool delegate = false;
1671 
1672 	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
1673 
1674 	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1675 			setup->bRequestType, setup->bRequest,
1676 			setup->wValue, setup->wIndex, setup->wLength);
1677 	/* We process some standard setup requests here */
1678 	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1679 		switch (setup->bRequest) {
1680 		case USB_REQ_GET_STATUS:
1681 			ch9getstatus(udc, ep_num, setup);
1682 			break;
1683 
1684 		case USB_REQ_SET_ADDRESS:
1685 			ch9setaddress(udc, setup);
1686 			break;
1687 
1688 		case USB_REQ_CLEAR_FEATURE:
1689 			ch9clearfeature(udc, setup);
1690 			break;
1691 
1692 		case USB_REQ_SET_FEATURE:
1693 			ch9setfeature(udc, setup);
1694 			break;
1695 
1696 		default:
1697 			delegate = true;
1698 		}
1699 	} else
1700 		delegate = true;
1701 
1702 	/* delegate USB standard requests to the gadget driver */
1703 	if (delegate == true) {
1704 		/* USB requests handled by gadget */
1705 		if (setup->wLength) {
1706 			/* DATA phase from gadget, STATUS phase from udc */
1707 			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1708 					?  EP_DIR_IN : EP_DIR_OUT;
1709 			spin_unlock(&udc->lock);
1710 			if (udc->driver->setup(&udc->gadget,
1711 				&udc->local_setup_buff) < 0)
1712 				ep0_stall(udc);
1713 			spin_lock(&udc->lock);
1714 			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1715 					?  DATA_STATE_XMIT : DATA_STATE_RECV;
1716 		} else {
1717 			/* no DATA phase, IN STATUS phase from gadget */
1718 			udc->ep0_dir = EP_DIR_IN;
1719 			spin_unlock(&udc->lock);
1720 			if (udc->driver->setup(&udc->gadget,
1721 				&udc->local_setup_buff) < 0)
1722 				ep0_stall(udc);
1723 			spin_lock(&udc->lock);
1724 			udc->ep0_state = WAIT_FOR_OUT_STATUS;
1725 		}
1726 	}
1727 }
1728 
1729 /* complete DATA or STATUS phase of ep0 prime status phase if needed */
1730 static void ep0_req_complete(struct mv_udc *udc,
1731 	struct mv_ep *ep0, struct mv_req *req)
1732 {
1733 	u32 new_addr;
1734 
1735 	if (udc->usb_state == USB_STATE_ADDRESS) {
1736 		/* set the new address */
1737 		new_addr = (u32)udc->dev_addr;
1738 		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1739 			&udc->op_regs->deviceaddr);
1740 	}
1741 
1742 	done(ep0, req, 0);
1743 
1744 	switch (udc->ep0_state) {
1745 	case DATA_STATE_XMIT:
1746 		/* receive status phase */
1747 		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1748 			ep0_stall(udc);
1749 		break;
1750 	case DATA_STATE_RECV:
1751 		/* send status phase */
1752 		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1753 			ep0_stall(udc);
1754 		break;
1755 	case WAIT_FOR_OUT_STATUS:
1756 		udc->ep0_state = WAIT_FOR_SETUP;
1757 		break;
1758 	case WAIT_FOR_SETUP:
1759 		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1760 		break;
1761 	default:
1762 		ep0_stall(udc);
1763 		break;
1764 	}
1765 }
1766 
1767 static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
1768 {
1769 	u32 temp;
1770 	struct mv_dqh *dqh;
1771 
1772 	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
1773 
1774 	/* Clear bit in ENDPTSETUPSTAT */
1775 	writel((1 << ep_num), &udc->op_regs->epsetupstat);
1776 
1777 	/* while a hazard exists when setup package arrives */
1778 	do {
1779 		/* Set Setup Tripwire */
1780 		temp = readl(&udc->op_regs->usbcmd);
1781 		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1782 
1783 		/* Copy the setup packet to local buffer */
1784 		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
1785 	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
1786 
1787 	/* Clear Setup Tripwire */
1788 	temp = readl(&udc->op_regs->usbcmd);
1789 	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1790 }
1791 
1792 static void irq_process_tr_complete(struct mv_udc *udc)
1793 {
1794 	u32 tmp, bit_pos;
1795 	int i, ep_num = 0, direction = 0;
1796 	struct mv_ep	*curr_ep;
1797 	struct mv_req *curr_req, *temp_req;
1798 	int status;
1799 
1800 	/*
1801 	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
1802 	 * because the setup packets are to be read ASAP
1803 	 */
1804 
1805 	/* Process all Setup packet received interrupts */
1806 	tmp = readl(&udc->op_regs->epsetupstat);
1807 
1808 	if (tmp) {
1809 		for (i = 0; i < udc->max_eps; i++) {
1810 			if (tmp & (1 << i)) {
1811 				get_setup_data(udc, i,
1812 					(u8 *)(&udc->local_setup_buff));
1813 				handle_setup_packet(udc, i,
1814 					&udc->local_setup_buff);
1815 			}
1816 		}
1817 	}
1818 
1819 	/* Don't clear the endpoint setup status register here.
1820 	 * It is cleared as a setup packet is read out of the buffer
1821 	 */
1822 
1823 	/* Process non-setup transaction complete interrupts */
1824 	tmp = readl(&udc->op_regs->epcomplete);
1825 
1826 	if (!tmp)
1827 		return;
1828 
1829 	writel(tmp, &udc->op_regs->epcomplete);
1830 
1831 	for (i = 0; i < udc->max_eps * 2; i++) {
1832 		ep_num = i >> 1;
1833 		direction = i % 2;
1834 
1835 		bit_pos = 1 << (ep_num + 16 * direction);
1836 
1837 		if (!(bit_pos & tmp))
1838 			continue;
1839 
1840 		if (i == 1)
1841 			curr_ep = &udc->eps[0];
1842 		else
1843 			curr_ep = &udc->eps[i];
1844 		/* process the req queue until an uncomplete request */
1845 		list_for_each_entry_safe(curr_req, temp_req,
1846 			&curr_ep->queue, queue) {
1847 			status = process_ep_req(udc, i, curr_req);
1848 			if (status)
1849 				break;
1850 
1851 			/* write back status to req */
1852 			curr_req->req.status = status;
1853 
1854 			/* ep0 request completion */
1855 			if (ep_num == 0) {
1856 				ep0_req_complete(udc, curr_ep, curr_req);
1857 				break;
1858 			} else {
1859 				done(curr_ep, curr_req, status);
1860 			}
1861 		}
1862 	}
1863 }
1864 
1865 static void irq_process_reset(struct mv_udc *udc)
1866 {
1867 	u32 tmp;
1868 	unsigned int loops;
1869 
1870 	udc->ep0_dir = EP_DIR_OUT;
1871 	udc->ep0_state = WAIT_FOR_SETUP;
1872 	udc->remote_wakeup = 0;		/* default to 0 on reset */
1873 
1874 	/* The address bits are past bit 25-31. Set the address */
1875 	tmp = readl(&udc->op_regs->deviceaddr);
1876 	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1877 	writel(tmp, &udc->op_regs->deviceaddr);
1878 
1879 	/* Clear all the setup token semaphores */
1880 	tmp = readl(&udc->op_regs->epsetupstat);
1881 	writel(tmp, &udc->op_regs->epsetupstat);
1882 
1883 	/* Clear all the endpoint complete status bits */
1884 	tmp = readl(&udc->op_regs->epcomplete);
1885 	writel(tmp, &udc->op_regs->epcomplete);
1886 
1887 	/* wait until all endptprime bits cleared */
1888 	loops = LOOPS(PRIME_TIMEOUT);
1889 	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1890 		if (loops == 0) {
1891 			dev_err(&udc->dev->dev,
1892 				"Timeout for ENDPTPRIME = 0x%x\n",
1893 				readl(&udc->op_regs->epprime));
1894 			break;
1895 		}
1896 		loops--;
1897 		udelay(LOOPS_USEC);
1898 	}
1899 
1900 	/* Write 1s to the Flush register */
1901 	writel((u32)~0, &udc->op_regs->epflush);
1902 
1903 	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1904 		dev_info(&udc->dev->dev, "usb bus reset\n");
1905 		udc->usb_state = USB_STATE_DEFAULT;
1906 		/* reset all the queues, stop all USB activities */
1907 		gadget_reset(udc, udc->driver);
1908 	} else {
1909 		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1910 			readl(&udc->op_regs->portsc));
1911 
1912 		/*
1913 		 * re-initialize
1914 		 * controller reset
1915 		 */
1916 		udc_reset(udc);
1917 
1918 		/* reset all the queues, stop all USB activities */
1919 		stop_activity(udc, udc->driver);
1920 
1921 		/* reset ep0 dQH and endptctrl */
1922 		ep0_reset(udc);
1923 
1924 		/* enable interrupt and set controller to run state */
1925 		udc_start(udc);
1926 
1927 		udc->usb_state = USB_STATE_ATTACHED;
1928 	}
1929 }
1930 
1931 static void handle_bus_resume(struct mv_udc *udc)
1932 {
1933 	udc->usb_state = udc->resume_state;
1934 	udc->resume_state = 0;
1935 
1936 	/* report resume to the driver */
1937 	if (udc->driver) {
1938 		if (udc->driver->resume) {
1939 			spin_unlock(&udc->lock);
1940 			udc->driver->resume(&udc->gadget);
1941 			spin_lock(&udc->lock);
1942 		}
1943 	}
1944 }
1945 
1946 static void irq_process_suspend(struct mv_udc *udc)
1947 {
1948 	udc->resume_state = udc->usb_state;
1949 	udc->usb_state = USB_STATE_SUSPENDED;
1950 
1951 	if (udc->driver->suspend) {
1952 		spin_unlock(&udc->lock);
1953 		udc->driver->suspend(&udc->gadget);
1954 		spin_lock(&udc->lock);
1955 	}
1956 }
1957 
1958 static void irq_process_port_change(struct mv_udc *udc)
1959 {
1960 	u32 portsc;
1961 
1962 	portsc = readl(&udc->op_regs->portsc[0]);
1963 	if (!(portsc & PORTSCX_PORT_RESET)) {
1964 		/* Get the speed */
1965 		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1966 		switch (speed) {
1967 		case PORTSCX_PORT_SPEED_HIGH:
1968 			udc->gadget.speed = USB_SPEED_HIGH;
1969 			break;
1970 		case PORTSCX_PORT_SPEED_FULL:
1971 			udc->gadget.speed = USB_SPEED_FULL;
1972 			break;
1973 		case PORTSCX_PORT_SPEED_LOW:
1974 			udc->gadget.speed = USB_SPEED_LOW;
1975 			break;
1976 		default:
1977 			udc->gadget.speed = USB_SPEED_UNKNOWN;
1978 			break;
1979 		}
1980 	}
1981 
1982 	if (portsc & PORTSCX_PORT_SUSPEND) {
1983 		udc->resume_state = udc->usb_state;
1984 		udc->usb_state = USB_STATE_SUSPENDED;
1985 		if (udc->driver->suspend) {
1986 			spin_unlock(&udc->lock);
1987 			udc->driver->suspend(&udc->gadget);
1988 			spin_lock(&udc->lock);
1989 		}
1990 	}
1991 
1992 	if (!(portsc & PORTSCX_PORT_SUSPEND)
1993 		&& udc->usb_state == USB_STATE_SUSPENDED) {
1994 		handle_bus_resume(udc);
1995 	}
1996 
1997 	if (!udc->resume_state)
1998 		udc->usb_state = USB_STATE_DEFAULT;
1999 }
2000 
2001 static void irq_process_error(struct mv_udc *udc)
2002 {
2003 	/* Increment the error count */
2004 	udc->errors++;
2005 }
2006 
2007 static irqreturn_t mv_udc_irq(int irq, void *dev)
2008 {
2009 	struct mv_udc *udc = (struct mv_udc *)dev;
2010 	u32 status, intr;
2011 
2012 	/* Disable ISR when stopped bit is set */
2013 	if (udc->stopped)
2014 		return IRQ_NONE;
2015 
2016 	spin_lock(&udc->lock);
2017 
2018 	status = readl(&udc->op_regs->usbsts);
2019 	intr = readl(&udc->op_regs->usbintr);
2020 	status &= intr;
2021 
2022 	if (status == 0) {
2023 		spin_unlock(&udc->lock);
2024 		return IRQ_NONE;
2025 	}
2026 
2027 	/* Clear all the interrupts occurred */
2028 	writel(status, &udc->op_regs->usbsts);
2029 
2030 	if (status & USBSTS_ERR)
2031 		irq_process_error(udc);
2032 
2033 	if (status & USBSTS_RESET)
2034 		irq_process_reset(udc);
2035 
2036 	if (status & USBSTS_PORT_CHANGE)
2037 		irq_process_port_change(udc);
2038 
2039 	if (status & USBSTS_INT)
2040 		irq_process_tr_complete(udc);
2041 
2042 	if (status & USBSTS_SUSPEND)
2043 		irq_process_suspend(udc);
2044 
2045 	spin_unlock(&udc->lock);
2046 
2047 	return IRQ_HANDLED;
2048 }
2049 
2050 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2051 {
2052 	struct mv_udc *udc = (struct mv_udc *)dev;
2053 
2054 	/* polling VBUS and init phy may cause too much time*/
2055 	if (udc->qwork)
2056 		queue_work(udc->qwork, &udc->vbus_work);
2057 
2058 	return IRQ_HANDLED;
2059 }
2060 
2061 static void mv_udc_vbus_work(struct work_struct *work)
2062 {
2063 	struct mv_udc *udc;
2064 	unsigned int vbus;
2065 
2066 	udc = container_of(work, struct mv_udc, vbus_work);
2067 	if (!udc->pdata->vbus)
2068 		return;
2069 
2070 	vbus = udc->pdata->vbus->poll();
2071 	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2072 
2073 	if (vbus == VBUS_HIGH)
2074 		mv_udc_vbus_session(&udc->gadget, 1);
2075 	else if (vbus == VBUS_LOW)
2076 		mv_udc_vbus_session(&udc->gadget, 0);
2077 }
2078 
2079 /* release device structure */
2080 static void gadget_release(struct device *_dev)
2081 {
2082 	struct mv_udc *udc;
2083 
2084 	udc = dev_get_drvdata(_dev);
2085 
2086 	complete(udc->done);
2087 }
2088 
2089 static int mv_udc_remove(struct platform_device *pdev)
2090 {
2091 	struct mv_udc *udc;
2092 
2093 	udc = platform_get_drvdata(pdev);
2094 
2095 	usb_del_gadget_udc(&udc->gadget);
2096 
2097 	if (udc->qwork) {
2098 		flush_workqueue(udc->qwork);
2099 		destroy_workqueue(udc->qwork);
2100 	}
2101 
2102 	/* free memory allocated in probe */
2103 	dma_pool_destroy(udc->dtd_pool);
2104 
2105 	if (udc->ep_dqh)
2106 		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2107 			udc->ep_dqh, udc->ep_dqh_dma);
2108 
2109 	mv_udc_disable(udc);
2110 
2111 	/* free dev, wait for the release() finished */
2112 	wait_for_completion(udc->done);
2113 
2114 	return 0;
2115 }
2116 
2117 static int mv_udc_probe(struct platform_device *pdev)
2118 {
2119 	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
2120 	struct mv_udc *udc;
2121 	int retval = 0;
2122 	struct resource *r;
2123 	size_t size;
2124 
2125 	if (pdata == NULL) {
2126 		dev_err(&pdev->dev, "missing platform_data\n");
2127 		return -ENODEV;
2128 	}
2129 
2130 	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2131 	if (udc == NULL)
2132 		return -ENOMEM;
2133 
2134 	udc->done = &release_done;
2135 	udc->pdata = dev_get_platdata(&pdev->dev);
2136 	spin_lock_init(&udc->lock);
2137 
2138 	udc->dev = pdev;
2139 
2140 	if (pdata->mode == MV_USB_MODE_OTG) {
2141 		udc->transceiver = devm_usb_get_phy(&pdev->dev,
2142 					USB_PHY_TYPE_USB2);
2143 		if (IS_ERR(udc->transceiver)) {
2144 			retval = PTR_ERR(udc->transceiver);
2145 
2146 			if (retval == -ENXIO)
2147 				return retval;
2148 
2149 			udc->transceiver = NULL;
2150 			return -EPROBE_DEFER;
2151 		}
2152 	}
2153 
2154 	/* udc only have one sysclk. */
2155 	udc->clk = devm_clk_get(&pdev->dev, NULL);
2156 	if (IS_ERR(udc->clk))
2157 		return PTR_ERR(udc->clk);
2158 
2159 	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2160 	if (r == NULL) {
2161 		dev_err(&pdev->dev, "no I/O memory resource defined\n");
2162 		return -ENODEV;
2163 	}
2164 
2165 	udc->cap_regs = (struct mv_cap_regs __iomem *)
2166 		devm_ioremap(&pdev->dev, r->start, resource_size(r));
2167 	if (udc->cap_regs == NULL) {
2168 		dev_err(&pdev->dev, "failed to map I/O memory\n");
2169 		return -EBUSY;
2170 	}
2171 
2172 	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2173 	if (r == NULL) {
2174 		dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
2175 		return -ENODEV;
2176 	}
2177 
2178 	udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2179 	if (udc->phy_regs == NULL) {
2180 		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2181 		return -EBUSY;
2182 	}
2183 
2184 	/* we will acces controller register, so enable the clk */
2185 	retval = mv_udc_enable_internal(udc);
2186 	if (retval)
2187 		return retval;
2188 
2189 	udc->op_regs =
2190 		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2191 		+ (readl(&udc->cap_regs->caplength_hciversion)
2192 			& CAPLENGTH_MASK));
2193 	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2194 
2195 	/*
2196 	 * some platform will use usb to download image, it may not disconnect
2197 	 * usb gadget before loading kernel. So first stop udc here.
2198 	 */
2199 	udc_stop(udc);
2200 	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2201 
2202 	size = udc->max_eps * sizeof(struct mv_dqh) *2;
2203 	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2204 	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
2205 					&udc->ep_dqh_dma, GFP_KERNEL);
2206 
2207 	if (udc->ep_dqh == NULL) {
2208 		dev_err(&pdev->dev, "allocate dQH memory failed\n");
2209 		retval = -ENOMEM;
2210 		goto err_disable_clock;
2211 	}
2212 	udc->ep_dqh_size = size;
2213 
2214 	/* create dTD dma_pool resource */
2215 	udc->dtd_pool = dma_pool_create("mv_dtd",
2216 			&pdev->dev,
2217 			sizeof(struct mv_dtd),
2218 			DTD_ALIGNMENT,
2219 			DMA_BOUNDARY);
2220 
2221 	if (!udc->dtd_pool) {
2222 		retval = -ENOMEM;
2223 		goto err_free_dma;
2224 	}
2225 
2226 	size = udc->max_eps * sizeof(struct mv_ep) *2;
2227 	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2228 	if (udc->eps == NULL) {
2229 		retval = -ENOMEM;
2230 		goto err_destroy_dma;
2231 	}
2232 
2233 	/* initialize ep0 status request structure */
2234 	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
2235 					GFP_KERNEL);
2236 	if (!udc->status_req) {
2237 		retval = -ENOMEM;
2238 		goto err_destroy_dma;
2239 	}
2240 	INIT_LIST_HEAD(&udc->status_req->queue);
2241 
2242 	/* allocate a small amount of memory to get valid address */
2243 	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2244 	udc->status_req->req.dma = DMA_ADDR_INVALID;
2245 
2246 	udc->resume_state = USB_STATE_NOTATTACHED;
2247 	udc->usb_state = USB_STATE_POWERED;
2248 	udc->ep0_dir = EP_DIR_OUT;
2249 	udc->remote_wakeup = 0;
2250 
2251 	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2252 	if (r == NULL) {
2253 		dev_err(&pdev->dev, "no IRQ resource defined\n");
2254 		retval = -ENODEV;
2255 		goto err_destroy_dma;
2256 	}
2257 	udc->irq = r->start;
2258 	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
2259 		IRQF_SHARED, driver_name, udc)) {
2260 		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
2261 			udc->irq);
2262 		retval = -ENODEV;
2263 		goto err_destroy_dma;
2264 	}
2265 
2266 	/* initialize gadget structure */
2267 	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
2268 	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
2269 	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
2270 	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
2271 	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
2272 
2273 	/* the "gadget" abstracts/virtualizes the controller */
2274 	udc->gadget.name = driver_name;		/* gadget name */
2275 
2276 	eps_init(udc);
2277 
2278 	/* VBUS detect: we can disable/enable clock on demand.*/
2279 	if (udc->transceiver)
2280 		udc->clock_gating = 1;
2281 	else if (pdata->vbus) {
2282 		udc->clock_gating = 1;
2283 		retval = devm_request_threaded_irq(&pdev->dev,
2284 				pdata->vbus->irq, NULL,
2285 				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2286 		if (retval) {
2287 			dev_info(&pdev->dev,
2288 				"Can not request irq for VBUS, "
2289 				"disable clock gating\n");
2290 			udc->clock_gating = 0;
2291 		}
2292 
2293 		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2294 		if (!udc->qwork) {
2295 			dev_err(&pdev->dev, "cannot create workqueue\n");
2296 			retval = -ENOMEM;
2297 			goto err_destroy_dma;
2298 		}
2299 
2300 		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2301 	}
2302 
2303 	/*
2304 	 * When clock gating is supported, we can disable clk and phy.
2305 	 * If not, it means that VBUS detection is not supported, we
2306 	 * have to enable vbus active all the time to let controller work.
2307 	 */
2308 	if (udc->clock_gating)
2309 		mv_udc_disable_internal(udc);
2310 	else
2311 		udc->vbus_active = 1;
2312 
2313 	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2314 			gadget_release);
2315 	if (retval)
2316 		goto err_create_workqueue;
2317 
2318 	platform_set_drvdata(pdev, udc);
2319 	dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
2320 		udc->clock_gating ? "with" : "without");
2321 
2322 	return 0;
2323 
2324 err_create_workqueue:
2325 	destroy_workqueue(udc->qwork);
2326 err_destroy_dma:
2327 	dma_pool_destroy(udc->dtd_pool);
2328 err_free_dma:
2329 	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2330 			udc->ep_dqh, udc->ep_dqh_dma);
2331 err_disable_clock:
2332 	mv_udc_disable_internal(udc);
2333 
2334 	return retval;
2335 }
2336 
2337 #ifdef CONFIG_PM
2338 static int mv_udc_suspend(struct device *dev)
2339 {
2340 	struct mv_udc *udc;
2341 
2342 	udc = dev_get_drvdata(dev);
2343 
2344 	/* if OTG is enabled, the following will be done in OTG driver*/
2345 	if (udc->transceiver)
2346 		return 0;
2347 
2348 	if (udc->pdata->vbus && udc->pdata->vbus->poll)
2349 		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2350 			dev_info(&udc->dev->dev, "USB cable is connected!\n");
2351 			return -EAGAIN;
2352 		}
2353 
2354 	/*
2355 	 * only cable is unplugged, udc can suspend.
2356 	 * So do not care about clock_gating == 1.
2357 	 */
2358 	if (!udc->clock_gating) {
2359 		udc_stop(udc);
2360 
2361 		spin_lock_irq(&udc->lock);
2362 		/* stop all usb activities */
2363 		stop_activity(udc, udc->driver);
2364 		spin_unlock_irq(&udc->lock);
2365 
2366 		mv_udc_disable_internal(udc);
2367 	}
2368 
2369 	return 0;
2370 }
2371 
2372 static int mv_udc_resume(struct device *dev)
2373 {
2374 	struct mv_udc *udc;
2375 	int retval;
2376 
2377 	udc = dev_get_drvdata(dev);
2378 
2379 	/* if OTG is enabled, the following will be done in OTG driver*/
2380 	if (udc->transceiver)
2381 		return 0;
2382 
2383 	if (!udc->clock_gating) {
2384 		retval = mv_udc_enable_internal(udc);
2385 		if (retval)
2386 			return retval;
2387 
2388 		if (udc->driver && udc->softconnect) {
2389 			udc_reset(udc);
2390 			ep0_reset(udc);
2391 			udc_start(udc);
2392 		}
2393 	}
2394 
2395 	return 0;
2396 }
2397 
2398 static const struct dev_pm_ops mv_udc_pm_ops = {
2399 	.suspend	= mv_udc_suspend,
2400 	.resume		= mv_udc_resume,
2401 };
2402 #endif
2403 
2404 static void mv_udc_shutdown(struct platform_device *pdev)
2405 {
2406 	struct mv_udc *udc;
2407 	u32 mode;
2408 
2409 	udc = platform_get_drvdata(pdev);
2410 	/* reset controller mode to IDLE */
2411 	mv_udc_enable(udc);
2412 	mode = readl(&udc->op_regs->usbmode);
2413 	mode &= ~3;
2414 	writel(mode, &udc->op_regs->usbmode);
2415 	mv_udc_disable(udc);
2416 }
2417 
2418 static struct platform_driver udc_driver = {
2419 	.probe		= mv_udc_probe,
2420 	.remove		= mv_udc_remove,
2421 	.shutdown	= mv_udc_shutdown,
2422 	.driver		= {
2423 		.name	= "mv-udc",
2424 #ifdef CONFIG_PM
2425 		.pm	= &mv_udc_pm_ops,
2426 #endif
2427 	},
2428 };
2429 
2430 module_platform_driver(udc_driver);
2431 MODULE_ALIAS("platform:mv-udc");
2432 MODULE_DESCRIPTION(DRIVER_DESC);
2433 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2434 MODULE_VERSION(DRIVER_VERSION);
2435 MODULE_LICENSE("GPL");
2436