1 /*
2  * driver/usb/gadget/fsl_qe_udc.c
3  *
4  * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
5  *
6  * 	Xie Xiaobo <X.Xie@freescale.com>
7  * 	Li Yang <leoli@freescale.com>
8  * 	Based on bareboard code from Shlomi Gridish.
9  *
10  * Description:
11  * Freescale QE/CPM USB Peripheral Controller Driver
12  * The controller can be found on the MPC8360, MPC8272, etc.
13  * MPC8360 Rev 1.1 may need a QE microcode update
14  *
15  * This program is free software; you can redistribute it and/or modify it
16  * under the terms of the GNU General Public License as published by the
17  * Free Software Foundation;  either version 2 of the License, or (at your
18  * option) any later version.
19  */
20 
21 #undef USB_TRACE
22 
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/ioport.h>
26 #include <linux/types.h>
27 #include <linux/errno.h>
28 #include <linux/err.h>
29 #include <linux/slab.h>
30 #include <linux/list.h>
31 #include <linux/interrupt.h>
32 #include <linux/io.h>
33 #include <linux/moduleparam.h>
34 #include <linux/of_address.h>
35 #include <linux/of_irq.h>
36 #include <linux/of_platform.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/usb/ch9.h>
39 #include <linux/usb/gadget.h>
40 #include <linux/usb/otg.h>
41 #include <soc/fsl/qe/qe.h>
42 #include <asm/cpm.h>
43 #include <asm/dma.h>
44 #include <asm/reg.h>
45 #include "fsl_qe_udc.h"
46 
47 #define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
48 #define DRIVER_AUTHOR   "Xie XiaoBo"
49 #define DRIVER_VERSION  "1.0"
50 
51 #define DMA_ADDR_INVALID        (~(dma_addr_t)0)
52 
53 static const char driver_name[] = "fsl_qe_udc";
54 static const char driver_desc[] = DRIVER_DESC;
55 
56 /* ep names matter to the gadget layer; they must follow the ep_match() naming convention */
57 static const char *const ep_name[] = {
58 	"ep0-control", /* everyone has ep0 */
59 	/* 3 configurable endpoints */
60 	"ep1",
61 	"ep2",
62 	"ep3",
63 };
64 
65 static struct usb_endpoint_descriptor qe_ep0_desc = {
66 	.bLength =		USB_DT_ENDPOINT_SIZE,
67 	.bDescriptorType =	USB_DT_ENDPOINT,
68 
69 	.bEndpointAddress =	0,
70 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
71 	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
72 };
73 
74 /********************************************************************
75  *      Internal helper functions: start
76 ********************************************************************/
77 /*-----------------------------------------------------------------
78  * done() - retire a request; caller blocked irqs
79  *--------------------------------------------------------------*/
80 static void done(struct qe_ep *ep, struct qe_req *req, int status)
81 {
82 	struct qe_udc *udc = ep->udc;
83 	unsigned char stopped = ep->stopped;
84 
85 	/* ep_queue() added this request to the tail of the endpoint's
86 	 * ep->queue list; remove it from that list here before
87 	 * handing it back to the gadget driver
88 	 */
89 	list_del_init(&req->queue);
90 
91 	/* req.status should be set as -EINPROGRESS in ep_queue() */
92 	if (req->req.status == -EINPROGRESS)
93 		req->req.status = status;
94 	else
95 		status = req->req.status;
96 
97 	if (req->mapped) {
98 		dma_unmap_single(udc->gadget.dev.parent,
99 			req->req.dma, req->req.length,
100 			ep_is_in(ep)
101 				? DMA_TO_DEVICE
102 				: DMA_FROM_DEVICE);
103 		req->req.dma = DMA_ADDR_INVALID;
104 		req->mapped = 0;
105 	} else
106 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
107 			req->req.dma, req->req.length,
108 			ep_is_in(ep)
109 				? DMA_TO_DEVICE
110 				: DMA_FROM_DEVICE);
111 
112 	if (status && (status != -ESHUTDOWN))
113 		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
114 			ep->ep.name, &req->req, status,
115 			req->req.actual, req->req.length);
116 
117 	/* don't modify queue heads during completion callback */
118 	ep->stopped = 1;
119 	spin_unlock(&udc->lock);
120 
121 	usb_gadget_giveback_request(&ep->ep, &req->req);
122 
123 	spin_lock(&udc->lock);
124 
125 	ep->stopped = stopped;
126 }
127 
128 /*-----------------------------------------------------------------
129  * nuke(): delete all requests related to this ep
130  *--------------------------------------------------------------*/
131 static void nuke(struct qe_ep *ep, int status)
132 {
133 	/* complete and drop every request still linked to this ep */
134 	while (!list_empty(&ep->queue)) {
135 		struct qe_req *req = NULL;
136 		req = list_entry(ep->queue.next, struct qe_req, queue);
137 
138 		done(ep, req, status);
139 	}
140 }
141 
142 /*---------------------------------------------------------------------------*
143  * USB and endpoint manipulation (parameter RAM and register setup)          *
144  *---------------------------------------------------------------------------*/
145 /* @value: 1 -- set stall, 0 -- clear stall */
146 static int qe_eprx_stall_change(struct qe_ep *ep, int value)
147 {
148 	u16 tem_usep;
149 	u8 epnum = ep->epnum;
150 	struct qe_udc *udc = ep->udc;
151 
152 	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
153 	tem_usep = tem_usep & ~USB_RHS_MASK;
154 	if (value == 1)
155 		tem_usep |= USB_RHS_STALL;
156 	else if (ep->dir == USB_DIR_IN)
157 		tem_usep |= USB_RHS_IGNORE_OUT;
158 
159 	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
160 	return 0;
161 }
162 
163 static int qe_eptx_stall_change(struct qe_ep *ep, int value)
164 {
165 	u16 tem_usep;
166 	u8 epnum = ep->epnum;
167 	struct qe_udc *udc = ep->udc;
168 
169 	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
170 	tem_usep = tem_usep & ~USB_THS_MASK;
171 	if (value == 1)
172 		tem_usep |= USB_THS_STALL;
173 	else if (ep->dir == USB_DIR_OUT)
174 		tem_usep |= USB_THS_IGNORE_IN;
175 
176 	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
177 
178 	return 0;
179 }
180 
181 static int qe_ep0_stall(struct qe_udc *udc)
182 {
183 	qe_eptx_stall_change(&udc->eps[0], 1);
184 	qe_eprx_stall_change(&udc->eps[0], 1);
185 	udc->ep0_state = WAIT_FOR_SETUP;
186 	udc->ep0_dir = 0;
187 	return 0;
188 }
189 
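/*
 * NACK further OUT transactions on this endpoint and mask the RX and
 * busy interrupts until reception is re-enabled (see qe_eprx_normal()).
 */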
190 static int qe_eprx_nack(struct qe_ep *ep)
191 {
192 	u8 epnum = ep->epnum;
193 	struct qe_udc *udc = ep->udc;
194 
195 	if (ep->state == EP_STATE_IDLE) {
196 		/* Set the ep's nack */
197 		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
198 				USB_RHS_MASK, USB_RHS_NACK);
199 
200 		/* Mask Rx and Busy interrupts */
201 		clrbits16(&udc->usb_regs->usb_usbmr,
202 				(USB_E_RXB_MASK | USB_E_BSY_MASK));
203 
204 		ep->state = EP_STATE_NACK;
205 	}
206 	return 0;
207 }
208 
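/*
 * Leave the NACK state: re-enable reception, acknowledge any pending
 * RX/busy events and unmask their interrupts.
 */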
209 static int qe_eprx_normal(struct qe_ep *ep)
210 {
211 	struct qe_udc *udc = ep->udc;
212 
213 	if (ep->state == EP_STATE_NACK) {
214 		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
215 				USB_RTHS_MASK, USB_THS_IGNORE_IN);
216 
217 		/* Unmask RX interrupts */
218 		out_be16(&udc->usb_regs->usb_usber,
219 				USB_E_BSY_MASK | USB_E_RXB_MASK);
220 		setbits16(&udc->usb_regs->usb_usbmr,
221 				(USB_E_RXB_MASK | USB_E_BSY_MASK));
222 
223 		ep->state = EP_STATE_IDLE;
224 		ep->has_data = 0;
225 	}
226 
227 	return 0;
228 }
229 
230 static int qe_ep_cmd_stoptx(struct qe_ep *ep)
231 {
232 	if (ep->udc->soc_type == PORT_CPM)
233 		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
234 				CPM_USB_STOP_TX_OPCODE);
235 	else
236 		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
237 				ep->epnum, 0);
238 
239 	return 0;
240 }
241 
242 static int qe_ep_cmd_restarttx(struct qe_ep *ep)
243 {
244 	if (ep->udc->soc_type == PORT_CPM)
245 		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
246 				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
247 	else
248 		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
249 				ep->epnum, 0);
250 
251 	return 0;
252 }
253 
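/*
 * Flush the endpoint's TX FIFO: stop transmission, issue the flush
 * command, rewind the TX BD pointers in the parameter RAM and restart.
 */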
254 static int qe_ep_flushtxfifo(struct qe_ep *ep)
255 {
256 	struct qe_udc *udc = ep->udc;
257 	int i;
258 
259 	i = (int)ep->epnum;
260 
261 	qe_ep_cmd_stoptx(ep);
262 	out_8(&udc->usb_regs->usb_uscom,
263 		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
264 	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
265 	out_be32(&udc->ep_param[i]->tstate, 0);
266 	out_be16(&udc->ep_param[i]->tbcnt, 0);
267 
268 	ep->c_txbd = ep->txbase;
269 	ep->n_txbd = ep->txbase;
270 	qe_ep_cmd_restarttx(ep);
271 	return 0;
272 }
273 
274 static int qe_ep_filltxfifo(struct qe_ep *ep)
275 {
276 	struct qe_udc *udc = ep->udc;
277 
278 	out_8(&udc->usb_regs->usb_uscom,
279 			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
280 	return 0;
281 }
282 
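/* Reinitialize the endpoint's BD rings: mark all RX BDs empty and clear
 * the TX BDs, keeping the wrap bit on the last descriptor of each ring. */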
283 static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
284 {
285 	struct qe_ep *ep;
286 	u32 bdring_len;
287 	struct qe_bd __iomem *bd;
288 	int i;
289 
290 	ep = &udc->eps[pipe_num];
291 
292 	if (ep->dir == USB_DIR_OUT)
293 		bdring_len = USB_BDRING_LEN_RX;
294 	else
295 		bdring_len = USB_BDRING_LEN;
296 
297 	bd = ep->rxbase;
298 	for (i = 0; i < (bdring_len - 1); i++) {
299 		out_be32((u32 __iomem *)bd, R_E | R_I);
300 		bd++;
301 	}
302 	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
303 
304 	bd = ep->txbase;
305 	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
306 		out_be32(&bd->buf, 0);
307 		out_be32((u32 __iomem *)bd, 0);
308 		bd++;
309 	}
310 	out_be32((u32 __iomem *)bd, T_W);
311 
312 	return 0;
313 }
314 
315 static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
316 {
317 	struct qe_ep *ep;
318 	u16 tmpusep;
319 
320 	ep = &udc->eps[pipe_num];
321 	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
322 	tmpusep &= ~USB_RTHS_MASK;
323 
324 	switch (ep->dir) {
325 	case USB_DIR_BOTH:
326 		qe_ep_flushtxfifo(ep);
327 		break;
328 	case USB_DIR_OUT:
329 		tmpusep |= USB_THS_IGNORE_IN;
330 		break;
331 	case USB_DIR_IN:
332 		qe_ep_flushtxfifo(ep);
333 		tmpusep |= USB_RHS_IGNORE_OUT;
334 		break;
335 	default:
336 		break;
337 	}
338 	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
339 
340 	qe_epbds_reset(udc, pipe_num);
341 
342 	return 0;
343 }
344 
345 static int qe_ep_toggledata01(struct qe_ep *ep)
346 {
347 	ep->data01 ^= 0x1;
348 	return 0;
349 }
350 
351 static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
352 {
353 	struct qe_ep *ep = &udc->eps[pipe_num];
354 	unsigned long tmp_addr = 0;
355 	struct usb_ep_para __iomem *epparam;
356 	int i;
357 	struct qe_bd __iomem *bd;
358 	int bdring_len;
359 
360 	if (ep->dir == USB_DIR_OUT)
361 		bdring_len = USB_BDRING_LEN_RX;
362 	else
363 		bdring_len = USB_BDRING_LEN;
364 
365 	epparam = udc->ep_param[pipe_num];
366 	/* allocate MURAM for the BD rings and set the ep parameters */
367 	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
368 				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
369 	if (IS_ERR_VALUE(tmp_addr))
370 		return -ENOMEM;
371 
372 	out_be16(&epparam->rbase, (u16)tmp_addr);
373 	out_be16(&epparam->tbase, (u16)(tmp_addr +
374 				(sizeof(struct qe_bd) * bdring_len)));
375 
376 	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
377 	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
378 
379 	ep->rxbase = cpm_muram_addr(tmp_addr);
380 	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
381 				* bdring_len));
382 	ep->n_rxbd = ep->rxbase;
383 	ep->e_rxbd = ep->rxbase;
384 	ep->n_txbd = ep->txbase;
385 	ep->c_txbd = ep->txbase;
386 	ep->data01 = 0; /* data0 */
387 
388 	/* Init TX and RX bds */
389 	bd = ep->rxbase;
390 	for (i = 0; i < bdring_len - 1; i++) {
391 		out_be32(&bd->buf, 0);
392 		out_be32((u32 __iomem *)bd, 0);
393 		bd++;
394 	}
395 	out_be32(&bd->buf, 0);
396 	out_be32((u32 __iomem *)bd, R_W);
397 
398 	bd = ep->txbase;
399 	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
400 		out_be32(&bd->buf, 0);
401 		out_be32((u32 __iomem *)bd, 0);
402 		bd++;
403 	}
404 	out_be32(&bd->buf, 0);
405 	out_be32((u32 __iomem *)bd, T_W);
406 
407 	return 0;
408 }
409 
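/*
 * Set up the receive side of an endpoint: allocate a qe_frame, allocate one
 * contiguous rx buffer covering the whole BD ring, map (or sync) it for DMA
 * and point every RX BD at its slice of that buffer.
 */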
410 static int qe_ep_rxbd_update(struct qe_ep *ep)
411 {
412 	unsigned int size;
413 	int i;
414 	unsigned int tmp;
415 	struct qe_bd __iomem *bd;
416 	unsigned int bdring_len;
417 
418 	if (ep->rxbase == NULL)
419 		return -EINVAL;
420 
421 	bd = ep->rxbase;
422 
423 	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
424 	if (!ep->rxframe)
425 		return -ENOMEM;
426 
427 	qe_frame_init(ep->rxframe);
428 
429 	if (ep->dir == USB_DIR_OUT)
430 		bdring_len = USB_BDRING_LEN_RX;
431 	else
432 		bdring_len = USB_BDRING_LEN;
433 
434 	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
435 	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
436 	if (!ep->rxbuffer) {
437 		kfree(ep->rxframe);
438 		return -ENOMEM;
439 	}
440 
441 	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
442 	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
443 		ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
444 					ep->rxbuffer,
445 					size,
446 					DMA_FROM_DEVICE);
447 		ep->rxbufmap = 1;
448 	} else {
449 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
450 					ep->rxbuf_d, size,
451 					DMA_FROM_DEVICE);
452 		ep->rxbufmap = 0;
453 	}
454 
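	/* carve the rx buffer into one slice per BD; the start address is
	 * rounded up to a 4-byte boundary */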
455 	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
456 	tmp = ep->rxbuf_d;
457 	tmp = (u32)(((tmp >> 2) << 2) + 4);
458 
459 	for (i = 0; i < bdring_len - 1; i++) {
460 		out_be32(&bd->buf, tmp);
461 		out_be32((u32 __iomem *)bd, (R_E | R_I));
462 		tmp = tmp + size;
463 		bd++;
464 	}
465 	out_be32(&bd->buf, tmp);
466 	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
467 
468 	return 0;
469 }
470 
471 static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
472 {
473 	struct qe_ep *ep = &udc->eps[pipe_num];
474 	struct usb_ep_para __iomem *epparam;
475 	u16 usep, logepnum;
476 	u16 tmp;
477 	u8 rtfcr = 0;
478 
479 	epparam = udc->ep_param[pipe_num];
480 
481 	usep = 0;
482 	logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
483 	usep |= (logepnum << USB_EPNUM_SHIFT);
484 
485 	switch (ep->ep.desc->bmAttributes & 0x03) {
486 	case USB_ENDPOINT_XFER_BULK:
487 		usep |= USB_TRANS_BULK;
488 		break;
489 	case USB_ENDPOINT_XFER_ISOC:
490 		usep |=  USB_TRANS_ISO;
491 		break;
492 	case USB_ENDPOINT_XFER_INT:
493 		usep |= USB_TRANS_INT;
494 		break;
495 	default:
496 		usep |= USB_TRANS_CTR;
497 		break;
498 	}
499 
500 	switch (ep->dir) {
501 	case USB_DIR_OUT:
502 		usep |= USB_THS_IGNORE_IN;
503 		break;
504 	case USB_DIR_IN:
505 		usep |= USB_RHS_IGNORE_OUT;
506 		break;
507 	default:
508 		break;
509 	}
510 	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
511 
512 	rtfcr = 0x30;
513 	out_8(&epparam->rbmr, rtfcr);
514 	out_8(&epparam->tbmr, rtfcr);
515 
516 	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
517 	/* MRBLR must be divisible by 4 */
518 	tmp = (u16)(((tmp >> 2) << 2) + 4);
519 	out_be16(&epparam->mrblr, tmp);
520 
521 	return 0;
522 }
523 
524 static int qe_ep_init(struct qe_udc *udc,
525 		      unsigned char pipe_num,
526 		      const struct usb_endpoint_descriptor *desc)
527 {
528 	struct qe_ep *ep = &udc->eps[pipe_num];
529 	unsigned long flags;
530 	int reval = 0;
531 	u16 max = 0;
532 
533 	max = usb_endpoint_maxp(desc);
534 
535 	/* check that the max packet size is valid for this endpoint;
536 	 * refer to USB 2.0 spec, table 9-13
537 	 */
538 	if (pipe_num != 0) {
539 		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
540 		case USB_ENDPOINT_XFER_BULK:
541 			if (strstr(ep->ep.name, "-iso")
542 					|| strstr(ep->ep.name, "-int"))
543 				goto en_done;
544 			switch (udc->gadget.speed) {
545 			case USB_SPEED_HIGH:
546 				if ((max == 128) || (max == 256) || (max == 512))
547 					break;
548 			default:
549 				switch (max) {
550 				case 4:
551 				case 8:
552 				case 16:
553 				case 32:
554 				case 64:
555 					break;
556 				default:
557 				case USB_SPEED_LOW:
558 					goto en_done;
559 				}
560 			}
561 			break;
562 		case USB_ENDPOINT_XFER_INT:
563 			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
564 				goto en_done;
565 			switch (udc->gadget.speed) {
566 			case USB_SPEED_HIGH:
567 				if (max <= 1024)
568 					break;
569 			case USB_SPEED_FULL:
570 				if (max <= 64)
571 					break;
572 			default:
573 				if (max <= 8)
574 					break;
575 				goto en_done;
576 			}
577 			break;
578 		case USB_ENDPOINT_XFER_ISOC:
579 			if (strstr(ep->ep.name, "-bulk")
580 				|| strstr(ep->ep.name, "-int"))
581 				goto en_done;
582 			switch (udc->gadget.speed) {
583 			case USB_SPEED_HIGH:
584 				if (max <= 1024)
585 					break;
586 			case USB_SPEED_FULL:
587 				if (max <= 1023)
588 					break;
589 			default:
590 				goto en_done;
591 			}
592 			break;
593 		case USB_ENDPOINT_XFER_CONTROL:
594 			if (strstr(ep->ep.name, "-iso")
595 				|| strstr(ep->ep.name, "-int"))
596 				goto en_done;
597 			switch (udc->gadget.speed) {
598 			case USB_SPEED_HIGH:
599 			case USB_SPEED_FULL:
600 				switch (max) {
601 				case 1:
602 				case 2:
603 				case 4:
604 				case 8:
605 				case 16:
606 				case 32:
607 				case 64:
608 					break;
609 				default:
610 					goto en_done;
611 				}
612 			case USB_SPEED_LOW:
613 				switch (max) {
614 				case 1:
615 				case 2:
616 				case 4:
617 				case 8:
618 					break;
619 				default:
620 					goto en_done;
621 				}
622 			default:
623 				goto en_done;
624 			}
625 			break;
626 
627 		default:
628 			goto en_done;
629 		}
630 	} /* if pipe_num != 0 */
631 
632 	spin_lock_irqsave(&udc->lock, flags);
633 
634 	/* initialize ep structure */
635 	ep->ep.maxpacket = max;
636 	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
637 	ep->ep.desc = desc;
638 	ep->stopped = 0;
639 	ep->init = 1;
640 
641 	if (pipe_num == 0) {
642 		ep->dir = USB_DIR_BOTH;
643 		udc->ep0_dir = USB_DIR_OUT;
644 		udc->ep0_state = WAIT_FOR_SETUP;
645 	} else	{
646 		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
647 		case USB_DIR_OUT:
648 			ep->dir = USB_DIR_OUT;
649 			break;
650 		case USB_DIR_IN:
651 			ep->dir = USB_DIR_IN;
652 		default:
653 			break;
654 		}
655 	}
656 
657 	/* hardware-specific setup */
658 	qe_ep_bd_init(udc, pipe_num);
659 	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
660 		reval = qe_ep_rxbd_update(ep);
661 		if (reval)
662 			goto en_done1;
663 	}
664 
665 	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
666 		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
667 		if (!ep->txframe)
668 			goto en_done2;
669 		qe_frame_init(ep->txframe);
670 	}
671 
672 	qe_ep_register_init(udc, pipe_num);
673 
674 	/* Now HW will be NAKing transfers to that EP,
675 	 * until a buffer is queued to it. */
676 	spin_unlock_irqrestore(&udc->lock, flags);
677 
678 	return 0;
679 en_done2:
680 	kfree(ep->rxbuffer);
681 	kfree(ep->rxframe);
682 en_done1:
683 	spin_unlock_irqrestore(&udc->lock, flags);
684 en_done:
685 	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
686 	return -ENODEV;
687 }
688 
689 static inline void qe_usb_enable(struct qe_udc *udc)
690 {
691 	setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
692 }
693 
694 static inline void qe_usb_disable(struct qe_udc *udc)
695 {
696 	clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
697 }
698 
699 /*----------------------------------------------------------------------------*
700  *		USB and EP basic manipulation functions end		      *
701  *----------------------------------------------------------------------------*/
702 
703 
704 /******************************************************************************
705 		UDC transmit and receive process
706  ******************************************************************************/
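/* hand the current RX BD back to the controller (set empty + interrupt,
 * preserve the wrap bit) and advance e_rxbd */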
707 static void recycle_one_rxbd(struct qe_ep *ep)
708 {
709 	u32 bdstatus;
710 
711 	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
712 	bdstatus = R_I | R_E | (bdstatus & R_W);
713 	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
714 
715 	if (bdstatus & R_W)
716 		ep->e_rxbd = ep->rxbase;
717 	else
718 		ep->e_rxbd++;
719 }
720 
721 static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
722 {
723 	u32 bdstatus;
724 	struct qe_bd __iomem *bd, *nextbd;
725 	unsigned char stop = 0;
726 
727 	nextbd = ep->n_rxbd;
728 	bd = ep->e_rxbd;
729 	bdstatus = in_be32((u32 __iomem *)bd);
730 
731 	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
732 		bdstatus = R_E | R_I | (bdstatus & R_W);
733 		out_be32((u32 __iomem *)bd, bdstatus);
734 
735 		if (bdstatus & R_W)
736 			bd = ep->rxbase;
737 		else
738 			bd++;
739 
740 		bdstatus = in_be32((u32 __iomem *)bd);
741 		if (stopatnext && (bd == nextbd))
742 			stop = 1;
743 	}
744 
745 	ep->e_rxbd = bd;
746 }
747 
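/*
 * Return consumed RX BDs to the controller, resynchronizing with the
 * hardware rbptr when needed; then acknowledge a pending busy event and
 * re-enable reception if a request is waiting.
 */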
748 static void ep_recycle_rxbds(struct qe_ep *ep)
749 {
750 	struct qe_bd __iomem *bd = ep->n_rxbd;
751 	u32 bdstatus;
752 	u8 epnum = ep->epnum;
753 	struct qe_udc *udc = ep->udc;
754 
755 	bdstatus = in_be32((u32 __iomem *)bd);
756 	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
757 		bd = ep->rxbase +
758 				((in_be16(&udc->ep_param[epnum]->rbptr) -
759 				  in_be16(&udc->ep_param[epnum]->rbase))
760 				 >> 3);
761 		bdstatus = in_be32((u32 __iomem *)bd);
762 
763 		if (bdstatus & R_W)
764 			bd = ep->rxbase;
765 		else
766 			bd++;
767 
768 		ep->e_rxbd = bd;
769 		recycle_rxbds(ep, 0);
770 		ep->e_rxbd = ep->n_rxbd;
771 	} else
772 		recycle_rxbds(ep, 1);
773 
774 	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
775 		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
776 
777 	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
778 		qe_eprx_normal(ep);
779 
780 	ep->localnack = 0;
781 }
782 
783 static void setup_received_handle(struct qe_udc *udc,
784 					struct usb_ctrlrequest *setup);
785 static int qe_ep_rxframe_handle(struct qe_ep *ep);
786 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
787 /* when the BD PID is SETUP, handle the packet */
788 static int ep0_setup_handle(struct qe_udc *udc)
789 {
790 	struct qe_ep *ep = &udc->eps[0];
791 	struct qe_frame *pframe;
792 	unsigned int fsize;
793 	u8 *cp;
794 
795 	pframe = ep->rxframe;
796 	if ((frame_get_info(pframe) & PID_SETUP)
797 			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
798 		fsize = frame_get_length(pframe);
799 		if (unlikely(fsize != 8))
800 			return -EINVAL;
801 		cp = (u8 *)&udc->local_setup_buff;
802 		memcpy(cp, pframe->data, fsize);
803 		ep->data01 = 1;
804 
805 		/* handle the usb command based on the usb_ctrlrequest */
806 		setup_received_handle(udc, &udc->local_setup_buff);
807 		return 0;
808 	}
809 	return -EINVAL;
810 }
811 
812 static int qe_ep0_rx(struct qe_udc *udc)
813 {
814 	struct qe_ep *ep = &udc->eps[0];
815 	struct qe_frame *pframe;
816 	struct qe_bd __iomem *bd;
817 	u32 bdstatus, length;
818 	u32 vaddr;
819 
820 	pframe = ep->rxframe;
821 
822 	if (ep->dir == USB_DIR_IN) {
823 		dev_err(udc->dev, "ep0 not a control endpoint\n");
824 		return -EINVAL;
825 	}
826 
827 	bd = ep->n_rxbd;
828 	bdstatus = in_be32((u32 __iomem *)bd);
829 	length = bdstatus & BD_LENGTH_MASK;
830 
831 	while (!(bdstatus & R_E) && length) {
832 		if ((bdstatus & R_F) && (bdstatus & R_L)
833 			&& !(bdstatus & R_ERROR)) {
834 			if (length == USB_CRC_SIZE) {
835 				udc->ep0_state = WAIT_FOR_SETUP;
836 				dev_vdbg(udc->dev,
837 					"receive a ZLP in status phase\n");
838 			} else {
839 				qe_frame_clean(pframe);
840 				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
841 				frame_set_data(pframe, (u8 *)vaddr);
842 				frame_set_length(pframe,
843 						(length - USB_CRC_SIZE));
844 				frame_set_status(pframe, FRAME_OK);
845 				switch (bdstatus & R_PID) {
846 				case R_PID_SETUP:
847 					frame_set_info(pframe, PID_SETUP);
848 					break;
849 				case R_PID_DATA1:
850 					frame_set_info(pframe, PID_DATA1);
851 					break;
852 				default:
853 					frame_set_info(pframe, PID_DATA0);
854 					break;
855 				}
856 
857 				if ((bdstatus & R_PID) == R_PID_SETUP)
858 					ep0_setup_handle(udc);
859 				else
860 					qe_ep_rxframe_handle(ep);
861 			}
862 		} else {
863 			dev_err(udc->dev, "The received frame has an error!\n");
864 		}
865 
866 		/* note: don't clear the rxbd's buffer address */
867 		recycle_one_rxbd(ep);
868 
869 		/* Get next BD */
870 		if (bdstatus & R_W)
871 			bd = ep->rxbase;
872 		else
873 			bd++;
874 
875 		bdstatus = in_be32((u32 __iomem *)bd);
876 		length = bdstatus & BD_LENGTH_MASK;
877 
878 	}
879 
880 	ep->n_rxbd = bd;
881 
882 	return 0;
883 }
884 
885 static int qe_ep_rxframe_handle(struct qe_ep *ep)
886 {
887 	struct qe_frame *pframe;
888 	u8 framepid = 0;
889 	unsigned int fsize;
890 	u8 *cp;
891 	struct qe_req *req;
892 
893 	pframe = ep->rxframe;
894 
895 	if (frame_get_info(pframe) & PID_DATA1)
896 		framepid = 0x1;
897 
898 	if (framepid != ep->data01) {
899 		dev_err(ep->udc->dev, "data01 toggle mismatch!\n");
900 		return -EIO;
901 	}
902 
903 	fsize = frame_get_length(pframe);
904 	if (list_empty(&ep->queue)) {
905 		dev_err(ep->udc->dev, "%s has no request queued!\n", ep->name);
906 	} else {
907 		req = list_entry(ep->queue.next, struct qe_req, queue);
908 
909 		cp = (u8 *)(req->req.buf) + req->req.actual;
910 		if (cp) {
911 			memcpy(cp, pframe->data, fsize);
912 			req->req.actual += fsize;
913 			if ((fsize < ep->ep.maxpacket) ||
914 					(req->req.actual >= req->req.length)) {
915 				if (ep->epnum == 0)
916 					ep0_req_complete(ep->udc, req);
917 				else
918 					done(ep, req, 0);
919 				if (list_empty(&ep->queue) && ep->epnum != 0)
920 					qe_eprx_nack(ep);
921 			}
922 		}
923 	}
924 
925 	qe_ep_toggledata01(ep);
926 
927 	return 0;
928 }
929 
930 static void ep_rx_tasklet(unsigned long data)
931 {
932 	struct qe_udc *udc = (struct qe_udc *)data;
933 	struct qe_ep *ep;
934 	struct qe_frame *pframe;
935 	struct qe_bd __iomem *bd;
936 	unsigned long flags;
937 	u32 bdstatus, length;
938 	u32 vaddr, i;
939 
940 	spin_lock_irqsave(&udc->lock, flags);
941 
942 	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
943 		ep = &udc->eps[i];
944 
945 		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
946 			dev_dbg(udc->dev,
947 				"This is a transmit ep or disable tasklet!\n");
948 			continue;
949 		}
950 
951 		pframe = ep->rxframe;
952 		bd = ep->n_rxbd;
953 		bdstatus = in_be32((u32 __iomem *)bd);
954 		length = bdstatus & BD_LENGTH_MASK;
955 
956 		while (!(bdstatus & R_E) && length) {
957 			if (list_empty(&ep->queue)) {
958 				qe_eprx_nack(ep);
959 				dev_dbg(udc->dev,
960 					"The rxep have noreq %d\n",
961 					ep->has_data);
962 				break;
963 			}
964 
965 			if ((bdstatus & R_F) && (bdstatus & R_L)
966 				&& !(bdstatus & R_ERROR)) {
967 				qe_frame_clean(pframe);
968 				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
969 				frame_set_data(pframe, (u8 *)vaddr);
970 				frame_set_length(pframe,
971 						(length - USB_CRC_SIZE));
972 				frame_set_status(pframe, FRAME_OK);
973 				switch (bdstatus & R_PID) {
974 				case R_PID_DATA1:
975 					frame_set_info(pframe, PID_DATA1);
976 					break;
977 				case R_PID_SETUP:
978 					frame_set_info(pframe, PID_SETUP);
979 					break;
980 				default:
981 					frame_set_info(pframe, PID_DATA0);
982 					break;
983 				}
984 				/* handle the rx frame */
985 				qe_ep_rxframe_handle(ep);
986 			} else {
987 				dev_err(udc->dev,
988 					"error in received frame\n");
989 			}
990 			/* note: don't clear the rxbd's buffer address */
991 			/*clear the length */
992 			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
993 			ep->has_data--;
994 			if (!(ep->localnack))
995 				recycle_one_rxbd(ep);
996 
997 			/* Get next BD */
998 			if (bdstatus & R_W)
999 				bd = ep->rxbase;
1000 			else
1001 				bd++;
1002 
1003 			bdstatus = in_be32((u32 __iomem *)bd);
1004 			length = bdstatus & BD_LENGTH_MASK;
1005 		}
1006 
1007 		ep->n_rxbd = bd;
1008 
1009 		if (ep->localnack)
1010 			ep_recycle_rxbds(ep);
1011 
1012 		ep->enable_tasklet = 0;
1013 	} /* for i=1 */
1014 
1015 	spin_unlock_irqrestore(&udc->lock, flags);
1016 }
1017 
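/*
 * RX interrupt path for non-ep0 endpoints: count the BDs the controller has
 * filled, NACK further packets when the ring is nearly full or nothing is
 * queued, and defer the actual copying to the rx tasklet.
 */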
1018 static int qe_ep_rx(struct qe_ep *ep)
1019 {
1020 	struct qe_udc *udc;
1021 	struct qe_frame *pframe;
1022 	struct qe_bd __iomem *bd;
1023 	u16 swoffs, ucoffs, emptybds;
1024 
1025 	udc = ep->udc;
1026 	pframe = ep->rxframe;
1027 
1028 	if (ep->dir == USB_DIR_IN) {
1029 		dev_err(udc->dev, "transmit ep in rx function\n");
1030 		return -EINVAL;
1031 	}
1032 
1033 	bd = ep->n_rxbd;
1034 
1035 	swoffs = (u16)(bd - ep->rxbase);
1036 	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1037 			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1038 	if (swoffs < ucoffs)
1039 		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1040 	else
1041 		emptybds = swoffs - ucoffs;
1042 
1043 	if (emptybds < MIN_EMPTY_BDS) {
1044 		qe_eprx_nack(ep);
1045 		ep->localnack = 1;
1046 		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1047 	}
1048 	ep->has_data = USB_BDRING_LEN_RX - emptybds;
1049 
1050 	if (list_empty(&ep->queue)) {
1051 		qe_eprx_nack(ep);
1052 		dev_vdbg(udc->dev, "The rx ep has no request queued; %d BDs hold data\n",
1053 				ep->has_data);
1054 		return 0;
1055 	}
1056 
1057 	tasklet_schedule(&udc->rx_tasklet);
1058 	ep->enable_tasklet = 1;
1059 
1060 	return 0;
1061 }
1062 
1063 /* send the data carried in a frame, regardless of the current tx_req */
1064 static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1065 {
1066 	struct qe_udc *udc = ep->udc;
1067 	struct qe_bd __iomem *bd;
1068 	u16 saveusbmr;
1069 	u32 bdstatus, pidmask;
1070 	u32 paddr;
1071 
1072 	if (ep->dir == USB_DIR_OUT) {
1073 		dev_err(udc->dev, "receive ep passed to tx function\n");
1074 		return -EINVAL;
1075 	}
1076 
1077 	/* Disable the Tx interrupt */
1078 	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1079 	out_be16(&udc->usb_regs->usb_usbmr,
1080 			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1081 
1082 	bd = ep->n_txbd;
1083 	bdstatus = in_be32((u32 __iomem *)bd);
1084 
1085 	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1086 		if (frame_get_length(frame) == 0) {
1087 			frame_set_data(frame, udc->nullbuf);
1088 			frame_set_length(frame, 2);
1089 			frame->info |= (ZLP | NO_CRC);
1090 			dev_vdbg(udc->dev, "the frame size = 0\n");
1091 		}
1092 		paddr = virt_to_phys((void *)frame->data);
1093 		out_be32(&bd->buf, paddr);
1094 		bdstatus = (bdstatus&T_W);
1095 		if (!(frame_get_info(frame) & NO_CRC))
1096 			bdstatus |= T_R | T_I | T_L | T_TC
1097 					| frame_get_length(frame);
1098 		else
1099 			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1100 
1101 		/* if the packet is a ZLP in status phase */
1102 		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1103 			ep->data01 = 0x1;
1104 
1105 		if (ep->data01) {
1106 			pidmask = T_PID_DATA1;
1107 			frame->info |= PID_DATA1;
1108 		} else {
1109 			pidmask = T_PID_DATA0;
1110 			frame->info |= PID_DATA0;
1111 		}
1112 		bdstatus |= T_CNF;
1113 		bdstatus |= pidmask;
1114 		out_be32((u32 __iomem *)bd, bdstatus);
1115 		qe_ep_filltxfifo(ep);
1116 
1117 		/* enable the TX interrupt */
1118 		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1119 
1120 		qe_ep_toggledata01(ep);
1121 		if (bdstatus & T_W)
1122 			ep->n_txbd = ep->txbase;
1123 		else
1124 			ep->n_txbd++;
1125 
1126 		return 0;
1127 	} else {
1128 		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1129 		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1130 		return -EBUSY;
1131 	}
1132 }
1133 
1134 /* called after a BD has been transmitted; advances and completes the
1135  * current tx_req (not used for ep0)           */
1136 static int txcomplete(struct qe_ep *ep, unsigned char restart)
1137 {
1138 	if (ep->tx_req != NULL) {
1139 		struct qe_req *req = ep->tx_req;
1140 		unsigned zlp = 0, last_len = 0;
1141 
1142 		last_len = min_t(unsigned, req->req.length - ep->sent,
1143 				ep->ep.maxpacket);
1144 
1145 		if (!restart) {
1146 			int asent = ep->last;
1147 			ep->sent += asent;
1148 			ep->last -= asent;
1149 		} else {
1150 			ep->last = 0;
1151 		}
1152 
1153 		/* a zlp is needed when req->req.zero is set */
1154 		if (req->req.zero) {
1155 			if (last_len == 0 ||
1156 				(req->req.length % ep->ep.maxpacket) != 0)
1157 				zlp = 0;
1158 			else
1159 				zlp = 1;
1160 		} else
1161 			zlp = 0;
1162 
1163 		/* the request has been transmitted completely */
1164 		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1165 			done(ep, ep->tx_req, 0);
1166 			ep->tx_req = NULL;
1167 			ep->last = 0;
1168 			ep->sent = 0;
1169 		}
1170 	}
1171 
1172 	/* try to pick up a new tx_req for this endpoint */
1173 	if (ep->tx_req == NULL) {
1174 		if (!list_empty(&ep->queue)) {
1175 			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
1176 							queue);
1177 			ep->last = 0;
1178 			ep->sent = 0;
1179 		}
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 /* given a frame and the current tx_req, send some data */
1186 static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1187 {
1188 	unsigned int size;
1189 	u8 *buf;
1190 
1191 	qe_frame_clean(frame);
1192 	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1193 				ep->ep.maxpacket);
1194 	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1195 	if (buf && size) {
1196 		ep->last = size;
1197 		ep->tx_req->req.actual += size;
1198 		frame_set_data(frame, buf);
1199 		frame_set_length(frame, size);
1200 		frame_set_status(frame, FRAME_OK);
1201 		frame_set_info(frame, 0);
1202 		return qe_ep_tx(ep, frame);
1203 	}
1204 	return -EIO;
1205 }
1206 
1207 /* given a frame struct, send a ZLP */
1208 static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1209 {
1210 	struct qe_udc *udc = ep->udc;
1211 
1212 	if (frame == NULL)
1213 		return -ENODEV;
1214 
1215 	qe_frame_clean(frame);
1216 	frame_set_data(frame, (u8 *)udc->nullbuf);
1217 	frame_set_length(frame, 2);
1218 	frame_set_status(frame, FRAME_OK);
1219 	frame_set_info(frame, (ZLP | NO_CRC | infor));
1220 
1221 	return qe_ep_tx(ep, frame);
1222 }
1223 
1224 static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1225 {
1226 	struct qe_req *req = ep->tx_req;
1227 	int reval;
1228 
1229 	if (req == NULL)
1230 		return -ENODEV;
1231 
1232 	if ((req->req.length - ep->sent) > 0)
1233 		reval = qe_usb_senddata(ep, frame);
1234 	else
1235 		reval = sendnulldata(ep, frame, 0);
1236 
1237 	return reval;
1238 }
1239 
1240 /* if direction is DIR_IN, the status stage is Device->Host;
1241  * if direction is DIR_OUT, the status stage is Host->Device.
1242  * In the status phase the udc creates the request itself */
1243 static int ep0_prime_status(struct qe_udc *udc, int direction)
1244 {
1245 
1246 	struct qe_ep *ep = &udc->eps[0];
1247 
1248 	if (direction == USB_DIR_IN) {
1249 		udc->ep0_state = DATA_STATE_NEED_ZLP;
1250 		udc->ep0_dir = USB_DIR_IN;
1251 		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1252 	} else {
1253 		udc->ep0_dir = USB_DIR_OUT;
1254 		udc->ep0_state = WAIT_FOR_OUT_STATUS;
1255 	}
1256 
1257 	return 0;
1258 }
1259 
1260 /* a request completed on ep0, whether a gadget request or a udc-internal one */
1261 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1262 {
1263 	struct qe_ep *ep = &udc->eps[0];
1264 	/* the usb and ep state were already updated in ch9setaddress() */
1265 
1266 	switch (udc->ep0_state) {
1267 	case DATA_STATE_XMIT:
1268 		done(ep, req, 0);
1269 		/* receive status phase */
1270 		if (ep0_prime_status(udc, USB_DIR_OUT))
1271 			qe_ep0_stall(udc);
1272 		break;
1273 
1274 	case DATA_STATE_NEED_ZLP:
1275 		done(ep, req, 0);
1276 		udc->ep0_state = WAIT_FOR_SETUP;
1277 		break;
1278 
1279 	case DATA_STATE_RECV:
1280 		done(ep, req, 0);
1281 		/* send status phase */
1282 		if (ep0_prime_status(udc, USB_DIR_IN))
1283 			qe_ep0_stall(udc);
1284 		break;
1285 
1286 	case WAIT_FOR_OUT_STATUS:
1287 		done(ep, req, 0);
1288 		udc->ep0_state = WAIT_FOR_SETUP;
1289 		break;
1290 
1291 	case WAIT_FOR_SETUP:
1292 		dev_vdbg(udc->dev, "Unexpected interrupt\n");
1293 		break;
1294 
1295 	default:
1296 		qe_ep0_stall(udc);
1297 		break;
1298 	}
1299 }
1300 
1301 static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1302 {
1303 	struct qe_req *tx_req = NULL;
1304 	struct qe_frame *frame = ep->txframe;
1305 
1306 	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1307 		if (!restart)
1308 			ep->udc->ep0_state = WAIT_FOR_SETUP;
1309 		else
1310 			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1311 		return 0;
1312 	}
1313 
1314 	tx_req = ep->tx_req;
1315 	if (tx_req != NULL) {
1316 		if (!restart) {
1317 			int asent = ep->last;
1318 			ep->sent += asent;
1319 			ep->last -= asent;
1320 		} else {
1321 			ep->last = 0;
1322 		}
1323 
1324 		/* the request has been transmitted completely */
1325 		if ((ep->tx_req->req.length - ep->sent) <= 0) {
1326 			ep->tx_req->req.actual = (unsigned int)ep->sent;
1327 			ep0_req_complete(ep->udc, ep->tx_req);
1328 			ep->tx_req = NULL;
1329 			ep->last = 0;
1330 			ep->sent = 0;
1331 		}
1332 	} else {
1333 		dev_vdbg(ep->udc->dev, "ep0 has no request to complete\n");
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static int ep0_txframe_handle(struct qe_ep *ep)
1340 {
1341 	/* if there was an error, transmit again */
1342 	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1343 		qe_ep_flushtxfifo(ep);
1344 		dev_vdbg(ep->udc->dev, "EP0 transmit data error!\n");
1345 		if (frame_get_info(ep->txframe) & PID_DATA0)
1346 			ep->data01 = 0;
1347 		else
1348 			ep->data01 = 1;
1349 
1350 		ep0_txcomplete(ep, 1);
1351 	} else
1352 		ep0_txcomplete(ep, 0);
1353 
1354 	frame_create_tx(ep, ep->txframe);
1355 	return 0;
1356 }
1357 
1358 static int qe_ep0_txconf(struct qe_ep *ep)
1359 {
1360 	struct qe_bd __iomem *bd;
1361 	struct qe_frame *pframe;
1362 	u32 bdstatus;
1363 
1364 	bd = ep->c_txbd;
1365 	bdstatus = in_be32((u32 __iomem *)bd);
1366 	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1367 		pframe = ep->txframe;
1368 
1369 		/* clear and recycle the BD */
1370 		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1371 		out_be32(&bd->buf, 0);
1372 		if (bdstatus & T_W)
1373 			ep->c_txbd = ep->txbase;
1374 		else
1375 			ep->c_txbd++;
1376 
1377 		if (ep->c_txbd == ep->n_txbd) {
1378 			if (bdstatus & DEVICE_T_ERROR) {
1379 				frame_set_status(pframe, FRAME_ERROR);
1380 				if (bdstatus & T_TO)
1381 					pframe->status |= TX_ER_TIMEOUT;
1382 				if (bdstatus & T_UN)
1383 					pframe->status |= TX_ER_UNDERUN;
1384 			}
1385 			ep0_txframe_handle(ep);
1386 		}
1387 
1388 		bd = ep->c_txbd;
1389 		bdstatus = in_be32((u32 __iomem *)bd);
1390 	}
1391 
1392 	return 0;
1393 }
1394 
1395 static int ep_txframe_handle(struct qe_ep *ep)
1396 {
1397 	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1398 		qe_ep_flushtxfifo(ep);
1399 		dev_vdbg(ep->udc->dev, "transmit data error on %s!\n", ep->name);
1400 		if (frame_get_info(ep->txframe) & PID_DATA0)
1401 			ep->data01 = 0;
1402 		else
1403 			ep->data01 = 1;
1404 
1405 		txcomplete(ep, 1);
1406 	} else
1407 		txcomplete(ep, 0);
1408 
1409 	frame_create_tx(ep, ep->txframe); /* send the data */
1410 	return 0;
1411 }
1412 
1413 /* confirm the already transmitted BDs */
1414 static int qe_ep_txconf(struct qe_ep *ep)
1415 {
1416 	struct qe_bd __iomem *bd;
1417 	struct qe_frame *pframe = NULL;
1418 	u32 bdstatus;
1419 	unsigned char breakonrxinterrupt = 0;
1420 
1421 	bd = ep->c_txbd;
1422 	bdstatus = in_be32((u32 __iomem *)bd);
1423 	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1424 		pframe = ep->txframe;
1425 		if (bdstatus & DEVICE_T_ERROR) {
1426 			frame_set_status(pframe, FRAME_ERROR);
1427 			if (bdstatus & T_TO)
1428 				pframe->status |= TX_ER_TIMEOUT;
1429 			if (bdstatus & T_UN)
1430 				pframe->status |= TX_ER_UNDERUN;
1431 		}
1432 
1433 		/* clear and recycle the BD */
1434 		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1435 		out_be32(&bd->buf, 0);
1436 		if (bdstatus & T_W)
1437 			ep->c_txbd = ep->txbase;
1438 		else
1439 			ep->c_txbd++;
1440 
1441 		/* handle the tx frame */
1442 		ep_txframe_handle(ep);
1443 		bd = ep->c_txbd;
1444 		bdstatus = in_be32((u32 __iomem *)bd);
1445 	}
1446 	if (breakonrxinterrupt)
1447 		return -EIO;
1448 	else
1449 		return 0;
1450 }
1451 
1452 /* A request was added to the queue; try to transmit a packet */
1453 static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1454 {
1455 	int reval = 0;
1456 
1457 	if (ep->tx_req == NULL) {
1458 		ep->sent = 0;
1459 		ep->last = 0;
1460 		txcomplete(ep, 0); /* can gain a new tx_req */
1461 		reval = frame_create_tx(ep, ep->txframe);
1462 	}
1463 	return reval;
1464 }
1465 
1466 /* copy data already received in the BD ring into the new request */
1467 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1468 {
1469 	struct qe_udc *udc = ep->udc;
1470 	struct qe_frame *pframe = NULL;
1471 	struct qe_bd __iomem *bd;
1472 	u32 bdstatus, length;
1473 	u32 vaddr, fsize;
1474 	u8 *cp;
1475 	u8 finish_req = 0;
1476 	u8 framepid;
1477 
1478 	if (list_empty(&ep->queue)) {
1479 		dev_vdbg(udc->dev, "the request has already finished!\n");
1480 		return 0;
1481 	}
1482 	pframe = ep->rxframe;
1483 
1484 	bd = ep->n_rxbd;
1485 	bdstatus = in_be32((u32 __iomem *)bd);
1486 	length = bdstatus & BD_LENGTH_MASK;
1487 
1488 	while (!(bdstatus & R_E) && length) {
1489 		if (finish_req)
1490 			break;
1491 		if ((bdstatus & R_F) && (bdstatus & R_L)
1492 					&& !(bdstatus & R_ERROR)) {
1493 			qe_frame_clean(pframe);
1494 			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1495 			frame_set_data(pframe, (u8 *)vaddr);
1496 			frame_set_length(pframe, (length - USB_CRC_SIZE));
1497 			frame_set_status(pframe, FRAME_OK);
1498 			switch (bdstatus & R_PID) {
1499 			case R_PID_DATA1:
1500 				frame_set_info(pframe, PID_DATA1); break;
1501 			default:
1502 				frame_set_info(pframe, PID_DATA0); break;
1503 			}
1504 			/* handle the rx frame */
1505 
1506 			if (frame_get_info(pframe) & PID_DATA1)
1507 				framepid = 0x1;
1508 			else
1509 				framepid = 0;
1510 
1511 			if (framepid != ep->data01) {
1512 				dev_vdbg(udc->dev, "data01 toggle mismatch!\n");
1513 			} else {
1514 				fsize = frame_get_length(pframe);
1515 
1516 				cp = (u8 *)(req->req.buf) + req->req.actual;
1517 				if (cp) {
1518 					memcpy(cp, pframe->data, fsize);
1519 					req->req.actual += fsize;
1520 					if ((fsize < ep->ep.maxpacket)
1521 						|| (req->req.actual >=
1522 							req->req.length)) {
1523 						finish_req = 1;
1524 						done(ep, req, 0);
1525 						if (list_empty(&ep->queue))
1526 							qe_eprx_nack(ep);
1527 					}
1528 				}
1529 				qe_ep_toggledata01(ep);
1530 			}
1531 		} else {
1532 			dev_err(udc->dev, "The received frame has an error!\n");
1533 		}
1534 
1535 		/* note: don't clear the rxbd's buffer address;
1536 		 * only clear the length */
1537 		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1538 		ep->has_data--;
1539 
1540 		/* Get next BD */
1541 		if (bdstatus & R_W)
1542 			bd = ep->rxbase;
1543 		else
1544 			bd++;
1545 
1546 		bdstatus = in_be32((u32 __iomem *)bd);
1547 		length = bdstatus & BD_LENGTH_MASK;
1548 	}
1549 
1550 	ep->n_rxbd = bd;
1551 	ep_recycle_rxbds(ep);
1552 
1553 	return 0;
1554 }
1555 
1556 /* handle a request that has just been added to the queue */
1557 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1558 {
1559 	if (ep->state == EP_STATE_NACK) {
1560 		if (ep->has_data <= 0) {
1561 			/* Enable rx and unmask rx interrupt */
1562 			qe_eprx_normal(ep);
1563 		} else {
1564 			/* Copy the data already present in the BDs */
1565 			ep_req_rx(ep, req);
1566 		}
1567 	}
1568 
1569 	return 0;
1570 }
1571 
1572 /********************************************************************
1573 	Internal helper functions: end
1574 ********************************************************************/
1575 
1576 /*-----------------------------------------------------------------------
1577 	Endpoint Management Functions For Gadget
1578  -----------------------------------------------------------------------*/
1579 static int qe_ep_enable(struct usb_ep *_ep,
1580 			 const struct usb_endpoint_descriptor *desc)
1581 {
1582 	struct qe_udc *udc;
1583 	struct qe_ep *ep;
1584 	int retval = 0;
1585 	unsigned char epnum;
1586 
1587 	ep = container_of(_ep, struct qe_ep, ep);
1588 
1589 	/* catch various bogus parameters */
1590 	if (!_ep || !desc || _ep->name == ep_name[0] ||
1591 			(desc->bDescriptorType != USB_DT_ENDPOINT))
1592 		return -EINVAL;
1593 
1594 	udc = ep->udc;
1595 	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1596 		return -ESHUTDOWN;
1597 
1598 	epnum = (u8)desc->bEndpointAddress & 0xF;
1599 
1600 	retval = qe_ep_init(udc, epnum, desc);
1601 	if (retval != 0) {
1602 		cpm_muram_free(cpm_muram_offset(ep->rxbase));
1603 		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1604 		return -EINVAL;
1605 	}
1606 	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1607 	return 0;
1608 }
1609 
1610 static int qe_ep_disable(struct usb_ep *_ep)
1611 {
1612 	struct qe_udc *udc;
1613 	struct qe_ep *ep;
1614 	unsigned long flags;
1615 	unsigned int size;
1616 
1617 	ep = container_of(_ep, struct qe_ep, ep);
1618 	udc = ep->udc;
1619 
1620 	if (!_ep || !ep->ep.desc) {
1621 		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1622 		return -EINVAL;
1623 	}
1624 
1625 	spin_lock_irqsave(&udc->lock, flags);
1626 	/* Nuke all pending requests (does flush) */
1627 	nuke(ep, -ESHUTDOWN);
1628 	ep->ep.desc = NULL;
1629 	ep->stopped = 1;
1630 	ep->tx_req = NULL;
1631 	qe_ep_reset(udc, ep->epnum);
1632 	spin_unlock_irqrestore(&udc->lock, flags);
1633 
1634 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
1635 
1636 	if (ep->dir == USB_DIR_OUT)
1637 		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1638 				(USB_BDRING_LEN_RX + 1);
1639 	else
1640 		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1641 				(USB_BDRING_LEN + 1);
1642 
1643 	if (ep->dir != USB_DIR_IN) {
1644 		kfree(ep->rxframe);
1645 		if (ep->rxbufmap) {
1646 			dma_unmap_single(udc->gadget.dev.parent,
1647 					ep->rxbuf_d, size,
1648 					DMA_FROM_DEVICE);
1649 			ep->rxbuf_d = DMA_ADDR_INVALID;
1650 		} else {
1651 			dma_sync_single_for_cpu(
1652 					udc->gadget.dev.parent,
1653 					ep->rxbuf_d, size,
1654 					DMA_FROM_DEVICE);
1655 		}
1656 		kfree(ep->rxbuffer);
1657 	}
1658 
1659 	if (ep->dir != USB_DIR_OUT)
1660 		kfree(ep->txframe);
1661 
1662 	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1663 	return 0;
1664 }
1665 
1666 static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1667 {
1668 	struct qe_req *req;
1669 
1670 	req = kzalloc(sizeof(*req), gfp_flags);
1671 	if (!req)
1672 		return NULL;
1673 
1674 	req->req.dma = DMA_ADDR_INVALID;
1675 
1676 	INIT_LIST_HEAD(&req->queue);
1677 
1678 	return &req->req;
1679 }
1680 
1681 static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1682 {
1683 	struct qe_req *req;
1684 
1685 	req = container_of(_req, struct qe_req, req);
1686 
1687 	if (_req)
1688 		kfree(req);
1689 }
1690 
1691 static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1692 {
1693 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1694 	struct qe_req *req = container_of(_req, struct qe_req, req);
1695 	struct qe_udc *udc;
1696 	int reval;
1697 
1698 	udc = ep->udc;
1699 	/* catch various bogus parameters */
1700 	if (!_req || !req->req.complete || !req->req.buf
1701 			|| !list_empty(&req->queue)) {
1702 		dev_dbg(udc->dev, "bad params\n");
1703 		return -EINVAL;
1704 	}
1705 	if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1706 		dev_dbg(udc->dev, "bad ep\n");
1707 		return -EINVAL;
1708 	}
1709 
1710 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1711 		return -ESHUTDOWN;
1712 
1713 	req->ep = ep;
1714 
1715 	/* map virtual address to hardware */
1716 	if (req->req.dma == DMA_ADDR_INVALID) {
1717 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1718 					req->req.buf,
1719 					req->req.length,
1720 					ep_is_in(ep)
1721 					? DMA_TO_DEVICE :
1722 					DMA_FROM_DEVICE);
1723 		req->mapped = 1;
1724 	} else {
1725 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1726 					req->req.dma, req->req.length,
1727 					ep_is_in(ep)
1728 					? DMA_TO_DEVICE :
1729 					DMA_FROM_DEVICE);
1730 		req->mapped = 0;
1731 	}
1732 
1733 	req->req.status = -EINPROGRESS;
1734 	req->req.actual = 0;
1735 
1736 	list_add_tail(&req->queue, &ep->queue);
1737 	dev_vdbg(udc->dev, "gadget queued a request on %s, length %d\n",
1738 			ep->name, req->req.length);
1739 
1740 	/* push the request to device */
1741 	if (ep_is_in(ep))
1742 		reval = ep_req_send(ep, req);
1743 
1744 	/* EP0 */
1745 	if (ep_index(ep) == 0 && req->req.length > 0) {
1746 		if (ep_is_in(ep))
1747 			udc->ep0_state = DATA_STATE_XMIT;
1748 		else
1749 			udc->ep0_state = DATA_STATE_RECV;
1750 	}
1751 
1752 	if (ep->dir == USB_DIR_OUT)
1753 		reval = ep_req_receive(ep, req);
1754 
1755 	return 0;
1756 }
1757 
1758 /* queues (submits) an I/O request to an endpoint */
1759 static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1760 		       gfp_t gfp_flags)
1761 {
1762 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1763 	struct qe_udc *udc = ep->udc;
1764 	unsigned long flags;
1765 	int ret;
1766 
1767 	spin_lock_irqsave(&udc->lock, flags);
1768 	ret = __qe_ep_queue(_ep, _req);
1769 	spin_unlock_irqrestore(&udc->lock, flags);
1770 	return ret;
1771 }
1772 
1773 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
1774 static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1775 {
1776 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1777 	struct qe_req *req;
1778 	unsigned long flags;
1779 
1780 	if (!_ep || !_req)
1781 		return -EINVAL;
1782 
1783 	spin_lock_irqsave(&ep->udc->lock, flags);
1784 
1785 	/* make sure it's actually queued on this endpoint */
1786 	list_for_each_entry(req, &ep->queue, queue) {
1787 		if (&req->req == _req)
1788 			break;
1789 	}
1790 
1791 	if (&req->req != _req) {
1792 		spin_unlock_irqrestore(&ep->udc->lock, flags);
1793 		return -EINVAL;
1794 	}
1795 
1796 	done(ep, req, -ECONNRESET);
1797 
1798 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1799 	return 0;
1800 }
1801 
1802 /*-----------------------------------------------------------------
1803  * modify the endpoint halt feature
1804  * @ep: the non-isochronous endpoint being stalled
1805  * @value: 1--set halt  0--clear halt
1806  * Returns zero, or a negative error code.
1807 *----------------------------------------------------------------*/
1808 static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1809 {
1810 	struct qe_ep *ep;
1811 	unsigned long flags;
1812 	int status = -EOPNOTSUPP;
1813 	struct qe_udc *udc;
1814 
1815 	ep = container_of(_ep, struct qe_ep, ep);
1816 	if (!_ep || !ep->ep.desc) {
1817 		status = -EINVAL;
1818 		goto out;
1819 	}
1820 
1821 	udc = ep->udc;
1822 	/* An attempt to halt an IN ep will fail if any transfer
1823 	 * requests are still queued */
1824 	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1825 		status = -EAGAIN;
1826 		goto out;
1827 	}
1828 
1829 	status = 0;
1830 	spin_lock_irqsave(&ep->udc->lock, flags);
1831 	qe_eptx_stall_change(ep, value);
1832 	qe_eprx_stall_change(ep, value);
1833 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1834 
1835 	if (ep->epnum == 0) {
1836 		udc->ep0_state = WAIT_FOR_SETUP;
1837 		udc->ep0_dir = 0;
1838 	}
1839 
1840 	/* set data toggle to DATA0 on clear halt */
1841 	if (value == 0)
1842 		ep->data01 = 0;
1843 out:
1844 	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1845 			value ?  "set" : "clear", status);
1846 
1847 	return status;
1848 }
1849 
1850 static const struct usb_ep_ops qe_ep_ops = {
1851 	.enable = qe_ep_enable,
1852 	.disable = qe_ep_disable,
1853 
1854 	.alloc_request = qe_alloc_request,
1855 	.free_request = qe_free_request,
1856 
1857 	.queue = qe_ep_queue,
1858 	.dequeue = qe_ep_dequeue,
1859 
1860 	.set_halt = qe_ep_set_halt,
1861 };
1862 
1863 /*------------------------------------------------------------------------
1864 	Gadget Driver Layer Operations
1865  ------------------------------------------------------------------------*/
1866 
1867 /* Get the current frame number */
1868 static int qe_get_frame(struct usb_gadget *gadget)
1869 {
1870 	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1871 	u16 tmp;
1872 
1873 	tmp = in_be16(&udc->usb_param->frame_n);
1874 	if (tmp & 0x8000)
1875 		return tmp & 0x07ff;
1876 	return -EINVAL;
1877 }
1878 
1879 static int fsl_qe_start(struct usb_gadget *gadget,
1880 		struct usb_gadget_driver *driver);
1881 static int fsl_qe_stop(struct usb_gadget *gadget);
1882 
1883 /* defined in usb_gadget.h */
1884 static const struct usb_gadget_ops qe_gadget_ops = {
1885 	.get_frame = qe_get_frame,
1886 	.udc_start = fsl_qe_start,
1887 	.udc_stop = fsl_qe_stop,
1888 };
1889 
1890 /*-------------------------------------------------------------------------
1891 	USB ep0 Setup process in BUS Enumeration
1892  -------------------------------------------------------------------------*/
1893 static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1894 {
1895 	struct qe_ep *ep = &udc->eps[pipe];
1896 
1897 	nuke(ep, -ECONNRESET);
1898 	ep->tx_req = NULL;
1899 	return 0;
1900 }
1901 
1902 static int reset_queues(struct qe_udc *udc)
1903 {
1904 	u8 pipe;
1905 
1906 	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1907 		udc_reset_ep_queue(udc, pipe);
1908 
1909 	/* report the reset; the driver is already quiesced */
1910 	spin_unlock(&udc->lock);
1911 	usb_gadget_udc_reset(&udc->gadget, udc->driver);
1912 	spin_lock(&udc->lock);
1913 
1914 	return 0;
1915 }
1916 
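/* SET_ADDRESS: record the address, enter the ADDRESS state and answer
 * with a zero-length IN status packet */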
1917 static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1918 			u16 length)
1919 {
1920 	/* Save the new address to device struct */
1921 	udc->device_address = (u8) value;
1922 	/* Update usb state */
1923 	udc->usb_state = USB_STATE_ADDRESS;
1924 
1925 	/* Status phase: send a ZLP */
1926 	if (ep0_prime_status(udc, USB_DIR_IN))
1927 		qe_ep0_stall(udc);
1928 }
1929 
1930 static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1931 {
1932 	struct qe_req *req = container_of(_req, struct qe_req, req);
1933 
1934 	req->req.buf = NULL;
1935 	kfree(req);
1936 }
1937 
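/*
 * GET_STATUS: build the two-byte status reply in udc->statusbuf and queue
 * it on ep0 as a driver-internal request, completed by ownercomplete().
 */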
1938 static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1939 			u16 index, u16 length)
1940 {
1941 	u16 usb_status = 0;
1942 	struct qe_req *req;
1943 	struct qe_ep *ep;
1944 	int status = 0;
1945 
1946 	ep = &udc->eps[0];
1947 	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1948 		/* Get device status */
1949 		usb_status = 1 << USB_DEVICE_SELF_POWERED;
1950 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1951 		/* Get interface status */
1952 		/* We don't have interface information in udc driver */
1953 		usb_status = 0;
1954 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1955 		/* Get endpoint status */
1956 		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1957 		struct qe_ep *target_ep = &udc->eps[pipe];
1958 		u16 usep;
1959 
1960 		/* stall if endpoint doesn't exist */
1961 		if (!target_ep->ep.desc)
1962 			goto stall;
1963 
1964 		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1965 		if (index & USB_DIR_IN) {
1966 			if (target_ep->dir != USB_DIR_IN)
1967 				goto stall;
1968 			if ((usep & USB_THS_MASK) == USB_THS_STALL)
1969 				usb_status = 1 << USB_ENDPOINT_HALT;
1970 		} else {
1971 			if (target_ep->dir != USB_DIR_OUT)
1972 				goto stall;
1973 			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1974 				usb_status = 1 << USB_ENDPOINT_HALT;
1975 		}
1976 	}
1977 
1978 	req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
1979 					struct qe_req, req);
1980 	req->req.length = 2;
1981 	req->req.buf = udc->statusbuf;
1982 	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
1983 	req->req.status = -EINPROGRESS;
1984 	req->req.actual = 0;
1985 	req->req.complete = ownercomplete;
1986 
1987 	udc->ep0_dir = USB_DIR_IN;
1988 
1989 	/* data phase */
1990 	status = __qe_ep_queue(&ep->ep, &req->req);
1991 
1992 	if (status == 0)
1993 		return;
1994 stall:
1995 	dev_err(udc->dev, "Can't respond to getstatus request\n");
1996 	qe_ep0_stall(udc);
1997 }
1998 
1999 /* handle the setup request, assuming the device is in a normal state */
2000 static void setup_received_handle(struct qe_udc *udc,
2001 				struct usb_ctrlrequest *setup)
2002 {
2003 	/* Fix endianness (udc->local_setup_buff is CPU-endian now) */
2004 	u16 wValue = le16_to_cpu(setup->wValue);
2005 	u16 wIndex = le16_to_cpu(setup->wIndex);
2006 	u16 wLength = le16_to_cpu(setup->wLength);
2007 
2008 	/* clear the previous request in the ep0 */
2009 	udc_reset_ep_queue(udc, 0);
2010 
2011 	if (setup->bRequestType & USB_DIR_IN)
2012 		udc->ep0_dir = USB_DIR_IN;
2013 	else
2014 		udc->ep0_dir = USB_DIR_OUT;
2015 
2016 	switch (setup->bRequest) {
2017 	case USB_REQ_GET_STATUS:
2018 		/* Data+Status phase from udc */
2019 		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2020 					!= (USB_DIR_IN | USB_TYPE_STANDARD))
2021 			break;
2022 		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2023 					wLength);
2024 		return;
2025 
2026 	case USB_REQ_SET_ADDRESS:
2027 		/* Status phase from udc */
2028 		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2029 						USB_RECIP_DEVICE))
2030 			break;
2031 		ch9setaddress(udc, wValue, wIndex, wLength);
2032 		return;
2033 
2034 	case USB_REQ_CLEAR_FEATURE:
2035 	case USB_REQ_SET_FEATURE:
2036 		/* Requests with no data phase, status phase from udc */
2037 		if ((setup->bRequestType & USB_TYPE_MASK)
2038 					!= USB_TYPE_STANDARD)
2039 			break;
2040 
2041 		if ((setup->bRequestType & USB_RECIP_MASK)
2042 				== USB_RECIP_ENDPOINT) {
2043 			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2044 			struct qe_ep *ep;
2045 
2046 			if (wValue != 0 || wLength != 0
2047 				|| pipe >= USB_MAX_ENDPOINTS)
2048 				break;
2049 			ep = &udc->eps[pipe];
2050 
2051 			spin_unlock(&udc->lock);
2052 			qe_ep_set_halt(&ep->ep,
2053 					(setup->bRequest == USB_REQ_SET_FEATURE)
2054 						? 1 : 0);
2055 			spin_lock(&udc->lock);
2056 		}
2057 
2058 		ep0_prime_status(udc, USB_DIR_IN);
2059 
2060 		return;
2061 
2062 	default:
2063 		break;
2064 	}
2065 
2066 	if (wLength) {
2067 		/* Data phase from gadget, status phase from udc */
2068 		if (setup->bRequestType & USB_DIR_IN) {
2069 			udc->ep0_state = DATA_STATE_XMIT;
2070 			udc->ep0_dir = USB_DIR_IN;
2071 		} else {
2072 			udc->ep0_state = DATA_STATE_RECV;
2073 			udc->ep0_dir = USB_DIR_OUT;
2074 		}
2075 		spin_unlock(&udc->lock);
2076 		if (udc->driver->setup(&udc->gadget,
2077 					&udc->local_setup_buff) < 0)
2078 			qe_ep0_stall(udc);
2079 		spin_lock(&udc->lock);
2080 	} else {
2081 		/* No data phase, IN status from gadget */
2082 		udc->ep0_dir = USB_DIR_IN;
2083 		spin_unlock(&udc->lock);
2084 		if (udc->driver->setup(&udc->gadget,
2085 					&udc->local_setup_buff) < 0)
2086 			qe_ep0_stall(udc);
2087 		spin_lock(&udc->lock);
2088 		udc->ep0_state = DATA_STATE_NEED_ZLP;
2089 	}
2090 }
2091 
2092 /*-------------------------------------------------------------------------
2093 	USB Interrupt handlers
2094  -------------------------------------------------------------------------*/
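/* the bus went idle: remember the current state and report suspend */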
2095 static void suspend_irq(struct qe_udc *udc)
2096 {
2097 	udc->resume_state = udc->usb_state;
2098 	udc->usb_state = USB_STATE_SUSPENDED;
2099 
	/* report suspend to the driver; serial.c does not support this */
2101 	if (udc->driver->suspend)
2102 		udc->driver->suspend(&udc->gadget);
2103 }
2104 
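/* bus activity resumed: restore the pre-suspend state and report resume */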
2105 static void resume_irq(struct qe_udc *udc)
2106 {
2107 	udc->usb_state = udc->resume_state;
2108 	udc->resume_state = 0;
2109 
	/* report resume to the driver; serial.c does not support this */
2111 	if (udc->driver->resume)
2112 		udc->driver->resume(&udc->gadget);
2113 }
2114 
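/* read the USBS register to decide whether the idle interrupt
 * means a suspend or a resume of the bus */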
2115 static void idle_irq(struct qe_udc *udc)
2116 {
2117 	u8 usbs;
2118 
2119 	usbs = in_8(&udc->usb_regs->usb_usbs);
2120 	if (usbs & USB_IDLE_STATUS_MASK) {
2121 		if ((udc->usb_state) != USB_STATE_SUSPENDED)
2122 			suspend_irq(udc);
2123 	} else {
2124 		if (udc->usb_state == USB_STATE_SUSPENDED)
2125 			resume_irq(udc);
2126 	}
2127 }
2128 
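/* USB bus reset: disable the controller, clear the device address,
 * reset every initialized endpoint and all queues, then re-enable */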
2129 static int reset_irq(struct qe_udc *udc)
2130 {
2131 	unsigned char i;
2132 
2133 	if (udc->usb_state == USB_STATE_DEFAULT)
2134 		return 0;
2135 
2136 	qe_usb_disable(udc);
2137 	out_8(&udc->usb_regs->usb_usadr, 0);
2138 
2139 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2140 		if (udc->eps[i].init)
2141 			qe_ep_reset(udc, i);
2142 	}
2143 
2144 	reset_queues(udc);
2145 	udc->usb_state = USB_STATE_DEFAULT;
2146 	udc->ep0_state = WAIT_FOR_SETUP;
2147 	udc->ep0_dir = USB_DIR_OUT;
2148 	qe_usb_enable(udc);
2149 	return 0;
2150 }
2151 
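/* busy interrupt: nothing to do for now */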
2152 static int bsy_irq(struct qe_udc *udc)
2153 {
2154 	return 0;
2155 }
2156 
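/* tx error interrupt: nothing to do for now */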
2157 static int txe_irq(struct qe_udc *udc)
2158 {
2159 	return 0;
2160 }
2161 
/* the ep0 tx interrupt is also handled here */
2163 static int tx_irq(struct qe_udc *udc)
2164 {
2165 	struct qe_ep *ep;
2166 	struct qe_bd __iomem *bd;
2167 	int i, res = 0;
2168 
2169 	if ((udc->usb_state == USB_STATE_ADDRESS)
2170 		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
2171 		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2172 
2173 	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2174 		ep = &udc->eps[i];
2175 		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2176 			bd = ep->c_txbd;
2177 			if (!(in_be32((u32 __iomem *)bd) & T_R)
2178 						&& (in_be32(&bd->buf))) {
2179 				/* confirm the transmitted bd */
2180 				if (ep->epnum == 0)
2181 					res = qe_ep0_txconf(ep);
2182 				else
2183 					res = qe_ep_txconf(ep);
2184 			}
2185 		}
2186 	}
2187 	return res;
2188 }
2189 
2190 
/* setup packet rx is also handled in this function */
2192 static void rx_irq(struct qe_udc *udc)
2193 {
2194 	struct qe_ep *ep;
2195 	struct qe_bd __iomem *bd;
2196 	int i;
2197 
2198 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2199 		ep = &udc->eps[i];
2200 		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2201 			bd = ep->n_rxbd;
2202 			if (!(in_be32((u32 __iomem *)bd) & R_E)
2203 						&& (in_be32(&bd->buf))) {
2204 				if (ep->epnum == 0) {
2205 					qe_ep0_rx(udc);
2206 				} else {
					/* non-setup packet receive */
2208 					qe_ep_rx(ep);
2209 				}
2210 			}
2211 		}
2212 	}
2213 }
2214 
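/* top-level interrupt handler: read the pending, unmasked events,
 * acknowledge them, and dispatch to the handlers above */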
2215 static irqreturn_t qe_udc_irq(int irq, void *_udc)
2216 {
2217 	struct qe_udc *udc = (struct qe_udc *)_udc;
2218 	u16 irq_src;
2219 	irqreturn_t status = IRQ_NONE;
2220 	unsigned long flags;
2221 
2222 	spin_lock_irqsave(&udc->lock, flags);
2223 
2224 	irq_src = in_be16(&udc->usb_regs->usb_usber) &
2225 		in_be16(&udc->usb_regs->usb_usbmr);
2226 	/* Clear notification bits */
2227 	out_be16(&udc->usb_regs->usb_usber, irq_src);
2228 	/* USB Interrupt */
2229 	if (irq_src & USB_E_IDLE_MASK) {
2230 		idle_irq(udc);
2231 		irq_src &= ~USB_E_IDLE_MASK;
2232 		status = IRQ_HANDLED;
2233 	}
2234 
2235 	if (irq_src & USB_E_TXB_MASK) {
2236 		tx_irq(udc);
2237 		irq_src &= ~USB_E_TXB_MASK;
2238 		status = IRQ_HANDLED;
2239 	}
2240 
2241 	if (irq_src & USB_E_RXB_MASK) {
2242 		rx_irq(udc);
2243 		irq_src &= ~USB_E_RXB_MASK;
2244 		status = IRQ_HANDLED;
2245 	}
2246 
2247 	if (irq_src & USB_E_RESET_MASK) {
2248 		reset_irq(udc);
2249 		irq_src &= ~USB_E_RESET_MASK;
2250 		status = IRQ_HANDLED;
2251 	}
2252 
2253 	if (irq_src & USB_E_BSY_MASK) {
2254 		bsy_irq(udc);
2255 		irq_src &= ~USB_E_BSY_MASK;
2256 		status = IRQ_HANDLED;
2257 	}
2258 
2259 	if (irq_src & USB_E_TXE_MASK) {
2260 		txe_irq(udc);
2261 		irq_src &= ~USB_E_TXE_MASK;
2262 		status = IRQ_HANDLED;
2263 	}
2264 
2265 	spin_unlock_irqrestore(&udc->lock, flags);
2266 
2267 	return status;
2268 }
2269 
2270 /*-------------------------------------------------------------------------
2271 	Gadget driver probe and unregister.
2272  --------------------------------------------------------------------------*/
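/* bind a gadget driver: hook it up to the udc, then enable the
 * controller and its default device interrupt mask */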
2273 static int fsl_qe_start(struct usb_gadget *gadget,
2274 		struct usb_gadget_driver *driver)
2275 {
2276 	struct qe_udc *udc;
2277 	unsigned long flags;
2278 
2279 	udc = container_of(gadget, struct qe_udc, gadget);
	/* a lock is needed, though it is unclear whether this lock or another should be used */
2281 	spin_lock_irqsave(&udc->lock, flags);
2282 
2283 	driver->driver.bus = NULL;
2284 	/* hook up the driver */
2285 	udc->driver = driver;
2286 	udc->gadget.speed = driver->max_speed;
2287 
2288 	/* Enable IRQ reg and Set usbcmd reg EN bit */
2289 	qe_usb_enable(udc);
2290 
2291 	out_be16(&udc->usb_regs->usb_usber, 0xffff);
2292 	out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2293 	udc->usb_state = USB_STATE_ATTACHED;
2294 	udc->ep0_state = WAIT_FOR_SETUP;
2295 	udc->ep0_dir = USB_DIR_OUT;
2296 	spin_unlock_irqrestore(&udc->lock, flags);
2297 
2298 	return 0;
2299 }
2300 
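/* unbind the gadget driver: stop the controller, flush every endpoint
 * queue and drop the driver reference */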
2301 static int fsl_qe_stop(struct usb_gadget *gadget)
2302 {
2303 	struct qe_udc *udc;
2304 	struct qe_ep *loop_ep;
2305 	unsigned long flags;
2306 
2307 	udc = container_of(gadget, struct qe_udc, gadget);
2308 	/* stop usb controller, disable intr */
2309 	qe_usb_disable(udc);
2310 
	/* in fact, not needed */
2312 	udc->usb_state = USB_STATE_ATTACHED;
2313 	udc->ep0_state = WAIT_FOR_SETUP;
2314 	udc->ep0_dir = 0;
2315 
	/* standard operation */
2317 	spin_lock_irqsave(&udc->lock, flags);
2318 	udc->gadget.speed = USB_SPEED_UNKNOWN;
2319 	nuke(&udc->eps[0], -ESHUTDOWN);
2320 	list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2321 		nuke(loop_ep, -ESHUTDOWN);
2322 	spin_unlock_irqrestore(&udc->lock, flags);
2323 
2324 	udc->driver = NULL;
2325 
2326 	return 0;
2327 }
2328 
/* allocate and set up the udc structure, including ep-param allocation */
2330 static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2331 {
2332 	struct qe_udc *udc;
2333 	struct device_node *np = ofdev->dev.of_node;
2334 	unsigned long tmp_addr = 0;
2335 	struct usb_device_para __iomem *usbpram;
2336 	unsigned int i;
2337 	u64 size;
2338 	u32 offset;
2339 
2340 	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2341 	if (!udc)
2342 		goto cleanup;
2343 
2344 	udc->dev = &ofdev->dev;
2345 
2346 	/* get default address of usb parameter in MURAM from device tree */
2347 	offset = *of_get_address(np, 1, &size, NULL);
2348 	udc->usb_param = cpm_muram_addr(offset);
2349 	memset_io(udc->usb_param, 0, size);
2350 
2351 	usbpram = udc->usb_param;
2352 	out_be16(&usbpram->frame_n, 0);
2353 	out_be32(&usbpram->rstate, 0);
2354 
2355 	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2356 					sizeof(struct usb_ep_para)),
2357 					   USB_EP_PARA_ALIGNMENT);
2358 	if (IS_ERR_VALUE(tmp_addr))
2359 		goto cleanup;
2360 
2361 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2362 		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2363 		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2364 		tmp_addr += 32;
2365 	}
2366 
2367 	memset_io(udc->ep_param[0], 0,
2368 			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2369 
2370 	udc->resume_state = USB_STATE_NOTATTACHED;
2371 	udc->usb_state = USB_STATE_POWERED;
2372 	udc->ep0_dir = 0;
2373 
2374 	spin_lock_init(&udc->lock);
2375 	return udc;
2376 
2377 cleanup:
2378 	kfree(udc);
2379 	return NULL;
2380 }
2381 
2382 /* USB Controller register init */
2383 static int qe_udc_reg_init(struct qe_udc *udc)
2384 {
2385 	struct usb_ctlr __iomem *qe_usbregs;
2386 	qe_usbregs = udc->usb_regs;
2387 
2388 	/* Spec says that we must enable the USB controller to change mode. */
2389 	out_8(&qe_usbregs->usb_usmod, 0x01);
2390 	/* Mode changed, now disable it, since muram isn't initialized yet. */
2391 	out_8(&qe_usbregs->usb_usmod, 0x00);
2392 
2393 	/* Initialize the rest. */
2394 	out_be16(&qe_usbregs->usb_usbmr, 0);
2395 	out_8(&qe_usbregs->usb_uscom, 0);
2396 	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2397 
2398 	return 0;
2399 }
2400 
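/* set the software defaults for one endpoint and, except for ep0,
 * add it to the gadget's endpoint list */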
2401 static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2402 {
2403 	struct qe_ep *ep = &udc->eps[pipe_num];
2404 
2405 	ep->udc = udc;
2406 	strcpy(ep->name, ep_name[pipe_num]);
2407 	ep->ep.name = ep_name[pipe_num];
2408 
2409 	if (pipe_num == 0) {
2410 		ep->ep.caps.type_control = true;
2411 	} else {
2412 		ep->ep.caps.type_iso = true;
2413 		ep->ep.caps.type_bulk = true;
2414 		ep->ep.caps.type_int = true;
2415 	}
2416 
2417 	ep->ep.caps.dir_in = true;
2418 	ep->ep.caps.dir_out = true;
2419 
2420 	ep->ep.ops = &qe_ep_ops;
2421 	ep->stopped = 1;
2422 	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
2423 	ep->ep.desc = NULL;
2424 	ep->dir = 0xff;
2425 	ep->epnum = (u8)pipe_num;
2426 	ep->sent = 0;
2427 	ep->last = 0;
2428 	ep->init = 0;
2429 	ep->rxframe = NULL;
2430 	ep->txframe = NULL;
2431 	ep->tx_req = NULL;
2432 	ep->state = EP_STATE_IDLE;
2433 	ep->has_data = 0;
2434 
2435 	/* the queue lists any req for this ep */
2436 	INIT_LIST_HEAD(&ep->queue);
2437 
	/* gadget.ep_list is used for ep_autoconfig, so ep0 is not added */
2439 	if (pipe_num != 0)
2440 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2441 
2442 	ep->gadget = &udc->gadget;
2443 
2444 	return 0;
2445 }
2446 
2447 /*-----------------------------------------------------------------------
2448  *	UDC device Driver operation functions				*
2449  *----------------------------------------------------------------------*/
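/* release() callback of gadget.dev: signal remove(), free the MURAM
 * endpoint parameter area and the udc structure itself */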
2450 static void qe_udc_release(struct device *dev)
2451 {
2452 	struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2453 	int i;
2454 
2455 	complete(udc->done);
2456 	cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2457 	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2458 		udc->ep_param[i] = NULL;
2459 
2460 	kfree(udc);
2461 }
2462 
2463 /* Driver probe functions */
2464 static const struct of_device_id qe_udc_match[];
2465 static int qe_udc_probe(struct platform_device *ofdev)
2466 {
2467 	struct qe_udc *udc;
2468 	const struct of_device_id *match;
2469 	struct device_node *np = ofdev->dev.of_node;
2470 	struct qe_ep *ep;
2471 	unsigned int ret = 0;
2472 	unsigned int i;
2473 	const void *prop;
2474 
2475 	match = of_match_device(qe_udc_match, &ofdev->dev);
2476 	if (!match)
2477 		return -EINVAL;
2478 
2479 	prop = of_get_property(np, "mode", NULL);
2480 	if (!prop || strcmp(prop, "peripheral"))
2481 		return -ENODEV;
2482 
2483 	/* Initialize the udc structure including QH member and other member */
2484 	udc = qe_udc_config(ofdev);
2485 	if (!udc) {
2486 		dev_err(&ofdev->dev, "failed to initialize\n");
2487 		return -ENOMEM;
2488 	}
2489 
2490 	udc->soc_type = (unsigned long)match->data;
2491 	udc->usb_regs = of_iomap(np, 0);
2492 	if (!udc->usb_regs) {
2493 		ret = -ENOMEM;
2494 		goto err1;
2495 	}
2496 
	/* initialize the USB hw registers except for the EP registers;
	 * leave the usbintr register untouched */
2499 	qe_udc_reg_init(udc);
2500 
	/* here come the standard operations for probe:
	 * set the qe_udc->gadget.xxx fields */
2503 	udc->gadget.ops = &qe_gadget_ops;
2504 
2505 	/* gadget.ep0 is a pointer */
2506 	udc->gadget.ep0 = &udc->eps[0].ep;
2507 
2508 	INIT_LIST_HEAD(&udc->gadget.ep_list);
2509 
	/* modified during the gadget registration process */
2511 	udc->gadget.speed = USB_SPEED_UNKNOWN;
2512 
2513 	/* name: Identifies the controller hardware type. */
2514 	udc->gadget.name = driver_name;
2515 	udc->gadget.dev.parent = &ofdev->dev;
2516 
2517 	/* initialize qe_ep struct */
2518 	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
		/* because the ep type isn't decided here,
		 * qe_ep_init() should be called in ep_enable() */

		/* set up the qe_ep struct and link ep.ep_list
		 * into gadget.ep_list */
2524 		qe_ep_config(udc, (unsigned char)i);
2525 	}
2526 
2527 	/* ep0 initialization in here */
2528 	ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2529 	if (ret)
2530 		goto err2;
2531 
	/* create a buffer for ZLP transmission; it must remain zeroed */
2533 	udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
2534 	if (udc->nullbuf == NULL) {
2535 		ret = -ENOMEM;
2536 		goto err3;
2537 	}
2538 
2539 	/* buffer for data of get_status request */
2540 	udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
2541 	if (udc->statusbuf == NULL) {
2542 		ret = -ENOMEM;
2543 		goto err3;
2544 	}
2545 
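	/* obtain a DMA address for the ZLP buffer: map it explicitly only if
	 * virt_to_phys() yields an invalid address, otherwise just sync it */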
2546 	udc->nullp = virt_to_phys((void *)udc->nullbuf);
2547 	if (udc->nullp == DMA_ADDR_INVALID) {
2548 		udc->nullp = dma_map_single(
2549 					udc->gadget.dev.parent,
2550 					udc->nullbuf,
2551 					256,
2552 					DMA_TO_DEVICE);
2553 		udc->nullmap = 1;
2554 	} else {
2555 		dma_sync_single_for_device(udc->gadget.dev.parent,
2556 					udc->nullp, 256,
2557 					DMA_TO_DEVICE);
2558 	}
2559 
2560 	tasklet_init(&udc->rx_tasklet, ep_rx_tasklet,
2561 			(unsigned long)udc);
	/* request irq and disable DR */
2563 	udc->usb_irq = irq_of_parse_and_map(np, 0);
2564 	if (!udc->usb_irq) {
2565 		ret = -EINVAL;
2566 		goto err_noirq;
2567 	}
2568 
2569 	ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2570 				driver_name, udc);
2571 	if (ret) {
2572 		dev_err(udc->dev, "cannot request irq %d err %d\n",
2573 				udc->usb_irq, ret);
2574 		goto err4;
2575 	}
2576 
2577 	ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2578 			qe_udc_release);
2579 	if (ret)
2580 		goto err5;
2581 
2582 	platform_set_drvdata(ofdev, udc);
2583 	dev_info(udc->dev,
2584 			"%s USB controller initialized as device\n",
2585 			(udc->soc_type == PORT_QE) ? "QE" : "CPM");
2586 	return 0;
2587 
2588 err5:
2589 	free_irq(udc->usb_irq, udc);
2590 err4:
2591 	irq_dispose_mapping(udc->usb_irq);
2592 err_noirq:
	if (udc->nullmap) {
		dma_unmap_single(udc->gadget.dev.parent,
				udc->nullp, 256, DMA_TO_DEVICE);
		udc->nullp = DMA_ADDR_INVALID;
	} else {
		dma_sync_single_for_cpu(udc->gadget.dev.parent,
				udc->nullp, 256, DMA_TO_DEVICE);
	}
2603 err3:
2604 	ep = &udc->eps[0];
2605 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2606 	kfree(ep->rxframe);
2607 	kfree(ep->rxbuffer);
2608 	kfree(ep->txframe);
2609 err2:
2610 	iounmap(udc->usb_regs);
2611 err1:
2612 	kfree(udc);
2613 	return ret;
2614 }
2615 
2616 #ifdef CONFIG_PM
2617 static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2618 {
2619 	return -ENOTSUPP;
2620 }
2621 
2622 static int qe_udc_resume(struct platform_device *dev)
2623 {
2624 	return -ENOTSUPP;
2625 }
2626 #endif
2627 
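/* tear down in reverse order of probe: unregister the gadget, unmap the
 * DMA buffers, free the ep0 resources, the irq and the register mapping,
 * then wait for the release() callback to free the udc structure */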
2628 static int qe_udc_remove(struct platform_device *ofdev)
2629 {
2630 	struct qe_udc *udc = platform_get_drvdata(ofdev);
2631 	struct qe_ep *ep;
2632 	unsigned int size;
2633 	DECLARE_COMPLETION_ONSTACK(done);
2634 
2635 	usb_del_gadget_udc(&udc->gadget);
2636 
2637 	udc->done = &done;
2638 	tasklet_disable(&udc->rx_tasklet);
2639 
	if (udc->nullmap) {
		dma_unmap_single(udc->gadget.dev.parent,
				udc->nullp, 256, DMA_TO_DEVICE);
		udc->nullp = DMA_ADDR_INVALID;
	} else {
		dma_sync_single_for_cpu(udc->gadget.dev.parent,
				udc->nullp, 256, DMA_TO_DEVICE);
	}
2650 
2651 	ep = &udc->eps[0];
2652 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2653 	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2654 
2655 	kfree(ep->rxframe);
2656 	if (ep->rxbufmap) {
2657 		dma_unmap_single(udc->gadget.dev.parent,
2658 				ep->rxbuf_d, size,
2659 				DMA_FROM_DEVICE);
2660 		ep->rxbuf_d = DMA_ADDR_INVALID;
2661 	} else {
2662 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2663 				ep->rxbuf_d, size,
2664 				DMA_FROM_DEVICE);
2665 	}
2666 
2667 	kfree(ep->rxbuffer);
2668 	kfree(ep->txframe);
2669 
2670 	free_irq(udc->usb_irq, udc);
2671 	irq_dispose_mapping(udc->usb_irq);
2672 
2673 	tasklet_kill(&udc->rx_tasklet);
2674 
2675 	iounmap(udc->usb_regs);
2676 
2677 	/* wait for release() of gadget.dev to free udc */
2678 	wait_for_completion(&done);
2679 
2680 	return 0;
2681 }
2682 
2683 /*-------------------------------------------------------------------------*/
2684 static const struct of_device_id qe_udc_match[] = {
2685 	{
2686 		.compatible = "fsl,mpc8323-qe-usb",
2687 		.data = (void *)PORT_QE,
2688 	},
2689 	{
2690 		.compatible = "fsl,mpc8360-qe-usb",
2691 		.data = (void *)PORT_QE,
2692 	},
2693 	{
2694 		.compatible = "fsl,mpc8272-cpm-usb",
2695 		.data = (void *)PORT_CPM,
2696 	},
2697 	{},
2698 };
2699 
2700 MODULE_DEVICE_TABLE(of, qe_udc_match);
2701 
2702 static struct platform_driver udc_driver = {
2703 	.driver = {
2704 		.name = driver_name,
2705 		.of_match_table = qe_udc_match,
2706 	},
2707 	.probe          = qe_udc_probe,
2708 	.remove         = qe_udc_remove,
2709 #ifdef CONFIG_PM
2710 	.suspend        = qe_udc_suspend,
2711 	.resume         = qe_udc_resume,
2712 #endif
2713 };
2714 
2715 module_platform_driver(udc_driver);
2716 
2717 MODULE_DESCRIPTION(DRIVER_DESC);
2718 MODULE_AUTHOR(DRIVER_AUTHOR);
2719 MODULE_LICENSE("GPL");
2720