1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * drivers/usb/gadget/udc/fsl_qe_udc.c
4  *
5  * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
6  *
7  * 	Xie Xiaobo <X.Xie@freescale.com>
8  * 	Li Yang <leoli@freescale.com>
9  * 	Based on bareboard code from Shlomi Gridish.
10  *
11  * Description:
12  * Freescale QE/CPM USB Peripheral Controller Driver
13  * The controller can be found on the MPC8360, MPC8272, etc.
14  * MPC8360 Rev 1.1 may need a QE microcode update
15  */
16 
17 #undef USB_TRACE
18 
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/ioport.h>
22 #include <linux/types.h>
23 #include <linux/errno.h>
24 #include <linux/err.h>
25 #include <linux/slab.h>
26 #include <linux/list.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/moduleparam.h>
30 #include <linux/of_address.h>
31 #include <linux/of_irq.h>
32 #include <linux/of_platform.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/usb/otg.h>
37 #include <soc/fsl/qe/qe.h>
38 #include <asm/cpm.h>
39 #include <asm/dma.h>
40 #include <asm/reg.h>
41 #include "fsl_qe_udc.h"
42 
43 #define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
44 #define DRIVER_AUTHOR   "Xie XiaoBo"
45 #define DRIVER_VERSION  "1.0"
46 
47 #define DMA_ADDR_INVALID        (~(dma_addr_t)0)
48 
49 static const char driver_name[] = "fsl_qe_udc";
50 static const char driver_desc[] = DRIVER_DESC;
51 
52 /* ep name is important in gadget, it should obey the convention of ep_match() */
53 static const char *const ep_name[] = {
54 	"ep0-control", /* everyone has ep0 */
55 	/* 3 configurable endpoints */
56 	"ep1",
57 	"ep2",
58 	"ep3",
59 };
60 
61 static const struct usb_endpoint_descriptor qe_ep0_desc = {
62 	.bLength =		USB_DT_ENDPOINT_SIZE,
63 	.bDescriptorType =	USB_DT_ENDPOINT,
64 
65 	.bEndpointAddress =	0,
66 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
67 	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
68 };
69 
70 /********************************************************************
71  *      Internally Used Functions: Start
72 ********************************************************************/
73 /*-----------------------------------------------------------------
74  * done() - retire a request; caller has blocked irqs
75  *--------------------------------------------------------------*/
76 static void done(struct qe_ep *ep, struct qe_req *req, int status)
77 {
78 	struct qe_udc *udc = ep->udc;
79 	unsigned char stopped = ep->stopped;
80 
81 	/* the req->queue pointer is used by ep_queue(), which adds
82 	 * the request to the tail of the endpoint's queue,
83 	 * so here the request is removed from ep->queue
84 	 */
85 	list_del_init(&req->queue);
86 
87 	/* req.status should be set as -EINPROGRESS in ep_queue() */
88 	if (req->req.status == -EINPROGRESS)
89 		req->req.status = status;
90 	else
91 		status = req->req.status;
92 
93 	if (req->mapped) {
94 		dma_unmap_single(udc->gadget.dev.parent,
95 			req->req.dma, req->req.length,
96 			ep_is_in(ep)
97 				? DMA_TO_DEVICE
98 				: DMA_FROM_DEVICE);
99 		req->req.dma = DMA_ADDR_INVALID;
100 		req->mapped = 0;
101 	} else
102 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
103 			req->req.dma, req->req.length,
104 			ep_is_in(ep)
105 				? DMA_TO_DEVICE
106 				: DMA_FROM_DEVICE);
107 
108 	if (status && (status != -ESHUTDOWN))
109 		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
110 			ep->ep.name, &req->req, status,
111 			req->req.actual, req->req.length);
112 
113 	/* don't modify queue heads during completion callback */
114 	ep->stopped = 1;
115 	spin_unlock(&udc->lock);
116 
117 	usb_gadget_giveback_request(&ep->ep, &req->req);
118 
119 	spin_lock(&udc->lock);
120 
121 	ep->stopped = stopped;
122 }
123 
124 /*-----------------------------------------------------------------
125  * nuke(): dequeue and complete all requests pending on this ep
126  *--------------------------------------------------------------*/
127 static void nuke(struct qe_ep *ep, int status)
128 {
129 	/* complete any requests still linked to this ep */
130 	while (!list_empty(&ep->queue)) {
131 		struct qe_req *req = NULL;
132 		req = list_entry(ep->queue.next, struct qe_req, queue);
133 
134 		done(ep, req, status);
135 	}
136 }
137 
138 /*---------------------------------------------------------------------------*
139  * USB and endpoint manipulation routines, covering parameters and registers  *
140  *---------------------------------------------------------------------------*/
141 /* @value: 1 -- set stall, 0 -- clear stall */
142 static int qe_eprx_stall_change(struct qe_ep *ep, int value)
143 {
144 	u16 tem_usep;
145 	u8 epnum = ep->epnum;
146 	struct qe_udc *udc = ep->udc;
147 
148 	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
149 	tem_usep = tem_usep & ~USB_RHS_MASK;
150 	if (value == 1)
151 		tem_usep |= USB_RHS_STALL;
152 	else if (ep->dir == USB_DIR_IN)
153 		tem_usep |= USB_RHS_IGNORE_OUT;
154 
155 	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
156 	return 0;
157 }
158 
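/* @value: 1 -- set stall, 0 -- clear stall (TX side of the ep) */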
159 static int qe_eptx_stall_change(struct qe_ep *ep, int value)
160 {
161 	u16 tem_usep;
162 	u8 epnum = ep->epnum;
163 	struct qe_udc *udc = ep->udc;
164 
165 	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
166 	tem_usep = tem_usep & ~USB_THS_MASK;
167 	if (value == 1)
168 		tem_usep |= USB_THS_STALL;
169 	else if (ep->dir == USB_DIR_OUT)
170 		tem_usep |= USB_THS_IGNORE_IN;
171 
172 	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
173 
174 	return 0;
175 }
176 
177 static int qe_ep0_stall(struct qe_udc *udc)
178 {
179 	qe_eptx_stall_change(&udc->eps[0], 1);
180 	qe_eprx_stall_change(&udc->eps[0], 1);
181 	udc->ep0_state = WAIT_FOR_SETUP;
182 	udc->ep0_dir = 0;
183 	return 0;
184 }
185 
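/* put the RX side of the ep into the NACK state and mask RXB/BSY interrupts */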
186 static int qe_eprx_nack(struct qe_ep *ep)
187 {
188 	u8 epnum = ep->epnum;
189 	struct qe_udc *udc = ep->udc;
190 
191 	if (ep->state == EP_STATE_IDLE) {
192 		/* Set the ep's nack */
193 		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
194 				USB_RHS_MASK, USB_RHS_NACK);
195 
196 		/* Mask Rx and Busy interrupts */
197 		clrbits16(&udc->usb_regs->usb_usbmr,
198 				(USB_E_RXB_MASK | USB_E_BSY_MASK));
199 
200 		ep->state = EP_STATE_NACK;
201 	}
202 	return 0;
203 }
204 
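/* leave the NACK state: restore normal RX handshaking, then clear and unmask RXB/BSY */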
205 static int qe_eprx_normal(struct qe_ep *ep)
206 {
207 	struct qe_udc *udc = ep->udc;
208 
209 	if (ep->state == EP_STATE_NACK) {
210 		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
211 				USB_RTHS_MASK, USB_THS_IGNORE_IN);
212 
213 		/* Unmask RX interrupts */
214 		out_be16(&udc->usb_regs->usb_usber,
215 				USB_E_BSY_MASK | USB_E_RXB_MASK);
216 		setbits16(&udc->usb_regs->usb_usbmr,
217 				(USB_E_RXB_MASK | USB_E_BSY_MASK));
218 
219 		ep->state = EP_STATE_IDLE;
220 		ep->has_data = 0;
221 	}
222 
223 	return 0;
224 }
225 
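/* issue the STOP TX command for this ep, via CPM or QE depending on the SoC */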
226 static int qe_ep_cmd_stoptx(struct qe_ep *ep)
227 {
228 	if (ep->udc->soc_type == PORT_CPM)
229 		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
230 				CPM_USB_STOP_TX_OPCODE);
231 	else
232 		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
233 				ep->epnum, 0);
234 
235 	return 0;
236 }
237 
238 static int qe_ep_cmd_restarttx(struct qe_ep *ep)
239 {
240 	if (ep->udc->soc_type == PORT_CPM)
241 		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
242 				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
243 	else
244 		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
245 				ep->epnum, 0);
246 
247 	return 0;
248 }
249 
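/* stop TX, flush the ep's TX FIFO, reset the TX BD ring pointers, then restart TX */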
250 static int qe_ep_flushtxfifo(struct qe_ep *ep)
251 {
252 	struct qe_udc *udc = ep->udc;
253 	int i;
254 
255 	i = (int)ep->epnum;
256 
257 	qe_ep_cmd_stoptx(ep);
258 	out_8(&udc->usb_regs->usb_uscom,
259 		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
260 	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
261 	out_be32(&udc->ep_param[i]->tstate, 0);
262 	out_be16(&udc->ep_param[i]->tbcnt, 0);
263 
264 	ep->c_txbd = ep->txbase;
265 	ep->n_txbd = ep->txbase;
266 	qe_ep_cmd_restarttx(ep);
267 	return 0;
268 }
269 
270 static int qe_ep_filltxfifo(struct qe_ep *ep)
271 {
272 	struct qe_udc *udc = ep->udc;
273 
274 	out_8(&udc->usb_regs->usb_uscom,
275 			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
276 	return 0;
277 }
278 
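/* return the RX and TX BD rings of an ep to their initial empty state */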
279 static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
280 {
281 	struct qe_ep *ep;
282 	u32 bdring_len;
283 	struct qe_bd __iomem *bd;
284 	int i;
285 
286 	ep = &udc->eps[pipe_num];
287 
288 	if (ep->dir == USB_DIR_OUT)
289 		bdring_len = USB_BDRING_LEN_RX;
290 	else
291 		bdring_len = USB_BDRING_LEN;
292 
293 	bd = ep->rxbase;
294 	for (i = 0; i < (bdring_len - 1); i++) {
295 		out_be32((u32 __iomem *)bd, R_E | R_I);
296 		bd++;
297 	}
298 	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
299 
300 	bd = ep->txbase;
301 	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
302 		out_be32(&bd->buf, 0);
303 		out_be32((u32 __iomem *)bd, 0);
304 		bd++;
305 	}
306 	out_be32((u32 __iomem *)bd, T_W);
307 
308 	return 0;
309 }
310 
311 static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
312 {
313 	struct qe_ep *ep;
314 	u16 tmpusep;
315 
316 	ep = &udc->eps[pipe_num];
317 	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
318 	tmpusep &= ~USB_RTHS_MASK;
319 
320 	switch (ep->dir) {
321 	case USB_DIR_BOTH:
322 		qe_ep_flushtxfifo(ep);
323 		break;
324 	case USB_DIR_OUT:
325 		tmpusep |= USB_THS_IGNORE_IN;
326 		break;
327 	case USB_DIR_IN:
328 		qe_ep_flushtxfifo(ep);
329 		tmpusep |= USB_RHS_IGNORE_OUT;
330 		break;
331 	default:
332 		break;
333 	}
334 	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
335 
336 	qe_epbds_reset(udc, pipe_num);
337 
338 	return 0;
339 }
340 
341 static int qe_ep_toggledata01(struct qe_ep *ep)
342 {
343 	ep->data01 ^= 0x1;
344 	return 0;
345 }
346 
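/* allocate MURAM for the ep's BD rings, record them in the parameter RAM and init the BDs */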
347 static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
348 {
349 	struct qe_ep *ep = &udc->eps[pipe_num];
350 	unsigned long tmp_addr = 0;
351 	struct usb_ep_para __iomem *epparam;
352 	int i;
353 	struct qe_bd __iomem *bd;
354 	int bdring_len;
355 
356 	if (ep->dir == USB_DIR_OUT)
357 		bdring_len = USB_BDRING_LEN_RX;
358 	else
359 		bdring_len = USB_BDRING_LEN;
360 
361 	epparam = udc->ep_param[pipe_num];
362 	/* alloc MURAM for the BD rings and set the ep parameters */
363 	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
364 				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
365 	if (IS_ERR_VALUE(tmp_addr))
366 		return -ENOMEM;
367 
368 	out_be16(&epparam->rbase, (u16)tmp_addr);
369 	out_be16(&epparam->tbase, (u16)(tmp_addr +
370 				(sizeof(struct qe_bd) * bdring_len)));
371 
372 	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
373 	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
374 
375 	ep->rxbase = cpm_muram_addr(tmp_addr);
376 	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
377 				* bdring_len));
378 	ep->n_rxbd = ep->rxbase;
379 	ep->e_rxbd = ep->rxbase;
380 	ep->n_txbd = ep->txbase;
381 	ep->c_txbd = ep->txbase;
382 	ep->data01 = 0; /* data0 */
383 
384 	/* Init TX and RX bds */
385 	bd = ep->rxbase;
386 	for (i = 0; i < bdring_len - 1; i++) {
387 		out_be32(&bd->buf, 0);
388 		out_be32((u32 __iomem *)bd, 0);
389 		bd++;
390 	}
391 	out_be32(&bd->buf, 0);
392 	out_be32((u32 __iomem *)bd, R_W);
393 
394 	bd = ep->txbase;
395 	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
396 		out_be32(&bd->buf, 0);
397 		out_be32((u32 __iomem *)bd, 0);
398 		bd++;
399 	}
400 	out_be32(&bd->buf, 0);
401 	out_be32((u32 __iomem *)bd, T_W);
402 
403 	return 0;
404 }
405 
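/* allocate the rx frame and rx buffer, map the buffer for DMA and point each RX BD at its slice */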
406 static int qe_ep_rxbd_update(struct qe_ep *ep)
407 {
408 	unsigned int size;
409 	int i;
410 	unsigned int tmp;
411 	struct qe_bd __iomem *bd;
412 	unsigned int bdring_len;
413 
414 	if (ep->rxbase == NULL)
415 		return -EINVAL;
416 
417 	bd = ep->rxbase;
418 
419 	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
420 	if (!ep->rxframe)
421 		return -ENOMEM;
422 
423 	qe_frame_init(ep->rxframe);
424 
425 	if (ep->dir == USB_DIR_OUT)
426 		bdring_len = USB_BDRING_LEN_RX;
427 	else
428 		bdring_len = USB_BDRING_LEN;
429 
430 	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
431 	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
432 	if (!ep->rxbuffer) {
433 		kfree(ep->rxframe);
434 		return -ENOMEM;
435 	}
436 
437 	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
438 	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
439 		ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
440 					ep->rxbuffer,
441 					size,
442 					DMA_FROM_DEVICE);
443 		ep->rxbufmap = 1;
444 	} else {
445 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
446 					ep->rxbuf_d, size,
447 					DMA_FROM_DEVICE);
448 		ep->rxbufmap = 0;
449 	}
450 
451 	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
452 	tmp = ep->rxbuf_d;
453 	tmp = (u32)(((tmp >> 2) << 2) + 4);
454 
455 	for (i = 0; i < bdring_len - 1; i++) {
456 		out_be32(&bd->buf, tmp);
457 		out_be32((u32 __iomem *)bd, (R_E | R_I));
458 		tmp = tmp + size;
459 		bd++;
460 	}
461 	out_be32(&bd->buf, tmp);
462 	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
463 
464 	return 0;
465 }
466 
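/* program the ep's USEP register and its parameter RAM (rbmr/tbmr and MRBLR) */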
467 static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
468 {
469 	struct qe_ep *ep = &udc->eps[pipe_num];
470 	struct usb_ep_para __iomem *epparam;
471 	u16 usep, logepnum;
472 	u16 tmp;
473 	u8 rtfcr = 0;
474 
475 	epparam = udc->ep_param[pipe_num];
476 
477 	usep = 0;
478 	logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
479 	usep |= (logepnum << USB_EPNUM_SHIFT);
480 
481 	switch (ep->ep.desc->bmAttributes & 0x03) {
482 	case USB_ENDPOINT_XFER_BULK:
483 		usep |= USB_TRANS_BULK;
484 		break;
485 	case USB_ENDPOINT_XFER_ISOC:
486 		usep |=  USB_TRANS_ISO;
487 		break;
488 	case USB_ENDPOINT_XFER_INT:
489 		usep |= USB_TRANS_INT;
490 		break;
491 	default:
492 		usep |= USB_TRANS_CTR;
493 		break;
494 	}
495 
496 	switch (ep->dir) {
497 	case USB_DIR_OUT:
498 		usep |= USB_THS_IGNORE_IN;
499 		break;
500 	case USB_DIR_IN:
501 		usep |= USB_RHS_IGNORE_OUT;
502 		break;
503 	default:
504 		break;
505 	}
506 	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
507 
508 	rtfcr = 0x30;
509 	out_8(&epparam->rbmr, rtfcr);
510 	out_8(&epparam->tbmr, rtfcr);
511 
512 	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
513 	/* MRBLR must be divisible by 4 */
514 	tmp = (u16)(((tmp >> 2) << 2) + 4);
515 	out_be16(&epparam->mrblr, tmp);
516 
517 	return 0;
518 }
519 
520 static int qe_ep_init(struct qe_udc *udc,
521 		      unsigned char pipe_num,
522 		      const struct usb_endpoint_descriptor *desc)
523 {
524 	struct qe_ep *ep = &udc->eps[pipe_num];
525 	unsigned long flags;
526 	int reval = 0;
527 	u16 max = 0;
528 
529 	max = usb_endpoint_maxp(desc);
530 
531 	/* check that the max packet size is valid for this endpoint */
532 	/* Refer to USB 2.0 spec, table 9-13 */
534 	if (pipe_num != 0) {
535 		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
536 		case USB_ENDPOINT_XFER_BULK:
537 			if (strstr(ep->ep.name, "-iso")
538 					|| strstr(ep->ep.name, "-int"))
539 				goto en_done;
540 			switch (udc->gadget.speed) {
541 			case USB_SPEED_HIGH:
542 				if ((max == 128) || (max == 256) || (max == 512))
543 					break;
544 				fallthrough;
545 			default:
546 				switch (max) {
547 				case 4:
548 				case 8:
549 				case 16:
550 				case 32:
551 				case 64:
552 					break;
553 				default:
555 					goto en_done;
556 				}
557 			}
558 			break;
559 		case USB_ENDPOINT_XFER_INT:
560 			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
561 				goto en_done;
562 			switch (udc->gadget.speed) {
563 			case USB_SPEED_HIGH:
564 				if (max <= 1024)
565 					break;
566 				fallthrough;
567 			case USB_SPEED_FULL:
568 				if (max <= 64)
569 					break;
570 				fallthrough;
571 			default:
572 				if (max <= 8)
573 					break;
574 				goto en_done;
575 			}
576 			break;
577 		case USB_ENDPOINT_XFER_ISOC:
578 			if (strstr(ep->ep.name, "-bulk")
579 				|| strstr(ep->ep.name, "-int"))
580 				goto en_done;
581 			switch (udc->gadget.speed) {
582 			case USB_SPEED_HIGH:
583 				if (max <= 1024)
584 					break;
585 				fallthrough;
586 			case USB_SPEED_FULL:
587 				if (max <= 1023)
588 					break;
589 				fallthrough;
590 			default:
591 				goto en_done;
592 			}
593 			break;
594 		case USB_ENDPOINT_XFER_CONTROL:
595 			if (strstr(ep->ep.name, "-iso")
596 				|| strstr(ep->ep.name, "-int"))
597 				goto en_done;
598 			switch (udc->gadget.speed) {
599 			case USB_SPEED_HIGH:
600 			case USB_SPEED_FULL:
601 				switch (max) {
602 				case 1:
603 				case 2:
604 				case 4:
605 				case 8:
606 				case 16:
607 				case 32:
608 				case 64:
609 					break;
610 				default:
611 					goto en_done;
612 				}
613 				fallthrough;
614 			case USB_SPEED_LOW:
615 				switch (max) {
616 				case 1:
617 				case 2:
618 				case 4:
619 				case 8:
620 					break;
621 				default:
622 					goto en_done;
623 				}
624 			default:
625 				goto en_done;
626 			}
627 			break;
628 
629 		default:
630 			goto en_done;
631 		}
632 	} /* if pipe_num != 0 */
633 
634 	spin_lock_irqsave(&udc->lock, flags);
635 
636 	/* initialize ep structure */
637 	ep->ep.maxpacket = max;
638 	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
639 	ep->ep.desc = desc;
640 	ep->stopped = 0;
641 	ep->init = 1;
642 
643 	if (pipe_num == 0) {
644 		ep->dir = USB_DIR_BOTH;
645 		udc->ep0_dir = USB_DIR_OUT;
646 		udc->ep0_state = WAIT_FOR_SETUP;
647 	} else	{
648 		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
649 		case USB_DIR_OUT:
650 			ep->dir = USB_DIR_OUT;
651 			break;
652 		case USB_DIR_IN:
653 			ep->dir = USB_DIR_IN;
654 		default:
655 			break;
656 		}
657 	}
658 
659 	/* hardware special operation */
660 	qe_ep_bd_init(udc, pipe_num);
661 	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
662 		reval = qe_ep_rxbd_update(ep);
663 		if (reval)
664 			goto en_done1;
665 	}
666 
667 	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
668 		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
669 		if (!ep->txframe)
670 			goto en_done2;
671 		qe_frame_init(ep->txframe);
672 	}
673 
674 	qe_ep_register_init(udc, pipe_num);
675 
676 	/* Now HW will be NAKing transfers to that EP,
677 	 * until a buffer is queued to it. */
678 	spin_unlock_irqrestore(&udc->lock, flags);
679 
680 	return 0;
681 en_done2:
682 	kfree(ep->rxbuffer);
683 	kfree(ep->rxframe);
684 en_done1:
685 	spin_unlock_irqrestore(&udc->lock, flags);
686 en_done:
687 	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
688 	return -ENODEV;
689 }
690 
691 static inline void qe_usb_enable(struct qe_udc *udc)
692 {
693 	setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
694 }
695 
696 static inline void qe_usb_disable(struct qe_udc *udc)
697 {
698 	clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
699 }
700 
701 /*----------------------------------------------------------------------------*
702  *		USB and EP basic manipulation functions end		      *
703  *----------------------------------------------------------------------------*/
704 
705 
706 /******************************************************************************
707 		UDC transmit and receive process
708  ******************************************************************************/
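/* hand a single RX BD back to the controller: mark it Empty and Interrupt, keep Wrap */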
709 static void recycle_one_rxbd(struct qe_ep *ep)
710 {
711 	u32 bdstatus;
712 
713 	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
714 	bdstatus = R_I | R_E | (bdstatus & R_W);
715 	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
716 
717 	if (bdstatus & R_W)
718 		ep->e_rxbd = ep->rxbase;
719 	else
720 		ep->e_rxbd++;
721 }
722 
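/* hand back all consumed RX BDs, optionally stopping at the next BD to be filled */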
723 static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
724 {
725 	u32 bdstatus;
726 	struct qe_bd __iomem *bd, *nextbd;
727 	unsigned char stop = 0;
728 
729 	nextbd = ep->n_rxbd;
730 	bd = ep->e_rxbd;
731 	bdstatus = in_be32((u32 __iomem *)bd);
732 
733 	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
734 		bdstatus = R_E | R_I | (bdstatus & R_W);
735 		out_be32((u32 __iomem *)bd, bdstatus);
736 
737 		if (bdstatus & R_W)
738 			bd = ep->rxbase;
739 		else
740 			bd++;
741 
742 		bdstatus = in_be32((u32 __iomem *)bd);
743 		if (stopatnext && (bd == nextbd))
744 			stop = 1;
745 	}
746 
747 	ep->e_rxbd = bd;
748 }
749 
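/* recycle this ep's RX BDs, ack a pending BSY event and re-enable RX if requests are queued */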
750 static void ep_recycle_rxbds(struct qe_ep *ep)
751 {
752 	struct qe_bd __iomem *bd = ep->n_rxbd;
753 	u32 bdstatus;
754 	u8 epnum = ep->epnum;
755 	struct qe_udc *udc = ep->udc;
756 
757 	bdstatus = in_be32((u32 __iomem *)bd);
758 	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
759 		bd = ep->rxbase +
760 				((in_be16(&udc->ep_param[epnum]->rbptr) -
761 				  in_be16(&udc->ep_param[epnum]->rbase))
762 				 >> 3);
763 		bdstatus = in_be32((u32 __iomem *)bd);
764 
765 		if (bdstatus & R_W)
766 			bd = ep->rxbase;
767 		else
768 			bd++;
769 
770 		ep->e_rxbd = bd;
771 		recycle_rxbds(ep, 0);
772 		ep->e_rxbd = ep->n_rxbd;
773 	} else
774 		recycle_rxbds(ep, 1);
775 
776 	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
777 		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
778 
779 	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
780 		qe_eprx_normal(ep);
781 
782 	ep->localnack = 0;
783 }
784 
785 static void setup_received_handle(struct qe_udc *udc,
786 					struct usb_ctrlrequest *setup);
787 static int qe_ep_rxframe_handle(struct qe_ep *ep);
788 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
789 /* when the BD PID is SETUP, handle the packet */
790 static int ep0_setup_handle(struct qe_udc *udc)
791 {
792 	struct qe_ep *ep = &udc->eps[0];
793 	struct qe_frame *pframe;
794 	unsigned int fsize;
795 	u8 *cp;
796 
797 	pframe = ep->rxframe;
798 	if ((frame_get_info(pframe) & PID_SETUP)
799 			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
800 		fsize = frame_get_length(pframe);
801 		if (unlikely(fsize != 8))
802 			return -EINVAL;
803 		cp = (u8 *)&udc->local_setup_buff;
804 		memcpy(cp, pframe->data, fsize);
805 		ep->data01 = 1;
806 
807 		/* handle the usb command based on the usb_ctrlrequest */
808 		setup_received_handle(udc, &udc->local_setup_buff);
809 		return 0;
810 	}
811 	return -EINVAL;
812 }
813 
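/* walk the ep0 RX BD ring and hand completed SETUP/DATA frames to the handlers */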
814 static int qe_ep0_rx(struct qe_udc *udc)
815 {
816 	struct qe_ep *ep = &udc->eps[0];
817 	struct qe_frame *pframe;
818 	struct qe_bd __iomem *bd;
819 	u32 bdstatus, length;
820 	u32 vaddr;
821 
822 	pframe = ep->rxframe;
823 
824 	if (ep->dir == USB_DIR_IN) {
825 		dev_err(udc->dev, "ep0 not a control endpoint\n");
826 		return -EINVAL;
827 	}
828 
829 	bd = ep->n_rxbd;
830 	bdstatus = in_be32((u32 __iomem *)bd);
831 	length = bdstatus & BD_LENGTH_MASK;
832 
833 	while (!(bdstatus & R_E) && length) {
834 		if ((bdstatus & R_F) && (bdstatus & R_L)
835 			&& !(bdstatus & R_ERROR)) {
836 			if (length == USB_CRC_SIZE) {
837 				udc->ep0_state = WAIT_FOR_SETUP;
838 				dev_vdbg(udc->dev,
839 					"receive a ZLP in status phase\n");
840 			} else {
841 				qe_frame_clean(pframe);
842 				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
843 				frame_set_data(pframe, (u8 *)vaddr);
844 				frame_set_length(pframe,
845 						(length - USB_CRC_SIZE));
846 				frame_set_status(pframe, FRAME_OK);
847 				switch (bdstatus & R_PID) {
848 				case R_PID_SETUP:
849 					frame_set_info(pframe, PID_SETUP);
850 					break;
851 				case R_PID_DATA1:
852 					frame_set_info(pframe, PID_DATA1);
853 					break;
854 				default:
855 					frame_set_info(pframe, PID_DATA0);
856 					break;
857 				}
858 
859 				if ((bdstatus & R_PID) == R_PID_SETUP)
860 					ep0_setup_handle(udc);
861 				else
862 					qe_ep_rxframe_handle(ep);
863 			}
864 		} else {
865 			dev_err(udc->dev, "The received frame has an error!\n");
866 		}
867 
868 		/* note: don't clear the rxbd's buffer address */
869 		recycle_one_rxbd(ep);
870 
871 		/* Get next BD */
872 		if (bdstatus & R_W)
873 			bd = ep->rxbase;
874 		else
875 			bd++;
876 
877 		bdstatus = in_be32((u32 __iomem *)bd);
878 		length = bdstatus & BD_LENGTH_MASK;
879 
880 	}
881 
882 	ep->n_rxbd = bd;
883 
884 	return 0;
885 }
886 
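/* copy a received data frame into the current request and complete it when finished */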
887 static int qe_ep_rxframe_handle(struct qe_ep *ep)
888 {
889 	struct qe_frame *pframe;
890 	u8 framepid = 0;
891 	unsigned int fsize;
892 	u8 *cp;
893 	struct qe_req *req;
894 
895 	pframe = ep->rxframe;
896 
897 	if (frame_get_info(pframe) & PID_DATA1)
898 		framepid = 0x1;
899 
900 	if (framepid != ep->data01) {
901 		dev_err(ep->udc->dev, "the data01 error!\n");
902 		return -EIO;
903 	}
904 
905 	fsize = frame_get_length(pframe);
906 	if (list_empty(&ep->queue)) {
907 		dev_err(ep->udc->dev, "the %s has no request queued!\n", ep->name);
908 	} else {
909 		req = list_entry(ep->queue.next, struct qe_req, queue);
910 
911 		cp = (u8 *)(req->req.buf) + req->req.actual;
912 		if (cp) {
913 			memcpy(cp, pframe->data, fsize);
914 			req->req.actual += fsize;
915 			if ((fsize < ep->ep.maxpacket) ||
916 					(req->req.actual >= req->req.length)) {
917 				if (ep->epnum == 0)
918 					ep0_req_complete(ep->udc, req);
919 				else
920 					done(ep, req, 0);
921 				if (list_empty(&ep->queue) && ep->epnum != 0)
922 					qe_eprx_nack(ep);
923 			}
924 		}
925 	}
926 
927 	qe_ep_toggledata01(ep);
928 
929 	return 0;
930 }
931 
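/* bottom half: drain the RX BD rings of all OUT endpoints flagged by the interrupt handler */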
932 static void ep_rx_tasklet(struct tasklet_struct *t)
933 {
934 	struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
935 	struct qe_ep *ep;
936 	struct qe_frame *pframe;
937 	struct qe_bd __iomem *bd;
938 	unsigned long flags;
939 	u32 bdstatus, length;
940 	u32 vaddr, i;
941 
942 	spin_lock_irqsave(&udc->lock, flags);
943 
944 	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
945 		ep = &udc->eps[i];
946 
947 		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
948 			dev_dbg(udc->dev,
949 				"This is a transmit ep or its tasklet is disabled!\n");
950 			continue;
951 		}
952 
953 		pframe = ep->rxframe;
954 		bd = ep->n_rxbd;
955 		bdstatus = in_be32((u32 __iomem *)bd);
956 		length = bdstatus & BD_LENGTH_MASK;
957 
958 		while (!(bdstatus & R_E) && length) {
959 			if (list_empty(&ep->queue)) {
960 				qe_eprx_nack(ep);
961 				dev_dbg(udc->dev,
962 					"The rx ep has no request queued, has_data %d\n",
963 					ep->has_data);
964 				break;
965 			}
966 
967 			if ((bdstatus & R_F) && (bdstatus & R_L)
968 				&& !(bdstatus & R_ERROR)) {
969 				qe_frame_clean(pframe);
970 				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
971 				frame_set_data(pframe, (u8 *)vaddr);
972 				frame_set_length(pframe,
973 						(length - USB_CRC_SIZE));
974 				frame_set_status(pframe, FRAME_OK);
975 				switch (bdstatus & R_PID) {
976 				case R_PID_DATA1:
977 					frame_set_info(pframe, PID_DATA1);
978 					break;
979 				case R_PID_SETUP:
980 					frame_set_info(pframe, PID_SETUP);
981 					break;
982 				default:
983 					frame_set_info(pframe, PID_DATA0);
984 					break;
985 				}
986 				/* handle the rx frame */
987 				qe_ep_rxframe_handle(ep);
988 			} else {
989 				dev_err(udc->dev,
990 					"error in received frame\n");
991 			}
992 			/* note: don't clear the rxbd's buffer address */
993 			/* clear the length */
994 			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
995 			ep->has_data--;
996 			if (!(ep->localnack))
997 				recycle_one_rxbd(ep);
998 
999 			/* Get next BD */
1000 			if (bdstatus & R_W)
1001 				bd = ep->rxbase;
1002 			else
1003 				bd++;
1004 
1005 			bdstatus = in_be32((u32 __iomem *)bd);
1006 			length = bdstatus & BD_LENGTH_MASK;
1007 		}
1008 
1009 		ep->n_rxbd = bd;
1010 
1011 		if (ep->localnack)
1012 			ep_recycle_rxbds(ep);
1013 
1014 		ep->enable_tasklet = 0;
1015 	} /* for i=1 */
1016 
1017 	spin_unlock_irqrestore(&udc->lock, flags);
1018 }
1019 
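/* RX interrupt path: count filled BDs, NACK when the ring is nearly full, then defer to the tasklet */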
1020 static int qe_ep_rx(struct qe_ep *ep)
1021 {
1022 	struct qe_udc *udc;
1023 	struct qe_frame *pframe;
1024 	struct qe_bd __iomem *bd;
1025 	u16 swoffs, ucoffs, emptybds;
1026 
1027 	udc = ep->udc;
1028 	pframe = ep->rxframe;
1029 
1030 	if (ep->dir == USB_DIR_IN) {
1031 		dev_err(udc->dev, "transmit ep in rx function\n");
1032 		return -EINVAL;
1033 	}
1034 
1035 	bd = ep->n_rxbd;
1036 
1037 	swoffs = (u16)(bd - ep->rxbase);
1038 	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1039 			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1040 	if (swoffs < ucoffs)
1041 		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1042 	else
1043 		emptybds = swoffs - ucoffs;
1044 
1045 	if (emptybds < MIN_EMPTY_BDS) {
1046 		qe_eprx_nack(ep);
1047 		ep->localnack = 1;
1048 		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1049 	}
1050 	ep->has_data = USB_BDRING_LEN_RX - emptybds;
1051 
1052 	if (list_empty(&ep->queue)) {
1053 		qe_eprx_nack(ep);
1054 		dev_vdbg(udc->dev, "The rx ep has no req queued, with %d data BDs\n",
1055 				ep->has_data);
1056 		return 0;
1057 	}
1058 
1059 	tasklet_schedule(&udc->rx_tasklet);
1060 	ep->enable_tasklet = 1;
1061 
1062 	return 0;
1063 }
1064 
1065 /* send the data in a frame, regardless of tx_req */
1066 static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1067 {
1068 	struct qe_udc *udc = ep->udc;
1069 	struct qe_bd __iomem *bd;
1070 	u16 saveusbmr;
1071 	u32 bdstatus, pidmask;
1072 	u32 paddr;
1073 
1074 	if (ep->dir == USB_DIR_OUT) {
1075 		dev_err(udc->dev, "receive ep passed to tx function\n");
1076 		return -EINVAL;
1077 	}
1078 
1079 	/* Disable the Tx interrupt */
1080 	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1081 	out_be16(&udc->usb_regs->usb_usbmr,
1082 			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1083 
1084 	bd = ep->n_txbd;
1085 	bdstatus = in_be32((u32 __iomem *)bd);
1086 
1087 	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1088 		if (frame_get_length(frame) == 0) {
1089 			frame_set_data(frame, udc->nullbuf);
1090 			frame_set_length(frame, 2);
1091 			frame->info |= (ZLP | NO_CRC);
1092 			dev_vdbg(udc->dev, "the frame size = 0\n");
1093 		}
1094 		paddr = virt_to_phys((void *)frame->data);
1095 		out_be32(&bd->buf, paddr);
1096 		bdstatus = (bdstatus&T_W);
1097 		if (!(frame_get_info(frame) & NO_CRC))
1098 			bdstatus |= T_R | T_I | T_L | T_TC
1099 					| frame_get_length(frame);
1100 		else
1101 			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1102 
1103 		/* if the packet is a ZLP in status phase */
1104 		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1105 			ep->data01 = 0x1;
1106 
1107 		if (ep->data01) {
1108 			pidmask = T_PID_DATA1;
1109 			frame->info |= PID_DATA1;
1110 		} else {
1111 			pidmask = T_PID_DATA0;
1112 			frame->info |= PID_DATA0;
1113 		}
1114 		bdstatus |= T_CNF;
1115 		bdstatus |= pidmask;
1116 		out_be32((u32 __iomem *)bd, bdstatus);
1117 		qe_ep_filltxfifo(ep);
1118 
1119 		/* enable the TX interrupt */
1120 		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1121 
1122 		qe_ep_toggledata01(ep);
1123 		if (bdstatus & T_W)
1124 			ep->n_txbd = ep->txbase;
1125 		else
1126 			ep->n_txbd++;
1127 
1128 		return 0;
1129 	} else {
1130 		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1131 		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1132 		return -EBUSY;
1133 	}
1134 }
1135 
1136 /* when a bd has been transmitted, this function handles
1137  * the tx_req bookkeeping; ep0 is not covered here      */
1138 static int txcomplete(struct qe_ep *ep, unsigned char restart)
1139 {
1140 	if (ep->tx_req != NULL) {
1141 		struct qe_req *req = ep->tx_req;
1142 		unsigned zlp = 0, last_len = 0;
1143 
1144 		last_len = min_t(unsigned, req->req.length - ep->sent,
1145 				ep->ep.maxpacket);
1146 
1147 		if (!restart) {
1148 			int asent = ep->last;
1149 			ep->sent += asent;
1150 			ep->last -= asent;
1151 		} else {
1152 			ep->last = 0;
1153 		}
1154 
1155 		/* zlp needed when req->req.zero is set */
1156 		if (req->req.zero) {
1157 			if (last_len == 0 ||
1158 				(req->req.length % ep->ep.maxpacket) != 0)
1159 				zlp = 0;
1160 			else
1161 				zlp = 1;
1162 		} else
1163 			zlp = 0;
1164 
1165 		/* the request has already been transmitted completely */
1166 		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1167 			done(ep, ep->tx_req, 0);
1168 			ep->tx_req = NULL;
1169 			ep->last = 0;
1170 			ep->sent = 0;
1171 		}
1172 	}
1173 
1174 	/* we should gain a new tx_req for this endpoint */
1175 	if (ep->tx_req == NULL) {
1176 		if (!list_empty(&ep->queue)) {
1177 			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
1178 							queue);
1179 			ep->last = 0;
1180 			ep->sent = 0;
1181 		}
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 /* given a frame and a tx_req, send some data */
1188 static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1189 {
1190 	unsigned int size;
1191 	u8 *buf;
1192 
1193 	qe_frame_clean(frame);
1194 	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1195 				ep->ep.maxpacket);
1196 	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1197 	if (buf && size) {
1198 		ep->last = size;
1199 		ep->tx_req->req.actual += size;
1200 		frame_set_data(frame, buf);
1201 		frame_set_length(frame, size);
1202 		frame_set_status(frame, FRAME_OK);
1203 		frame_set_info(frame, 0);
1204 		return qe_ep_tx(ep, frame);
1205 	}
1206 	return -EIO;
1207 }
1208 
1209 /* given a frame struct, send a ZLP */
1210 static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1211 {
1212 	struct qe_udc *udc = ep->udc;
1213 
1214 	if (frame == NULL)
1215 		return -ENODEV;
1216 
1217 	qe_frame_clean(frame);
1218 	frame_set_data(frame, (u8 *)udc->nullbuf);
1219 	frame_set_length(frame, 2);
1220 	frame_set_status(frame, FRAME_OK);
1221 	frame_set_info(frame, (ZLP | NO_CRC | infor));
1222 
1223 	return qe_ep_tx(ep, frame);
1224 }
1225 
1226 static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1227 {
1228 	struct qe_req *req = ep->tx_req;
1229 	int reval;
1230 
1231 	if (req == NULL)
1232 		return -ENODEV;
1233 
1234 	if ((req->req.length - ep->sent) > 0)
1235 		reval = qe_usb_senddata(ep, frame);
1236 	else
1237 		reval = sendnulldata(ep, frame, 0);
1238 
1239 	return reval;
1240 }
1241 
1242 /* if direction is DIR_IN, the status transaction is Device->Host
1243  * if direction is DIR_OUT, the status transaction is Host->Device
1244  * in the status phase, the udc creates a request and collects the status */
1245 static int ep0_prime_status(struct qe_udc *udc, int direction)
1246 {
1247 
1248 	struct qe_ep *ep = &udc->eps[0];
1249 
1250 	if (direction == USB_DIR_IN) {
1251 		udc->ep0_state = DATA_STATE_NEED_ZLP;
1252 		udc->ep0_dir = USB_DIR_IN;
1253 		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1254 	} else {
1255 		udc->ep0_dir = USB_DIR_OUT;
1256 		udc->ep0_state = WAIT_FOR_OUT_STATUS;
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 /* a request completed on ep0, whether a gadget request or a udc request */
1263 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1264 {
1265 	struct qe_ep *ep = &udc->eps[0];
1266 	/* the usb and ep status have already been set in ch9setaddress() */
1267 
1268 	switch (udc->ep0_state) {
1269 	case DATA_STATE_XMIT:
1270 		done(ep, req, 0);
1271 		/* receive status phase */
1272 		if (ep0_prime_status(udc, USB_DIR_OUT))
1273 			qe_ep0_stall(udc);
1274 		break;
1275 
1276 	case DATA_STATE_NEED_ZLP:
1277 		done(ep, req, 0);
1278 		udc->ep0_state = WAIT_FOR_SETUP;
1279 		break;
1280 
1281 	case DATA_STATE_RECV:
1282 		done(ep, req, 0);
1283 		/* send status phase */
1284 		if (ep0_prime_status(udc, USB_DIR_IN))
1285 			qe_ep0_stall(udc);
1286 		break;
1287 
1288 	case WAIT_FOR_OUT_STATUS:
1289 		done(ep, req, 0);
1290 		udc->ep0_state = WAIT_FOR_SETUP;
1291 		break;
1292 
1293 	case WAIT_FOR_SETUP:
1294 		dev_vdbg(udc->dev, "Unexpected interrupt\n");
1295 		break;
1296 
1297 	default:
1298 		qe_ep0_stall(udc);
1299 		break;
1300 	}
1301 }
1302 
1303 static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1304 {
1305 	struct qe_req *tx_req = NULL;
1306 	struct qe_frame *frame = ep->txframe;
1307 
1308 	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1309 		if (!restart)
1310 			ep->udc->ep0_state = WAIT_FOR_SETUP;
1311 		else
1312 			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1313 		return 0;
1314 	}
1315 
1316 	tx_req = ep->tx_req;
1317 	if (tx_req != NULL) {
1318 		if (!restart) {
1319 			int asent = ep->last;
1320 			ep->sent += asent;
1321 			ep->last -= asent;
1322 		} else {
1323 			ep->last = 0;
1324 		}
1325 
1326 		/* the request has already been transmitted completely */
1327 		if ((ep->tx_req->req.length - ep->sent) <= 0) {
1328 			ep->tx_req->req.actual = (unsigned int)ep->sent;
1329 			ep0_req_complete(ep->udc, ep->tx_req);
1330 			ep->tx_req = NULL;
1331 			ep->last = 0;
1332 			ep->sent = 0;
1333 		}
1334 	} else {
1335 		dev_vdbg(ep->udc->dev, "the ep0 controller has no req\n");
1336 	}
1337 
1338 	return 0;
1339 }
1340 
1341 static int ep0_txframe_handle(struct qe_ep *ep)
1342 {
1343 	/* if there was an error, transmit again */
1344 	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1345 		qe_ep_flushtxfifo(ep);
1346 		dev_vdbg(ep->udc->dev, "The EP0 transmit data has an error!\n");
1347 		if (frame_get_info(ep->txframe) & PID_DATA0)
1348 			ep->data01 = 0;
1349 		else
1350 			ep->data01 = 1;
1351 
1352 		ep0_txcomplete(ep, 1);
1353 	} else
1354 		ep0_txcomplete(ep, 0);
1355 
1356 	frame_create_tx(ep, ep->txframe);
1357 	return 0;
1358 }
1359 
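/* confirm transmitted ep0 BDs and drive the next step of the control transfer */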
1360 static int qe_ep0_txconf(struct qe_ep *ep)
1361 {
1362 	struct qe_bd __iomem *bd;
1363 	struct qe_frame *pframe;
1364 	u32 bdstatus;
1365 
1366 	bd = ep->c_txbd;
1367 	bdstatus = in_be32((u32 __iomem *)bd);
1368 	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1369 		pframe = ep->txframe;
1370 
1371 		/* clear and recycle the BD */
1372 		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1373 		out_be32(&bd->buf, 0);
1374 		if (bdstatus & T_W)
1375 			ep->c_txbd = ep->txbase;
1376 		else
1377 			ep->c_txbd++;
1378 
1379 		if (ep->c_txbd == ep->n_txbd) {
1380 			if (bdstatus & DEVICE_T_ERROR) {
1381 				frame_set_status(pframe, FRAME_ERROR);
1382 				if (bdstatus & T_TO)
1383 					pframe->status |= TX_ER_TIMEOUT;
1384 				if (bdstatus & T_UN)
1385 					pframe->status |= TX_ER_UNDERUN;
1386 			}
1387 			ep0_txframe_handle(ep);
1388 		}
1389 
1390 		bd = ep->c_txbd;
1391 		bdstatus = in_be32((u32 __iomem *)bd);
1392 	}
1393 
1394 	return 0;
1395 }
1396 
1397 static int ep_txframe_handle(struct qe_ep *ep)
1398 {
1399 	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1400 		qe_ep_flushtxfifo(ep);
1401 		dev_vdbg(ep->udc->dev, "The ep transmit data has an error!\n");
1402 		if (frame_get_info(ep->txframe) & PID_DATA0)
1403 			ep->data01 = 0;
1404 		else
1405 			ep->data01 = 1;
1406 
1407 		txcomplete(ep, 1);
1408 	} else
1409 		txcomplete(ep, 0);
1410 
1411 	frame_create_tx(ep, ep->txframe); /* send the data */
1412 	return 0;
1413 }
1414 
1415 /* confirm the already transmitted bd */
1416 static int qe_ep_txconf(struct qe_ep *ep)
1417 {
1418 	struct qe_bd __iomem *bd;
1419 	struct qe_frame *pframe = NULL;
1420 	u32 bdstatus;
1421 	unsigned char breakonrxinterrupt = 0;
1422 
1423 	bd = ep->c_txbd;
1424 	bdstatus = in_be32((u32 __iomem *)bd);
1425 	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1426 		pframe = ep->txframe;
1427 		if (bdstatus & DEVICE_T_ERROR) {
1428 			frame_set_status(pframe, FRAME_ERROR);
1429 			if (bdstatus & T_TO)
1430 				pframe->status |= TX_ER_TIMEOUT;
1431 			if (bdstatus & T_UN)
1432 				pframe->status |= TX_ER_UNDERUN;
1433 		}
1434 
1435 		/* clear and recycle the BD */
1436 		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1437 		out_be32(&bd->buf, 0);
1438 		if (bdstatus & T_W)
1439 			ep->c_txbd = ep->txbase;
1440 		else
1441 			ep->c_txbd++;
1442 
1443 		/* handle the tx frame */
1444 		ep_txframe_handle(ep);
1445 		bd = ep->c_txbd;
1446 		bdstatus = in_be32((u32 __iomem *)bd);
1447 	}
1448 	if (breakonrxinterrupt)
1449 		return -EIO;
1450 	else
1451 		return 0;
1452 }
1453 
1454 /* Add a request to the queue, and try to transmit a packet */
1455 static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1456 {
1457 	int reval = 0;
1458 
1459 	if (ep->tx_req == NULL) {
1460 		ep->sent = 0;
1461 		ep->last = 0;
1462 		txcomplete(ep, 0); /* can gain a new tx_req */
1463 		reval = frame_create_tx(ep, ep->txframe);
1464 	}
1465 	return reval;
1466 }
1467 
1468 /* copy data from already-filled RX BDs into the request */
1469 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1470 {
1471 	struct qe_udc *udc = ep->udc;
1472 	struct qe_frame *pframe = NULL;
1473 	struct qe_bd __iomem *bd;
1474 	u32 bdstatus, length;
1475 	u32 vaddr, fsize;
1476 	u8 *cp;
1477 	u8 finish_req = 0;
1478 	u8 framepid;
1479 
1480 	if (list_empty(&ep->queue)) {
1481 		dev_vdbg(udc->dev, "the req already finished!\n");
1482 		return 0;
1483 	}
1484 	pframe = ep->rxframe;
1485 
1486 	bd = ep->n_rxbd;
1487 	bdstatus = in_be32((u32 __iomem *)bd);
1488 	length = bdstatus & BD_LENGTH_MASK;
1489 
1490 	while (!(bdstatus & R_E) && length) {
1491 		if (finish_req)
1492 			break;
1493 		if ((bdstatus & R_F) && (bdstatus & R_L)
1494 					&& !(bdstatus & R_ERROR)) {
1495 			qe_frame_clean(pframe);
1496 			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1497 			frame_set_data(pframe, (u8 *)vaddr);
1498 			frame_set_length(pframe, (length - USB_CRC_SIZE));
1499 			frame_set_status(pframe, FRAME_OK);
1500 			switch (bdstatus & R_PID) {
1501 			case R_PID_DATA1:
1502 				frame_set_info(pframe, PID_DATA1); break;
1503 			default:
1504 				frame_set_info(pframe, PID_DATA0); break;
1505 			}
1506 			/* handle the rx frame */
1507 
1508 			if (frame_get_info(pframe) & PID_DATA1)
1509 				framepid = 0x1;
1510 			else
1511 				framepid = 0;
1512 
1513 			if (framepid != ep->data01) {
1514 				dev_vdbg(udc->dev, "the data01 error!\n");
1515 			} else {
1516 				fsize = frame_get_length(pframe);
1517 
1518 				cp = (u8 *)(req->req.buf) + req->req.actual;
1519 				if (cp) {
1520 					memcpy(cp, pframe->data, fsize);
1521 					req->req.actual += fsize;
1522 					if ((fsize < ep->ep.maxpacket)
1523 						|| (req->req.actual >=
1524 							req->req.length)) {
1525 						finish_req = 1;
1526 						done(ep, req, 0);
1527 						if (list_empty(&ep->queue))
1528 							qe_eprx_nack(ep);
1529 					}
1530 				}
1531 				qe_ep_toggledata01(ep);
1532 			}
1533 		} else {
1534 			dev_err(udc->dev, "The received frame has an error!\n");
1535 		}
1536 
1537 		/* note: don't clear the rxbd's buffer address *
1538 		 * only Clear the length */
1539 		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1540 		ep->has_data--;
1541 
1542 		/* Get next BD */
1543 		if (bdstatus & R_W)
1544 			bd = ep->rxbase;
1545 		else
1546 			bd++;
1547 
1548 		bdstatus = in_be32((u32 __iomem *)bd);
1549 		length = bdstatus & BD_LENGTH_MASK;
1550 	}
1551 
1552 	ep->n_rxbd = bd;
1553 	ep_recycle_rxbds(ep);
1554 
1555 	return 0;
1556 }
1557 
1558 /* the request has just been added to the queue; start or continue reception */
1559 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1560 {
1561 	if (ep->state == EP_STATE_NACK) {
1562 		if (ep->has_data <= 0) {
1563 			/* Enable rx and unmask rx interrupt */
1564 			qe_eprx_normal(ep);
1565 		} else {
1566 			/* Copy the existing BD data */
1567 			ep_req_rx(ep, req);
1568 		}
1569 	}
1570 
1571 	return 0;
1572 }
1573 
1574 /********************************************************************
1575 	Internally Used Functions: End
1576 ********************************************************************/
1577 
1578 /*-----------------------------------------------------------------------
1579 	Endpoint Management Functions For Gadget
1580  -----------------------------------------------------------------------*/
1581 static int qe_ep_enable(struct usb_ep *_ep,
1582 			 const struct usb_endpoint_descriptor *desc)
1583 {
1584 	struct qe_udc *udc;
1585 	struct qe_ep *ep;
1586 	int retval = 0;
1587 	unsigned char epnum;
1588 
1589 	ep = container_of(_ep, struct qe_ep, ep);
1590 
1591 	/* catch various bogus parameters */
1592 	if (!_ep || !desc || _ep->name == ep_name[0] ||
1593 			(desc->bDescriptorType != USB_DT_ENDPOINT))
1594 		return -EINVAL;
1595 
1596 	udc = ep->udc;
1597 	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1598 		return -ESHUTDOWN;
1599 
1600 	epnum = (u8)desc->bEndpointAddress & 0xF;
1601 
1602 	retval = qe_ep_init(udc, epnum, desc);
1603 	if (retval != 0) {
1604 		cpm_muram_free(cpm_muram_offset(ep->rxbase));
1605 		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1606 		return -EINVAL;
1607 	}
1608 	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1609 	return 0;
1610 }
1611 
1612 static int qe_ep_disable(struct usb_ep *_ep)
1613 {
1614 	struct qe_udc *udc;
1615 	struct qe_ep *ep;
1616 	unsigned long flags;
1617 	unsigned int size;
1618 
1619 	ep = container_of(_ep, struct qe_ep, ep);
1620 	udc = ep->udc;
1621 
1622 	if (!_ep || !ep->ep.desc) {
1623 		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1624 		return -EINVAL;
1625 	}
1626 
1627 	spin_lock_irqsave(&udc->lock, flags);
1628 	/* Nuke all pending requests (does flush) */
1629 	nuke(ep, -ESHUTDOWN);
1630 	ep->ep.desc = NULL;
1631 	ep->stopped = 1;
1632 	ep->tx_req = NULL;
1633 	qe_ep_reset(udc, ep->epnum);
1634 	spin_unlock_irqrestore(&udc->lock, flags);
1635 
1636 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
1637 
1638 	if (ep->dir == USB_DIR_OUT)
1639 		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1640 				(USB_BDRING_LEN_RX + 1);
1641 	else
1642 		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1643 				(USB_BDRING_LEN + 1);
1644 
1645 	if (ep->dir != USB_DIR_IN) {
1646 		kfree(ep->rxframe);
1647 		if (ep->rxbufmap) {
1648 			dma_unmap_single(udc->gadget.dev.parent,
1649 					ep->rxbuf_d, size,
1650 					DMA_FROM_DEVICE);
1651 			ep->rxbuf_d = DMA_ADDR_INVALID;
1652 		} else {
1653 			dma_sync_single_for_cpu(
1654 					udc->gadget.dev.parent,
1655 					ep->rxbuf_d, size,
1656 					DMA_FROM_DEVICE);
1657 		}
1658 		kfree(ep->rxbuffer);
1659 	}
1660 
1661 	if (ep->dir != USB_DIR_OUT)
1662 		kfree(ep->txframe);
1663 
1664 	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1665 	return 0;
1666 }
1667 
1668 static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1669 {
1670 	struct qe_req *req;
1671 
1672 	req = kzalloc(sizeof(*req), gfp_flags);
1673 	if (!req)
1674 		return NULL;
1675 
1676 	req->req.dma = DMA_ADDR_INVALID;
1677 
1678 	INIT_LIST_HEAD(&req->queue);
1679 
1680 	return &req->req;
1681 }
1682 
1683 static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1684 {
1685 	struct qe_req *req;
1686 
1687 	req = container_of(_req, struct qe_req, req);
1688 
1689 	if (_req)
1690 		kfree(req);
1691 }
1692 
1693 static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1694 {
1695 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1696 	struct qe_req *req = container_of(_req, struct qe_req, req);
1697 	struct qe_udc *udc;
1698 	int reval;
1699 
1700 	udc = ep->udc;
1701 	/* catch various bogus parameters */
1702 	if (!_req || !req->req.complete || !req->req.buf
1703 			|| !list_empty(&req->queue)) {
1704 		dev_dbg(udc->dev, "bad params\n");
1705 		return -EINVAL;
1706 	}
1707 	if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1708 		dev_dbg(udc->dev, "bad ep\n");
1709 		return -EINVAL;
1710 	}
1711 
1712 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1713 		return -ESHUTDOWN;
1714 
1715 	req->ep = ep;
1716 
1717 	/* map virtual address to hardware */
1718 	if (req->req.dma == DMA_ADDR_INVALID) {
1719 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1720 					req->req.buf,
1721 					req->req.length,
1722 					ep_is_in(ep)
1723 					? DMA_TO_DEVICE :
1724 					DMA_FROM_DEVICE);
1725 		req->mapped = 1;
1726 	} else {
1727 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1728 					req->req.dma, req->req.length,
1729 					ep_is_in(ep)
1730 					? DMA_TO_DEVICE :
1731 					DMA_FROM_DEVICE);
1732 		req->mapped = 0;
1733 	}
1734 
1735 	req->req.status = -EINPROGRESS;
1736 	req->req.actual = 0;
1737 
1738 	list_add_tail(&req->queue, &ep->queue);
1739 	dev_vdbg(udc->dev, "gadget has a request in %s! %d\n",
1740 			ep->name, req->req.length);
1741 
1742 	/* push the request to device */
1743 	if (ep_is_in(ep))
1744 		reval = ep_req_send(ep, req);
1745 
1746 	/* EP0 */
1747 	if (ep_index(ep) == 0 && req->req.length > 0) {
1748 		if (ep_is_in(ep))
1749 			udc->ep0_state = DATA_STATE_XMIT;
1750 		else
1751 			udc->ep0_state = DATA_STATE_RECV;
1752 	}
1753 
1754 	if (ep->dir == USB_DIR_OUT)
1755 		reval = ep_req_receive(ep, req);
1756 
1757 	return 0;
1758 }
1759 
1760 /* queues (submits) an I/O request to an endpoint */
1761 static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1762 		       gfp_t gfp_flags)
1763 {
1764 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1765 	struct qe_udc *udc = ep->udc;
1766 	unsigned long flags;
1767 	int ret;
1768 
1769 	spin_lock_irqsave(&udc->lock, flags);
1770 	ret = __qe_ep_queue(_ep, _req);
1771 	spin_unlock_irqrestore(&udc->lock, flags);
1772 	return ret;
1773 }
1774 
1775 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
1776 static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1777 {
1778 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1779 	struct qe_req *req = NULL;
1780 	struct qe_req *iter;
1781 	unsigned long flags;
1782 
1783 	if (!_ep || !_req)
1784 		return -EINVAL;
1785 
1786 	spin_lock_irqsave(&ep->udc->lock, flags);
1787 
1788 	/* make sure it's actually queued on this endpoint */
1789 	list_for_each_entry(iter, &ep->queue, queue) {
1790 		if (&iter->req != _req)
1791 			continue;
1792 		req = iter;
1793 		break;
1794 	}
1795 
1796 	if (!req) {
1797 		spin_unlock_irqrestore(&ep->udc->lock, flags);
1798 		return -EINVAL;
1799 	}
1800 
1801 	done(ep, req, -ECONNRESET);
1802 
1803 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1804 	return 0;
1805 }
1806 
1807 /*-----------------------------------------------------------------
1808  * modify the endpoint halt feature
1809  * @ep: the non-isochronous endpoint being stalled
1810  * @value: 1--set halt  0--clear halt
1811  * Returns zero, or a negative error code.
1812 *----------------------------------------------------------------*/
1813 static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1814 {
1815 	struct qe_ep *ep;
1816 	unsigned long flags;
1817 	int status = -EOPNOTSUPP;
1818 	struct qe_udc *udc;
1819 
1820 	ep = container_of(_ep, struct qe_ep, ep);
1821 	if (!_ep || !ep->ep.desc) {
1822 		status = -EINVAL;
1823 		goto out;
1824 	}
1825 
1826 	udc = ep->udc;
1827 	/* Attempts to halt an IN ep will fail if any transfer requests
1828 	 * are still queued */
1829 	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1830 		status = -EAGAIN;
1831 		goto out;
1832 	}
1833 
1834 	status = 0;
1835 	spin_lock_irqsave(&ep->udc->lock, flags);
1836 	qe_eptx_stall_change(ep, value);
1837 	qe_eprx_stall_change(ep, value);
1838 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1839 
1840 	if (ep->epnum == 0) {
1841 		udc->ep0_state = WAIT_FOR_SETUP;
1842 		udc->ep0_dir = 0;
1843 	}
1844 
1845 	/* set data toggle to DATA0 on clear halt */
1846 	if (value == 0)
1847 		ep->data01 = 0;
1848 out:
1849 	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1850 			value ?  "set" : "clear", status);
1851 
1852 	return status;
1853 }
1854 
1855 static const struct usb_ep_ops qe_ep_ops = {
1856 	.enable = qe_ep_enable,
1857 	.disable = qe_ep_disable,
1858 
1859 	.alloc_request = qe_alloc_request,
1860 	.free_request = qe_free_request,
1861 
1862 	.queue = qe_ep_queue,
1863 	.dequeue = qe_ep_dequeue,
1864 
1865 	.set_halt = qe_ep_set_halt,
1866 };
1867 
1868 /*------------------------------------------------------------------------
1869 	Gadget Driver Layer Operations
1870  ------------------------------------------------------------------------*/
1871 
1872 /* Get the current frame number */
1873 static int qe_get_frame(struct usb_gadget *gadget)
1874 {
1875 	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1876 	u16 tmp;
1877 
1878 	tmp = in_be16(&udc->usb_param->frame_n);
1879 	if (tmp & 0x8000)
1880 		return tmp & 0x07ff;
1881 	return -EINVAL;
1882 }
1883 
1884 static int fsl_qe_start(struct usb_gadget *gadget,
1885 		struct usb_gadget_driver *driver);
1886 static int fsl_qe_stop(struct usb_gadget *gadget);
1887 
1888 /* defined in usb_gadget.h */
1889 static const struct usb_gadget_ops qe_gadget_ops = {
1890 	.get_frame = qe_get_frame,
1891 	.udc_start = fsl_qe_start,
1892 	.udc_stop = fsl_qe_stop,
1893 };
1894 
1895 /*-------------------------------------------------------------------------
1896 	USB ep0 Setup process in BUS Enumeration
1897  -------------------------------------------------------------------------*/
1898 static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1899 {
1900 	struct qe_ep *ep = &udc->eps[pipe];
1901 
1902 	nuke(ep, -ECONNRESET);
1903 	ep->tx_req = NULL;
1904 	return 0;
1905 }
1906 
1907 static int reset_queues(struct qe_udc *udc)
1908 {
1909 	u8 pipe;
1910 
1911 	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1912 		udc_reset_ep_queue(udc, pipe);
1913 
1914 	/* report disconnect; the driver is already quiesced */
1915 	spin_unlock(&udc->lock);
1916 	usb_gadget_udc_reset(&udc->gadget, udc->driver);
1917 	spin_lock(&udc->lock);
1918 
1919 	return 0;
1920 }
1921 
1922 static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1923 			u16 length)
1924 {
1925 	/* Save the new address to device struct */
1926 	udc->device_address = (u8) value;
1927 	/* Update usb state */
1928 	udc->usb_state = USB_STATE_ADDRESS;
1929 
1930 	/* Status phase, send a ZLP */
1931 	if (ep0_prime_status(udc, USB_DIR_IN))
1932 		qe_ep0_stall(udc);
1933 }
1934 
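/* completion handler for the locally allocated GET_STATUS request; frees it */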
1935 static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1936 {
1937 	struct qe_req *req = container_of(_req, struct qe_req, req);
1938 
1939 	req->req.buf = NULL;
1940 	kfree(req);
1941 }
1942 
1943 static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1944 			u16 index, u16 length)
1945 {
1946 	u16 usb_status = 0;
1947 	struct qe_req *req;
1948 	struct qe_ep *ep;
1949 	int status = 0;
1950 
1951 	ep = &udc->eps[0];
1952 	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1953 		/* Get device status */
1954 		usb_status = 1 << USB_DEVICE_SELF_POWERED;
1955 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1956 		/* Get interface status */
1957 		/* We don't have interface information in the udc driver */
1958 		usb_status = 0;
1959 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1960 		/* Get endpoint status */
1961 		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1962 		if (pipe >= USB_MAX_ENDPOINTS)
1963 			goto stall;
1964 		struct qe_ep *target_ep = &udc->eps[pipe];
1965 		u16 usep;
1966 
1967 		/* stall if endpoint doesn't exist */
1968 		if (!target_ep->ep.desc)
1969 			goto stall;
1970 
1971 		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
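		/*
		 * The USEPx handshake fields reflect whether the TX (IN) or
		 * RX (OUT) side of the endpoint is currently stalled.
		 */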
1972 		if (index & USB_DIR_IN) {
1973 			if (target_ep->dir != USB_DIR_IN)
1974 				goto stall;
1975 			if ((usep & USB_THS_MASK) == USB_THS_STALL)
1976 				usb_status = 1 << USB_ENDPOINT_HALT;
1977 		} else {
1978 			if (target_ep->dir != USB_DIR_OUT)
1979 				goto stall;
1980 			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1981 				usb_status = 1 << USB_ENDPOINT_HALT;
1982 		}
1983 	}
1984 
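	/*
	 * This path runs from the USB interrupt handler with udc->lock held,
	 * so the request allocation must not sleep.
	 */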
1985 	req = container_of(qe_alloc_request(&ep->ep, GFP_ATOMIC),
1986 					struct qe_req, req);
1987 	req->req.length = 2;
1988 	req->req.buf = udc->statusbuf;
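	/* GET_STATUS returns two bytes, little-endian on the wire */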
1989 	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
1990 	req->req.status = -EINPROGRESS;
1991 	req->req.actual = 0;
1992 	req->req.complete = ownercomplete;
1993 
1994 	udc->ep0_dir = USB_DIR_IN;
1995 
1996 	/* data phase */
1997 	status = __qe_ep_queue(&ep->ep, &req->req);
1998 
1999 	if (status == 0)
2000 		return;
2001 stall:
2002 	dev_err(udc->dev, "Can't respond to getstatus request\n");
2003 	qe_ep0_stall(udc);
2004 }
2005 
2006 /* only handles the setup request; assumes the device is in a normal state */
2007 static void setup_received_handle(struct qe_udc *udc,
2008 				struct usb_ctrlrequest *setup)
2009 {
2010 	/* Fix endianness (udc->local_setup_buff is CPU-endian now) */
2011 	u16 wValue = le16_to_cpu(setup->wValue);
2012 	u16 wIndex = le16_to_cpu(setup->wIndex);
2013 	u16 wLength = le16_to_cpu(setup->wLength);
2014 
2015 	/* clear the previous request in the ep0 */
2016 	udc_reset_ep_queue(udc, 0);
2017 
2018 	if (setup->bRequestType & USB_DIR_IN)
2019 		udc->ep0_dir = USB_DIR_IN;
2020 	else
2021 		udc->ep0_dir = USB_DIR_OUT;
2022 
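	/*
	 * Standard requests that the controller must answer by itself are
	 * handled below; everything else is passed to the gadget driver's
	 * ->setup() callback.
	 */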
2023 	switch (setup->bRequest) {
2024 	case USB_REQ_GET_STATUS:
2025 		/* Data+Status phase from udc */
2026 		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2027 					!= (USB_DIR_IN | USB_TYPE_STANDARD))
2028 			break;
2029 		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2030 					wLength);
2031 		return;
2032 
2033 	case USB_REQ_SET_ADDRESS:
2034 		/* Status phase from udc */
2035 		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2036 						USB_RECIP_DEVICE))
2037 			break;
2038 		ch9setaddress(udc, wValue, wIndex, wLength);
2039 		return;
2040 
2041 	case USB_REQ_CLEAR_FEATURE:
2042 	case USB_REQ_SET_FEATURE:
2043 		/* Requests with no data phase, status phase from udc */
2044 		if ((setup->bRequestType & USB_TYPE_MASK)
2045 					!= USB_TYPE_STANDARD)
2046 			break;
2047 
2048 		if ((setup->bRequestType & USB_RECIP_MASK)
2049 				== USB_RECIP_ENDPOINT) {
2050 			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2051 			struct qe_ep *ep;
2052 
2053 			if (wValue != 0 || wLength != 0
2054 				|| pipe >= USB_MAX_ENDPOINTS)
2055 				break;
2056 			ep = &udc->eps[pipe];
2057 
2058 			spin_unlock(&udc->lock);
2059 			qe_ep_set_halt(&ep->ep,
2060 					(setup->bRequest == USB_REQ_SET_FEATURE)
2061 						? 1 : 0);
2062 			spin_lock(&udc->lock);
2063 		}
2064 
2065 		ep0_prime_status(udc, USB_DIR_IN);
2066 
2067 		return;
2068 
2069 	default:
2070 		break;
2071 	}
2072 
2073 	if (wLength) {
2074 		/* Data phase from gadget, status phase from udc */
2075 		if (setup->bRequestType & USB_DIR_IN) {
2076 			udc->ep0_state = DATA_STATE_XMIT;
2077 			udc->ep0_dir = USB_DIR_IN;
2078 		} else {
2079 			udc->ep0_state = DATA_STATE_RECV;
2080 			udc->ep0_dir = USB_DIR_OUT;
2081 		}
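		/*
		 * The gadget driver's ->setup() typically queues a request on
		 * ep0, which takes udc->lock again, so drop the lock across
		 * the callback.
		 */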
2082 		spin_unlock(&udc->lock);
2083 		if (udc->driver->setup(&udc->gadget,
2084 					&udc->local_setup_buff) < 0)
2085 			qe_ep0_stall(udc);
2086 		spin_lock(&udc->lock);
2087 	} else {
2088 		/* No data phase, IN status from gadget */
2089 		udc->ep0_dir = USB_DIR_IN;
2090 		spin_unlock(&udc->lock);
2091 		if (udc->driver->setup(&udc->gadget,
2092 					&udc->local_setup_buff) < 0)
2093 			qe_ep0_stall(udc);
2094 		spin_lock(&udc->lock);
2095 		udc->ep0_state = DATA_STATE_NEED_ZLP;
2096 	}
2097 }
2098 
2099 /*-------------------------------------------------------------------------
2100 	USB Interrupt handlers
2101  -------------------------------------------------------------------------*/
2102 static void suspend_irq(struct qe_udc *udc)
2103 {
2104 	udc->resume_state = udc->usb_state;
2105 	udc->usb_state = USB_STATE_SUSPENDED;
2106 
2107 	/* report suspend to the driver; serial.c does not support this */
2108 	if (udc->driver->suspend)
2109 		udc->driver->suspend(&udc->gadget);
2110 }
2111 
2112 static void resume_irq(struct qe_udc *udc)
2113 {
2114 	udc->usb_state = udc->resume_state;
2115 	udc->resume_state = 0;
2116 
2117 	/* report resume to the driver; serial.c does not support this */
2118 	if (udc->driver->resume)
2119 		udc->driver->resume(&udc->gadget);
2120 }
2121 
2122 static void idle_irq(struct qe_udc *udc)
2123 {
2124 	u8 usbs;
2125 
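	/*
	 * The IDLE event is raised both on entering and on leaving the idle
	 * state, so the status register is checked to tell suspend from
	 * resume.
	 */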
2126 	usbs = in_8(&udc->usb_regs->usb_usbs);
2127 	if (usbs & USB_IDLE_STATUS_MASK) {
2128 		if ((udc->usb_state) != USB_STATE_SUSPENDED)
2129 			suspend_irq(udc);
2130 	} else {
2131 		if (udc->usb_state == USB_STATE_SUSPENDED)
2132 			resume_irq(udc);
2133 	}
2134 }
2135 
2136 static int reset_irq(struct qe_udc *udc)
2137 {
2138 	unsigned char i;
2139 
2140 	if (udc->usb_state == USB_STATE_DEFAULT)
2141 		return 0;
2142 
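	/*
	 * Stop the controller and clear the device address, then tear down
	 * all endpoint state before re-enabling it in the default state.
	 */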
2143 	qe_usb_disable(udc);
2144 	out_8(&udc->usb_regs->usb_usadr, 0);
2145 
2146 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2147 		if (udc->eps[i].init)
2148 			qe_ep_reset(udc, i);
2149 	}
2150 
2151 	reset_queues(udc);
2152 	udc->usb_state = USB_STATE_DEFAULT;
2153 	udc->ep0_state = WAIT_FOR_SETUP;
2154 	udc->ep0_dir = USB_DIR_OUT;
2155 	qe_usb_enable(udc);
2156 	return 0;
2157 }
2158 
2159 static int bsy_irq(struct qe_udc *udc)
2160 {
2161 	return 0;
2162 }
2163 
2164 static int txe_irq(struct qe_udc *udc)
2165 {
2166 	return 0;
2167 }
2168 
2169 /* ep0 tx interrupts are also handled here */
2170 static int tx_irq(struct qe_udc *udc)
2171 {
2172 	struct qe_ep *ep;
2173 	struct qe_bd __iomem *bd;
2174 	int i, res = 0;
2175 
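	/*
	 * A SET_ADDRESS only takes effect after its status stage, so the
	 * pending address is programmed here once the status IN packet has
	 * been transmitted.
	 */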
2176 	if ((udc->usb_state == USB_STATE_ADDRESS)
2177 		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
2178 		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2179 
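	/*
	 * Walk the TX BD rings from the highest endpoint down and retire any
	 * descriptors the controller has finished with (T_R cleared).
	 */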
2180 	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2181 		ep = &udc->eps[i];
2182 		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2183 			bd = ep->c_txbd;
2184 			if (!(in_be32((u32 __iomem *)bd) & T_R)
2185 						&& (in_be32(&bd->buf))) {
2186 				/* confirm the transmitted bd */
2187 				if (ep->epnum == 0)
2188 					res = qe_ep0_txconf(ep);
2189 				else
2190 					res = qe_ep_txconf(ep);
2191 			}
2192 		}
2193 	}
2194 	return res;
2195 }
2196 
2197 
2198 /* setup packet rx is also handled in this function */
2199 static void rx_irq(struct qe_udc *udc)
2200 {
2201 	struct qe_ep *ep;
2202 	struct qe_bd __iomem *bd;
2203 	int i;
2204 
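	/*
	 * Poll each enabled OUT endpoint's next RX BD; a descriptor with R_E
	 * cleared and a buffer pointer set has been filled by the controller.
	 */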
2205 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2206 		ep = &udc->eps[i];
2207 		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2208 			bd = ep->n_rxbd;
2209 			if (!(in_be32((u32 __iomem *)bd) & R_E)
2210 						&& (in_be32(&bd->buf))) {
2211 				if (ep->epnum == 0) {
2212 					qe_ep0_rx(udc);
2213 				} else {
2214 				/* non-setup packet receive */
2215 					qe_ep_rx(ep);
2216 				}
2217 			}
2218 		}
2219 	}
2220 }
2221 
2222 static irqreturn_t qe_udc_irq(int irq, void *_udc)
2223 {
2224 	struct qe_udc *udc = (struct qe_udc *)_udc;
2225 	u16 irq_src;
2226 	irqreturn_t status = IRQ_NONE;
2227 	unsigned long flags;
2228 
2229 	spin_lock_irqsave(&udc->lock, flags);
2230 
2231 	irq_src = in_be16(&udc->usb_regs->usb_usber) &
2232 		in_be16(&udc->usb_regs->usb_usbmr);
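	/* service only the events that are both raised and unmasked */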
2233 	/* Clear notification bits */
2234 	out_be16(&udc->usb_regs->usb_usber, irq_src);
2235 	/* USB Interrupt */
2236 	if (irq_src & USB_E_IDLE_MASK) {
2237 		idle_irq(udc);
2238 		irq_src &= ~USB_E_IDLE_MASK;
2239 		status = IRQ_HANDLED;
2240 	}
2241 
2242 	if (irq_src & USB_E_TXB_MASK) {
2243 		tx_irq(udc);
2244 		irq_src &= ~USB_E_TXB_MASK;
2245 		status = IRQ_HANDLED;
2246 	}
2247 
2248 	if (irq_src & USB_E_RXB_MASK) {
2249 		rx_irq(udc);
2250 		irq_src &= ~USB_E_RXB_MASK;
2251 		status = IRQ_HANDLED;
2252 	}
2253 
2254 	if (irq_src & USB_E_RESET_MASK) {
2255 		reset_irq(udc);
2256 		irq_src &= ~USB_E_RESET_MASK;
2257 		status = IRQ_HANDLED;
2258 	}
2259 
2260 	if (irq_src & USB_E_BSY_MASK) {
2261 		bsy_irq(udc);
2262 		irq_src &= ~USB_E_BSY_MASK;
2263 		status = IRQ_HANDLED;
2264 	}
2265 
2266 	if (irq_src & USB_E_TXE_MASK) {
2267 		txe_irq(udc);
2268 		irq_src &= ~USB_E_TXE_MASK;
2269 		status = IRQ_HANDLED;
2270 	}
2271 
2272 	spin_unlock_irqrestore(&udc->lock, flags);
2273 
2274 	return status;
2275 }
2276 
2277 /*-------------------------------------------------------------------------
2278 	Gadget driver probe and unregister.
2279  --------------------------------------------------------------------------*/
2280 static int fsl_qe_start(struct usb_gadget *gadget,
2281 		struct usb_gadget_driver *driver)
2282 {
2283 	struct qe_udc *udc;
2284 	unsigned long flags;
2285 
2286 	udc = container_of(gadget, struct qe_udc, gadget);
2287 	/* a lock is needed; unclear whether this one or a separate lock should be used */
2288 	spin_lock_irqsave(&udc->lock, flags);
2289 
2290 	/* hook up the driver */
2291 	udc->driver = driver;
2292 	udc->gadget.speed = driver->max_speed;
2293 
2294 	/* enable the controller, then clear and unmask the USB events */
2295 	qe_usb_enable(udc);
2296 
2297 	out_be16(&udc->usb_regs->usb_usber, 0xffff);
2298 	out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2299 	udc->usb_state = USB_STATE_ATTACHED;
2300 	udc->ep0_state = WAIT_FOR_SETUP;
2301 	udc->ep0_dir = USB_DIR_OUT;
2302 	spin_unlock_irqrestore(&udc->lock, flags);
2303 
2304 	return 0;
2305 }
2306 
2307 static int fsl_qe_stop(struct usb_gadget *gadget)
2308 {
2309 	struct qe_udc *udc;
2310 	struct qe_ep *loop_ep;
2311 	unsigned long flags;
2312 
2313 	udc = container_of(gadget, struct qe_udc, gadget);
2314 	/* stop usb controller, disable intr */
2315 	qe_usb_disable(udc);
2316 
2317 	/* in fact, not needed */
2318 	udc->usb_state = USB_STATE_ATTACHED;
2319 	udc->ep0_state = WAIT_FOR_SETUP;
2320 	udc->ep0_dir = 0;
2321 
2322 	/* standard shutdown operations */
2323 	spin_lock_irqsave(&udc->lock, flags);
2324 	udc->gadget.speed = USB_SPEED_UNKNOWN;
2325 	nuke(&udc->eps[0], -ESHUTDOWN);
2326 	list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2327 		nuke(loop_ep, -ESHUTDOWN);
2328 	spin_unlock_irqrestore(&udc->lock, flags);
2329 
2330 	udc->driver = NULL;
2331 
2332 	return 0;
2333 }
2334 
2335 /* allocate and set up the udc structure, including the ep parameter area */
2336 static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2337 {
2338 	struct qe_udc *udc;
2339 	struct device_node *np = ofdev->dev.of_node;
2340 	unsigned long tmp_addr = 0;
2341 	struct usb_device_para __iomem *usbpram;
2342 	unsigned int i;
2343 	u64 size;
2344 	u32 offset;
2345 
2346 	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2347 	if (!udc)
2348 		goto cleanup;
2349 
2350 	udc->dev = &ofdev->dev;
2351 
2352 	/* get default address of usb parameter in MURAM from device tree */
2353 	offset = *of_get_address(np, 1, &size, NULL);
2354 	udc->usb_param = cpm_muram_addr(offset);
2355 	memset_io(udc->usb_param, 0, size);
2356 
2357 	usbpram = udc->usb_param;
2358 	out_be16(&usbpram->frame_n, 0);
2359 	out_be32(&usbpram->rstate, 0);
2360 
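	/*
	 * Allocate one usb_ep_para block per endpoint in MURAM; the blocks
	 * are laid out back to back at a 32-byte stride, which is assumed to
	 * match sizeof(struct usb_ep_para).
	 */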
2361 	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2362 					sizeof(struct usb_ep_para)),
2363 					   USB_EP_PARA_ALIGNMENT);
2364 	if (IS_ERR_VALUE(tmp_addr))
2365 		goto cleanup;
2366 
2367 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2368 		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2369 		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2370 		tmp_addr += 32;
2371 	}
2372 
2373 	memset_io(udc->ep_param[0], 0,
2374 			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2375 
2376 	udc->resume_state = USB_STATE_NOTATTACHED;
2377 	udc->usb_state = USB_STATE_POWERED;
2378 	udc->ep0_dir = 0;
2379 
2380 	spin_lock_init(&udc->lock);
2381 	return udc;
2382 
2383 cleanup:
2384 	kfree(udc);
2385 	return NULL;
2386 }
2387 
2388 /* USB Controller register init */
2389 static int qe_udc_reg_init(struct qe_udc *udc)
2390 {
2391 	struct usb_ctlr __iomem *qe_usbregs;
2392 	qe_usbregs = udc->usb_regs;
2393 
2394 	/* Spec says that we must enable the USB controller to change mode. */
2395 	out_8(&qe_usbregs->usb_usmod, 0x01);
2396 	/* Mode changed, now disable it, since muram isn't initialized yet. */
2397 	out_8(&qe_usbregs->usb_usmod, 0x00);
2398 
2399 	/* Initialize the rest. */
2400 	out_be16(&qe_usbregs->usb_usbmr, 0);
2401 	out_8(&qe_usbregs->usb_uscom, 0);
2402 	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2403 
2404 	return 0;
2405 }
2406 
2407 static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2408 {
2409 	struct qe_ep *ep = &udc->eps[pipe_num];
2410 
2411 	ep->udc = udc;
2412 	strcpy(ep->name, ep_name[pipe_num]);
2413 	ep->ep.name = ep_name[pipe_num];
2414 
2415 	if (pipe_num == 0) {
2416 		ep->ep.caps.type_control = true;
2417 	} else {
2418 		ep->ep.caps.type_iso = true;
2419 		ep->ep.caps.type_bulk = true;
2420 		ep->ep.caps.type_int = true;
2421 	}
2422 
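	/*
	 * Advertise both directions so ep_autoconfig can pick either one;
	 * the actual direction is fixed later when the endpoint is enabled.
	 */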
2423 	ep->ep.caps.dir_in = true;
2424 	ep->ep.caps.dir_out = true;
2425 
2426 	ep->ep.ops = &qe_ep_ops;
2427 	ep->stopped = 1;
2428 	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
2429 	ep->ep.desc = NULL;
2430 	ep->dir = 0xff;
2431 	ep->epnum = (u8)pipe_num;
2432 	ep->sent = 0;
2433 	ep->last = 0;
2434 	ep->init = 0;
2435 	ep->rxframe = NULL;
2436 	ep->txframe = NULL;
2437 	ep->tx_req = NULL;
2438 	ep->state = EP_STATE_IDLE;
2439 	ep->has_data = 0;
2440 
2441 	/* the queue lists any req for this ep */
2442 	INIT_LIST_HEAD(&ep->queue);
2443 
2444 	/* gadget.ep_list is used for ep_autoconfig, so ep0 is not added */
2445 	if (pipe_num != 0)
2446 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2447 
2448 	ep->gadget = &udc->gadget;
2449 
2450 	return 0;
2451 }
2452 
2453 /*-----------------------------------------------------------------------
2454  *	UDC device Driver operation functions				*
2455  *----------------------------------------------------------------------*/
2456 static void qe_udc_release(struct device *dev)
2457 {
2458 	struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2459 	int i;
2460 
2461 	complete(udc->done);
2462 	cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2463 	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2464 		udc->ep_param[i] = NULL;
2465 
2466 	kfree(udc);
2467 }
2468 
2469 /* Driver probe functions */
2470 static const struct of_device_id qe_udc_match[];
2471 static int qe_udc_probe(struct platform_device *ofdev)
2472 {
2473 	struct qe_udc *udc;
2474 	const struct of_device_id *match;
2475 	struct device_node *np = ofdev->dev.of_node;
2476 	struct qe_ep *ep;
2477 	int ret = 0;
2478 	unsigned int i;
2479 	const void *prop;
2480 
2481 	match = of_match_device(qe_udc_match, &ofdev->dev);
2482 	if (!match)
2483 		return -EINVAL;
2484 
2485 	prop = of_get_property(np, "mode", NULL);
2486 	if (!prop || strcmp(prop, "peripheral"))
2487 		return -ENODEV;
2488 
2489 	/* Initialize the udc structure, including the ep parameter area */
2490 	udc = qe_udc_config(ofdev);
2491 	if (!udc) {
2492 		dev_err(&ofdev->dev, "failed to initialize\n");
2493 		return -ENOMEM;
2494 	}
2495 
2496 	udc->soc_type = (unsigned long)match->data;
2497 	udc->usb_regs = of_iomap(np, 0);
2498 	if (!udc->usb_regs) {
2499 		ret = -ENOMEM;
2500 		goto err1;
2501 	}
2502 
2503 	/* initialize the USB hardware registers, except for the per-EP
2504 	 * registers; leave the interrupt registers untouched */
2505 	qe_udc_reg_init(udc);
2506 
2507 	/* here come the standard probe operations:
2508 	 * set up the qe_udc->gadget.xxx fields */
2509 	udc->gadget.ops = &qe_gadget_ops;
2510 
2511 	/* gadget.ep0 is a pointer */
2512 	udc->gadget.ep0 = &udc->eps[0].ep;
2513 
2514 	INIT_LIST_HEAD(&udc->gadget.ep_list);
2515 
2516 	/* modified during gadget registration */
2517 	udc->gadget.speed = USB_SPEED_UNKNOWN;
2518 
2519 	/* name: Identifies the controller hardware type. */
2520 	udc->gadget.name = driver_name;
2521 	udc->gadget.dev.parent = &ofdev->dev;
2522 
2523 	/* initialize qe_ep struct */
2524 	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2525 		/* the ep type isn't decided here, so
2526 		 * qe_ep_init() is called from ep_enable() */
2527 
2528 		/* set up the qe_ep struct and link ep.ep.ep_list
2529 		 * into gadget.ep_list */
2530 		qe_ep_config(udc, (unsigned char)i);
2531 	}
2532 
2533 	/* ep0 initialization in here */
2534 	ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2535 	if (ret)
2536 		goto err2;
2537 
2538 	/* create a buffer for sending ZLPs; it must remain zeroed */
2539 	udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
2540 	if (udc->nullbuf == NULL) {
2541 		ret = -ENOMEM;
2542 		goto err3;
2543 	}
2544 
2545 	/* buffer for data of get_status request */
2546 	udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
2547 	if (udc->statusbuf == NULL) {
2548 		ret = -ENOMEM;
2549 		goto err3;
2550 	}
2551 
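	/*
	 * Get a DMA address for the ZLP buffer: fall back to dma_map_single()
	 * only when virt_to_phys() does not yield a usable address, otherwise
	 * just sync the buffer for the device.
	 */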
2552 	udc->nullp = virt_to_phys((void *)udc->nullbuf);
2553 	if (udc->nullp == DMA_ADDR_INVALID) {
2554 		udc->nullp = dma_map_single(
2555 					udc->gadget.dev.parent,
2556 					udc->nullbuf,
2557 					256,
2558 					DMA_TO_DEVICE);
2559 		udc->nullmap = 1;
2560 	} else {
2561 		dma_sync_single_for_device(udc->gadget.dev.parent,
2562 					udc->nullp, 256,
2563 					DMA_TO_DEVICE);
2564 	}
2565 
2566 	tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
2567 	/* request the USB irq */
2568 	udc->usb_irq = irq_of_parse_and_map(np, 0);
2569 	if (!udc->usb_irq) {
2570 		ret = -EINVAL;
2571 		goto err_noirq;
2572 	}
2573 
2574 	ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2575 				driver_name, udc);
2576 	if (ret) {
2577 		dev_err(udc->dev, "cannot request irq %d err %d\n",
2578 				udc->usb_irq, ret);
2579 		goto err4;
2580 	}
2581 
2582 	ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2583 			qe_udc_release);
2584 	if (ret)
2585 		goto err5;
2586 
2587 	platform_set_drvdata(ofdev, udc);
2588 	dev_info(udc->dev,
2589 			"%s USB controller initialized as device\n",
2590 			(udc->soc_type == PORT_QE) ? "QE" : "CPM");
2591 	return 0;
2592 
2593 err5:
2594 	free_irq(udc->usb_irq, udc);
2595 err4:
2596 	irq_dispose_mapping(udc->usb_irq);
2597 err_noirq:
2598 	if (udc->nullmap) {
2599 		dma_unmap_single(udc->gadget.dev.parent,
2600 			udc->nullp, 256,
2601 				DMA_TO_DEVICE);
2602 		udc->nullp = DMA_ADDR_INVALID;
2603 	} else {
2604 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2605 			udc->nullp, 256,
2606 				DMA_TO_DEVICE);
2607 	}
2608 err3:
2609 	ep = &udc->eps[0];
2610 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2611 	kfree(ep->rxframe);
2612 	kfree(ep->rxbuffer);
2613 	kfree(ep->txframe);
2614 err2:
2615 	iounmap(udc->usb_regs);
2616 err1:
2617 	kfree(udc);
2618 	return ret;
2619 }
2620 
2621 #ifdef CONFIG_PM
2622 static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2623 {
2624 	return -ENOTSUPP;
2625 }
2626 
2627 static int qe_udc_resume(struct platform_device *dev)
2628 {
2629 	return -ENOTSUPP;
2630 }
2631 #endif
2632 
2633 static void qe_udc_remove(struct platform_device *ofdev)
2634 {
2635 	struct qe_udc *udc = platform_get_drvdata(ofdev);
2636 	struct qe_ep *ep;
2637 	unsigned int size;
2638 	DECLARE_COMPLETION_ONSTACK(done);
2639 
2640 	usb_del_gadget_udc(&udc->gadget);
2641 
2642 	udc->done = &done;
2643 	tasklet_disable(&udc->rx_tasklet);
2644 
2645 	if (udc->nullmap) {
2646 		dma_unmap_single(udc->gadget.dev.parent,
2647 			udc->nullp, 256,
2648 				DMA_TO_DEVICE);
2649 		udc->nullp = DMA_ADDR_INVALID;
2650 	} else {
2651 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2652 			udc->nullp, 256,
2653 				DMA_TO_DEVICE);
2654 	}
2655 
2656 	ep = &udc->eps[0];
2657 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2658 	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2659 
2660 	kfree(ep->rxframe);
2661 	if (ep->rxbufmap) {
2662 		dma_unmap_single(udc->gadget.dev.parent,
2663 				ep->rxbuf_d, size,
2664 				DMA_FROM_DEVICE);
2665 		ep->rxbuf_d = DMA_ADDR_INVALID;
2666 	} else {
2667 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2668 				ep->rxbuf_d, size,
2669 				DMA_FROM_DEVICE);
2670 	}
2671 
2672 	kfree(ep->rxbuffer);
2673 	kfree(ep->txframe);
2674 
2675 	free_irq(udc->usb_irq, udc);
2676 	irq_dispose_mapping(udc->usb_irq);
2677 
2678 	tasklet_kill(&udc->rx_tasklet);
2679 
2680 	iounmap(udc->usb_regs);
2681 
2682 	/* wait for release() of gadget.dev to free udc */
2683 	wait_for_completion(&done);
2684 }
2685 
2686 /*-------------------------------------------------------------------------*/
2687 static const struct of_device_id qe_udc_match[] = {
2688 	{
2689 		.compatible = "fsl,mpc8323-qe-usb",
2690 		.data = (void *)PORT_QE,
2691 	},
2692 	{
2693 		.compatible = "fsl,mpc8360-qe-usb",
2694 		.data = (void *)PORT_QE,
2695 	},
2696 	{
2697 		.compatible = "fsl,mpc8272-cpm-usb",
2698 		.data = (void *)PORT_CPM,
2699 	},
2700 	{},
2701 };
2702 
2703 MODULE_DEVICE_TABLE(of, qe_udc_match);
2704 
2705 static struct platform_driver udc_driver = {
2706 	.driver = {
2707 		.name = driver_name,
2708 		.of_match_table = qe_udc_match,
2709 	},
2710 	.probe          = qe_udc_probe,
2711 	.remove_new     = qe_udc_remove,
2712 #ifdef CONFIG_PM
2713 	.suspend        = qe_udc_suspend,
2714 	.resume         = qe_udc_resume,
2715 #endif
2716 };
2717 
2718 module_platform_driver(udc_driver);
2719 
2720 MODULE_DESCRIPTION(DRIVER_DESC);
2721 MODULE_AUTHOR(DRIVER_AUTHOR);
2722 MODULE_LICENSE("GPL");
2723