1 /*
2  * MUSB OTG driver peripheral support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
26  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 #define __UBOOT__
37 #ifndef __UBOOT__
38 #include <linux/kernel.h>
39 #include <linux/list.h>
40 #include <linux/timer.h>
41 #include <linux/module.h>
42 #include <linux/smp.h>
43 #include <linux/spinlock.h>
44 #include <linux/delay.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/slab.h>
47 #else
48 #include <common.h>
49 #include <linux/usb/ch9.h>
50 #include "linux-compat.h"
51 #endif
52 
53 #include "musb_core.h"
54 
55 
56 /* MUSB PERIPHERAL status 3-mar-2006:
57  *
58  * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
59  *   Minor glitches:
60  *
 *     + remote wakeup to Linux hosts works, but USBCV failures were
 *       seen in one test run (operator error?)
63  *     + endpoint halt tests -- in both usbtest and usbcv -- seem
64  *       to break when dma is enabled ... is something wrongly
65  *       clearing SENDSTALL?
66  *
67  * - Mass storage behaved ok when last tested.  Network traffic patterns
68  *   (with lots of short transfers etc) need retesting; they turn up the
69  *   worst cases of the DMA, since short packets are typical but are not
70  *   required.
71  *
72  * - TX/IN
 *     + both pio and dma behave well with network and g_zero tests
74  *     + no cppi throughput issues other than no-hw-queueing
75  *     + failed with FLAT_REG (DaVinci)
76  *     + seems to behave with double buffering, PIO -and- CPPI
77  *     + with gadgetfs + AIO, requests got lost?
78  *
79  * - RX/OUT
 *     + both pio and dma behave well with network and g_zero tests
81  *     + dma is slow in typical case (short_not_ok is clear)
82  *     + double buffering ok with PIO
83  *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
84  *     + request lossage observed with gadgetfs
85  *
86  * - ISO not tested ... might work, but only weakly isochronous
87  *
88  * - Gadget driver disabling of softconnect during bind() is ignored; so
89  *   drivers can't hold off host requests until userspace is ready.
90  *   (Workaround:  they can turn it off later.)
91  *
92  * - PORTABILITY (assumes PIO works):
93  *     + DaVinci, basically works with cppi dma
94  *     + OMAP 2430, ditto with mentor dma
95  *     + TUSB 6010, platform-specific dma in the works
96  */
97 
98 /* ----------------------------------------------------------------------- */
99 
100 #define is_buffer_mapped(req) (is_dma_capable() && \
101 					(req->map_state != UN_MAPPED))
102 
103 #ifndef CONFIG_MUSB_PIO_ONLY
/* Map the buffer for DMA */
105 
106 static inline void map_dma_buffer(struct musb_request *request,
107 			struct musb *musb, struct musb_ep *musb_ep)
108 {
109 	int compatible = true;
110 	struct dma_controller *dma = musb->dma_controller;
111 
112 	request->map_state = UN_MAPPED;
113 
114 	if (!is_dma_capable() || !musb_ep->dma)
115 		return;
116 
117 	/* Check if DMA engine can handle this request.
118 	 * DMA code must reject the USB request explicitly.
119 	 * Default behaviour is to map the request.
120 	 */
121 	if (dma->is_compatible)
122 		compatible = dma->is_compatible(musb_ep->dma,
123 				musb_ep->packet_sz, request->request.buf,
124 				request->request.length);
125 	if (!compatible)
126 		return;
127 
128 	if (request->request.dma == DMA_ADDR_INVALID) {
129 		request->request.dma = dma_map_single(
130 				musb->controller,
131 				request->request.buf,
132 				request->request.length,
133 				request->tx
134 					? DMA_TO_DEVICE
135 					: DMA_FROM_DEVICE);
136 		request->map_state = MUSB_MAPPED;
137 	} else {
138 		dma_sync_single_for_device(musb->controller,
139 			request->request.dma,
140 			request->request.length,
141 			request->tx
142 				? DMA_TO_DEVICE
143 				: DMA_FROM_DEVICE);
144 		request->map_state = PRE_MAPPED;
145 	}
146 }
147 
/* Unmap the buffer from DMA and map it back to the CPU */
149 static inline void unmap_dma_buffer(struct musb_request *request,
150 				struct musb *musb)
151 {
152 	if (!is_buffer_mapped(request))
153 		return;
154 
155 	if (request->request.dma == DMA_ADDR_INVALID) {
156 		dev_vdbg(musb->controller,
157 				"not unmapping a never mapped buffer\n");
158 		return;
159 	}
160 	if (request->map_state == MUSB_MAPPED) {
161 		dma_unmap_single(musb->controller,
162 			request->request.dma,
163 			request->request.length,
164 			request->tx
165 				? DMA_TO_DEVICE
166 				: DMA_FROM_DEVICE);
167 		request->request.dma = DMA_ADDR_INVALID;
168 	} else { /* PRE_MAPPED */
169 		dma_sync_single_for_cpu(musb->controller,
170 			request->request.dma,
171 			request->request.length,
172 			request->tx
173 				? DMA_TO_DEVICE
174 				: DMA_FROM_DEVICE);
175 	}
176 	request->map_state = UN_MAPPED;
177 }
178 #else
179 static inline void map_dma_buffer(struct musb_request *request,
180 			struct musb *musb, struct musb_ep *musb_ep)
181 {
182 }
183 
184 static inline void unmap_dma_buffer(struct musb_request *request,
185 				struct musb *musb)
186 {
187 }
188 #endif
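
/*
 * Illustrative sketch (not extra driver logic): the map_state transitions
 * implemented by the two helpers above, assuming a DMA-capable controller
 * and an endpoint that owns a DMA channel, are:
 *
 *	map_dma_buffer():
 *		request.dma == DMA_ADDR_INVALID (no mapping yet)
 *			-> dma_map_single()             -> MUSB_MAPPED
 *		request.dma already valid (gadget pre-mapped the buffer)
 *			-> dma_sync_single_for_device() -> PRE_MAPPED
 *
 *	unmap_dma_buffer():
 *		MUSB_MAPPED -> dma_unmap_single(), dma = DMA_ADDR_INVALID
 *		PRE_MAPPED  -> dma_sync_single_for_cpu()
 *		either way  -> UN_MAPPED
 */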
189 
/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued on
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
197 void musb_g_giveback(
198 	struct musb_ep		*ep,
199 	struct usb_request	*request,
200 	int			status)
201 __releases(ep->musb->lock)
202 __acquires(ep->musb->lock)
203 {
204 	struct musb_request	*req;
205 	struct musb		*musb;
206 	int			busy = ep->busy;
207 
208 	req = to_musb_request(request);
209 
210 	list_del(&req->list);
211 	if (req->request.status == -EINPROGRESS)
212 		req->request.status = status;
213 	musb = req->musb;
214 
215 	ep->busy = 1;
216 	spin_unlock(&musb->lock);
217 	unmap_dma_buffer(req, musb);
218 	if (request->status == 0)
219 		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
220 				ep->end_point.name, request,
221 				req->request.actual, req->request.length);
222 	else
223 		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
224 				ep->end_point.name, request,
225 				req->request.actual, req->request.length,
226 				request->status);
227 	req->request.complete(&req->ep->end_point, &req->request);
228 	spin_lock(&musb->lock);
229 	ep->busy = busy;
230 }
231 
232 /* ----------------------------------------------------------------------- */
233 
/*
 * Abort requests queued to an endpoint, completing them with the given
 * status. Synchronous. The caller has locked the controller, blocked
 * IRQs, and selected this endpoint.
 */
238 static void nuke(struct musb_ep *ep, const int status)
239 {
240 	struct musb		*musb = ep->musb;
241 	struct musb_request	*req = NULL;
242 	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
243 
244 	ep->busy = 1;
245 
246 	if (is_dma_capable() && ep->dma) {
247 		struct dma_controller	*c = ep->musb->dma_controller;
248 		int value;
249 
250 		if (ep->is_in) {
251 			/*
252 			 * The programming guide says that we must not clear
253 			 * the DMAMODE bit before DMAENAB, so we only
254 			 * clear it in the second write...
255 			 */
256 			musb_writew(epio, MUSB_TXCSR,
257 				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
258 			musb_writew(epio, MUSB_TXCSR,
259 					0 | MUSB_TXCSR_FLUSHFIFO);
260 		} else {
261 			musb_writew(epio, MUSB_RXCSR,
262 					0 | MUSB_RXCSR_FLUSHFIFO);
263 			musb_writew(epio, MUSB_RXCSR,
264 					0 | MUSB_RXCSR_FLUSHFIFO);
265 		}
266 
267 		value = c->channel_abort(ep->dma);
268 		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
269 				ep->name, value);
270 		c->channel_release(ep->dma);
271 		ep->dma = NULL;
272 	}
273 
274 	while (!list_empty(&ep->req_list)) {
275 		req = list_first_entry(&ep->req_list, struct musb_request, list);
276 		musb_g_giveback(ep, &req->request, status);
277 	}
278 }
279 
280 /* ----------------------------------------------------------------------- */
281 
282 /* Data transfers - pure PIO, pure DMA, or mixed mode */
283 
284 /*
285  * This assumes the separate CPPI engine is responding to DMA requests
286  * from the usb core ... sequenced a bit differently from mentor dma.
287  */
288 
289 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
290 {
291 	if (can_bulk_split(musb, ep->type))
292 		return ep->hw_ep->max_packet_sz_tx;
293 	else
294 		return ep->packet_sz;
295 }
296 
297 
298 #ifdef CONFIG_USB_INVENTRA_DMA
299 
300 /* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize;
	mode 1 is used for larger transfers.
303 
304 	One of the following happens:
305 	- Host sends IN token which causes an endpoint interrupt
306 		-> TxAvail
307 			-> if DMA is currently busy, exit.
308 			-> if queue is non-empty, txstate().
309 
310 	- Request is queued by the gadget driver.
311 		-> if queue was previously empty, txstate()
312 
313 	txstate()
314 		-> start
315 		  /\	-> setup DMA
		  |     (data is transferred to the FIFO, then sent out when
		  |	IN token(s) are received from the Host.)
318 		  |		-> DMA interrupt on completion
319 		  |		   calls TxAvail.
320 		  |		      -> stop DMA, ~DMAENAB,
321 		  |		      -> set TxPktRdy for last short pkt or zlp
322 		  |		      -> Complete Request
323 		  |		      -> Continue next request (call txstate)
324 		  |___________________________________|
325 
326  * Non-Mentor DMA engines can of course work differently, such as by
327  * upleveling from irq-per-packet to irq-per-buffer.
328  */
329 
330 #endif
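
/*
 * Worked example (illustrative, assuming wPktSize = 512): a 64-byte
 * request is programmed in mode 0, the DMA merely fills the FIFO, and
 * the completion path in musb_g_tx() sets TxPktRdy by hand; an
 * 8192-byte request is programmed in mode 1, where the hardware sets
 * TxPktRdy per packet and software only kicks a trailing short packet
 * or ZLP (request->zero) once DMA completes.
 */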
331 
332 /*
333  * An endpoint is transmitting data. This can be called either from
334  * the IRQ routine or from ep.queue() to kickstart a request on an
335  * endpoint.
336  *
337  * Context: controller locked, IRQs blocked, endpoint selected
338  */
339 static void txstate(struct musb *musb, struct musb_request *req)
340 {
341 	u8			epnum = req->epnum;
342 	struct musb_ep		*musb_ep;
343 	void __iomem		*epio = musb->endpoints[epnum].regs;
344 	struct usb_request	*request;
345 	u16			fifo_count = 0, csr;
346 	int			use_dma = 0;
347 
348 	musb_ep = req->ep;
349 
350 	/* Check if EP is disabled */
351 	if (!musb_ep->desc) {
352 		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
353 						musb_ep->end_point.name);
354 		return;
355 	}
356 
357 	/* we shouldn't get here while DMA is active ... but we do ... */
358 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
359 		dev_dbg(musb->controller, "dma pending...\n");
360 		return;
361 	}
362 
363 	/* read TXCSR before */
364 	csr = musb_readw(epio, MUSB_TXCSR);
365 
366 	request = &req->request;
367 	fifo_count = min(max_ep_writesize(musb, musb_ep),
368 			(int)(request->length - request->actual));
369 
370 	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
372 				musb_ep->end_point.name, csr);
373 		return;
374 	}
375 
376 	if (csr & MUSB_TXCSR_P_SENDSTALL) {
377 		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
378 				musb_ep->end_point.name, csr);
379 		return;
380 	}
381 
382 	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
383 			epnum, musb_ep->packet_sz, fifo_count,
384 			csr);
385 
386 #ifndef	CONFIG_MUSB_PIO_ONLY
387 	if (is_buffer_mapped(req)) {
388 		struct dma_controller	*c = musb->dma_controller;
389 		size_t request_size;
390 
391 		/* setup DMA, then program endpoint CSR */
392 		request_size = min_t(size_t, request->length - request->actual,
393 					musb_ep->dma->max_len);
394 
395 		use_dma = (request->dma != DMA_ADDR_INVALID);
396 
397 		/* MUSB_TXCSR_P_ISO is still set correctly */
398 
399 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
400 		{
401 			if (request_size < musb_ep->packet_sz)
402 				musb_ep->dma->desired_mode = 0;
403 			else
404 				musb_ep->dma->desired_mode = 1;
405 
406 			use_dma = use_dma && c->channel_program(
407 					musb_ep->dma, musb_ep->packet_sz,
408 					musb_ep->dma->desired_mode,
409 					request->dma + request->actual, request_size);
410 			if (use_dma) {
411 				if (musb_ep->dma->desired_mode == 0) {
412 					/*
413 					 * We must not clear the DMAMODE bit
414 					 * before the DMAENAB bit -- and the
415 					 * latter doesn't always get cleared
416 					 * before we get here...
417 					 */
418 					csr &= ~(MUSB_TXCSR_AUTOSET
419 						| MUSB_TXCSR_DMAENAB);
420 					musb_writew(epio, MUSB_TXCSR, csr
421 						| MUSB_TXCSR_P_WZC_BITS);
422 					csr &= ~MUSB_TXCSR_DMAMODE;
423 					csr |= (MUSB_TXCSR_DMAENAB |
424 							MUSB_TXCSR_MODE);
425 					/* against programming guide */
426 				} else {
427 					csr |= (MUSB_TXCSR_DMAENAB
428 							| MUSB_TXCSR_DMAMODE
429 							| MUSB_TXCSR_MODE);
430 					if (!musb_ep->hb_mult)
431 						csr |= MUSB_TXCSR_AUTOSET;
432 				}
433 				csr &= ~MUSB_TXCSR_P_UNDERRUN;
434 
435 				musb_writew(epio, MUSB_TXCSR, csr);
436 			}
437 		}
438 
439 #elif defined(CONFIG_USB_TI_CPPI_DMA)
440 		/* program endpoint CSR first, then setup DMA */
441 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
442 		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
443 		       MUSB_TXCSR_MODE;
444 		musb_writew(epio, MUSB_TXCSR,
445 			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
446 				| csr);
447 
448 		/* ensure writebuffer is empty */
449 		csr = musb_readw(epio, MUSB_TXCSR);
450 
451 		/* NOTE host side sets DMAENAB later than this; both are
452 		 * OK since the transfer dma glue (between CPPI and Mentor
453 		 * fifos) just tells CPPI it could start.  Data only moves
454 		 * to the USB TX fifo when both fifos are ready.
455 		 */
456 
457 		/* "mode" is irrelevant here; handle terminating ZLPs like
458 		 * PIO does, since the hardware RNDIS mode seems unreliable
459 		 * except for the last-packet-is-already-short case.
460 		 */
461 		use_dma = use_dma && c->channel_program(
462 				musb_ep->dma, musb_ep->packet_sz,
463 				0,
464 				request->dma + request->actual,
465 				request_size);
466 		if (!use_dma) {
467 			c->channel_release(musb_ep->dma);
468 			musb_ep->dma = NULL;
469 			csr &= ~MUSB_TXCSR_DMAENAB;
470 			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
472 		}
473 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
474 		use_dma = use_dma && c->channel_program(
475 				musb_ep->dma, musb_ep->packet_sz,
476 				request->zero,
477 				request->dma + request->actual,
478 				request_size);
479 #endif
480 	}
481 #endif
482 
483 	if (!use_dma) {
484 		/*
485 		 * Unmap the dma buffer back to cpu if dma channel
486 		 * programming fails
487 		 */
488 		unmap_dma_buffer(req, musb);
489 
490 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
491 				(u8 *) (request->buf + request->actual));
492 		request->actual += fifo_count;
493 		csr |= MUSB_TXCSR_TXPKTRDY;
494 		csr &= ~MUSB_TXCSR_P_UNDERRUN;
495 		musb_writew(epio, MUSB_TXCSR, csr);
496 	}
497 
498 	/* host may already have the data when this message shows... */
499 	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
500 			musb_ep->end_point.name, use_dma ? "dma" : "pio",
501 			request->actual, request->length,
502 			musb_readw(epio, MUSB_TXCSR),
503 			fifo_count,
504 			musb_readw(epio, MUSB_TXMAXP));
505 }
506 
507 /*
508  * FIFO state update (e.g. data ready).
509  * Called from IRQ,  with controller locked.
510  */
511 void musb_g_tx(struct musb *musb, u8 epnum)
512 {
513 	u16			csr;
514 	struct musb_request	*req;
515 	struct usb_request	*request;
516 	u8 __iomem		*mbase = musb->mregs;
517 	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
518 	void __iomem		*epio = musb->endpoints[epnum].regs;
519 	struct dma_channel	*dma;
520 
521 	musb_ep_select(mbase, epnum);
522 	req = next_request(musb_ep);
523 	request = &req->request;
524 
525 	csr = musb_readw(epio, MUSB_TXCSR);
526 	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
527 
528 	dma = is_dma_capable() ? musb_ep->dma : NULL;
529 
530 	/*
531 	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
532 	 * probably rates reporting as a host error.
533 	 */
534 	if (csr & MUSB_TXCSR_P_SENTSTALL) {
535 		csr |=	MUSB_TXCSR_P_WZC_BITS;
536 		csr &= ~MUSB_TXCSR_P_SENTSTALL;
537 		musb_writew(epio, MUSB_TXCSR, csr);
538 		return;
539 	}
540 
541 	if (csr & MUSB_TXCSR_P_UNDERRUN) {
542 		/* We NAKed, no big deal... little reason to care. */
543 		csr |=	 MUSB_TXCSR_P_WZC_BITS;
544 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
545 		musb_writew(epio, MUSB_TXCSR, csr);
546 		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
547 				epnum, request);
548 	}
549 
550 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
551 		/*
552 		 * SHOULD NOT HAPPEN... has with CPPI though, after
553 		 * changing SENDSTALL (and other cases); harmless?
554 		 */
555 		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
556 		return;
557 	}
558 
559 	if (request) {
560 		u8	is_dma = 0;
561 
562 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
563 			is_dma = 1;
564 			csr |= MUSB_TXCSR_P_WZC_BITS;
565 			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
566 				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
567 			musb_writew(epio, MUSB_TXCSR, csr);
568 			/* Ensure writebuffer is empty. */
569 			csr = musb_readw(epio, MUSB_TXCSR);
570 			request->actual += musb_ep->dma->actual_len;
571 			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
572 				epnum, csr, musb_ep->dma->actual_len, request);
573 		}
574 
575 		/*
576 		 * First, maybe a terminating short packet. Some DMA
577 		 * engines might handle this by themselves.
578 		 */
579 		if ((request->zero && request->length
580 			&& (request->length % musb_ep->packet_sz == 0)
581 			&& (request->actual == request->length))
582 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
583 			|| (is_dma && (!dma->desired_mode ||
584 				(request->actual &
585 					(musb_ep->packet_sz - 1))))
586 #endif
587 		) {
588 			/*
589 			 * On DMA completion, FIFO may not be
590 			 * available yet...
591 			 */
592 			if (csr & MUSB_TXCSR_TXPKTRDY)
593 				return;
594 
595 			dev_dbg(musb->controller, "sending zero pkt\n");
596 			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
597 					| MUSB_TXCSR_TXPKTRDY);
598 			request->zero = 0;
599 		}
600 
601 		if (request->actual == request->length) {
602 			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and re-acquired after some time. During
			 * this period the INDEX register could get
			 * changed by the gadget_queue function, especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers.
			 */
611 			musb_ep_select(mbase, epnum);
612 			req = musb_ep->desc ? next_request(musb_ep) : NULL;
613 			if (!req) {
614 				dev_dbg(musb->controller, "%s idle now\n",
615 					musb_ep->end_point.name);
616 				return;
617 			}
618 		}
619 
620 		txstate(musb, req);
621 	}
622 }
623 
624 /* ------------------------------------------------------------ */
625 
626 #ifdef CONFIG_USB_INVENTRA_DMA
627 
628 /* Peripheral rx (OUT) using Mentor DMA works as follows:
629 	- Only mode 0 is used.
630 
631 	- Request is queued by the gadget class driver.
632 		-> if queue was previously empty, rxstate()
633 
634 	- Host sends OUT token which causes an endpoint interrupt
635 	  /\      -> RxReady
636 	  |	      -> if request queued, call rxstate
637 	  |		/\	-> setup DMA
638 	  |		|	     -> DMA interrupt on completion
639 	  |		|		-> RxReady
640 	  |		|		      -> stop DMA
641 	  |		|		      -> ack the read
642 	  |		|		      -> if data recd = max expected
643 	  |		|				by the request, or host
644 	  |		|				sent a short packet,
645 	  |		|				complete the request,
646 	  |		|				and start the next one.
647 	  |		|_____________________________________|
648 	  |					 else just wait for the host
649 	  |					    to send the next OUT token.
650 	  |__________________________________________________|
651 
652  * Non-Mentor DMA engines can of course work differently.
653  */
654 
655 #endif
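
/*
 * Worked example (illustrative, assuming packet_sz = 512): a 4096-byte
 * OUT request serviced in mode 0 takes one DMA completion interrupt per
 * 512-byte packet, with rxstate() re-arming the channel each time, so
 * eight interrupts unload the full buffer; a short packet (say 100
 * bytes) ends the request early via musb_g_giveback().
 */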
656 
657 /*
658  * Context: controller locked, IRQs blocked, endpoint selected
659  */
660 static void rxstate(struct musb *musb, struct musb_request *req)
661 {
662 	const u8		epnum = req->epnum;
663 	struct usb_request	*request = &req->request;
664 	struct musb_ep		*musb_ep;
665 	void __iomem		*epio = musb->endpoints[epnum].regs;
666 	unsigned		fifo_count = 0;
667 	u16			len;
668 	u16			csr = musb_readw(epio, MUSB_RXCSR);
669 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
670 	u8			use_mode_1;
671 
672 	if (hw_ep->is_shared_fifo)
673 		musb_ep = &hw_ep->ep_in;
674 	else
675 		musb_ep = &hw_ep->ep_out;
676 
677 	len = musb_ep->packet_sz;
678 
679 	/* Check if EP is disabled */
680 	if (!musb_ep->desc) {
681 		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
682 						musb_ep->end_point.name);
683 		return;
684 	}
685 
686 	/* We shouldn't get here while DMA is active, but we do... */
687 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
688 		dev_dbg(musb->controller, "DMA pending...\n");
689 		return;
690 	}
691 
692 	if (csr & MUSB_RXCSR_P_SENDSTALL) {
693 		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
694 		    musb_ep->end_point.name, csr);
695 		return;
696 	}
697 
698 	if (is_cppi_enabled() && is_buffer_mapped(req)) {
699 		struct dma_controller	*c = musb->dma_controller;
700 		struct dma_channel	*channel = musb_ep->dma;
701 
702 		/* NOTE:  CPPI won't actually stop advancing the DMA
703 		 * queue after short packet transfers, so this is almost
704 		 * always going to run as IRQ-per-packet DMA so that
705 		 * faults will be handled correctly.
706 		 */
707 		if (c->channel_program(channel,
708 				musb_ep->packet_sz,
709 				!request->short_not_ok,
710 				request->dma + request->actual,
711 				request->length - request->actual)) {
712 
713 			/* make sure that if an rxpkt arrived after the irq,
714 			 * the cppi engine will be ready to take it as soon
715 			 * as DMA is enabled
716 			 */
717 			csr &= ~(MUSB_RXCSR_AUTOCLEAR
718 					| MUSB_RXCSR_DMAMODE);
719 			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
720 			musb_writew(epio, MUSB_RXCSR, csr);
721 			return;
722 		}
723 	}
724 
725 	if (csr & MUSB_RXCSR_RXPKTRDY) {
726 		len = musb_readw(epio, MUSB_RXCOUNT);
727 
728 		/*
		 * Enable Mode 1 on RX transfers only when the short_not_ok
		 * flag is set. Currently the short_not_ok flag is set only
		 * by the file_storage and f_mass_storage drivers.
732 		 */
733 
734 		if (request->short_not_ok && len == musb_ep->packet_sz)
735 			use_mode_1 = 1;
736 		else
737 			use_mode_1 = 0;
738 
739 		if (request->actual < request->length) {
740 #ifdef CONFIG_USB_INVENTRA_DMA
741 			if (is_buffer_mapped(req)) {
742 				struct dma_controller	*c;
743 				struct dma_channel	*channel;
744 				int			use_dma = 0;
745 
746 				c = musb->dma_controller;
747 				channel = musb_ep->dma;
748 
	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from the DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of the transfer is signified either
	 * by a short packet, or by filling the last byte of the buffer.
	 * (Sending extra data in that last packet should trigger an overflow
	 * fault.)  But in mode 1, we don't get a DMA completion interrupt
	 * for short packets.
761 	 *
762 	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
763 	 * to get endpoint interrupt on every DMA req, but that didn't seem
764 	 * to work reliably.
765 	 *
766 	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
767 	 * then becomes usable as a runtime "use mode 1" hint...
768 	 */
769 
770 				/* Experimental: Mode1 works with mass storage use cases */
771 				if (use_mode_1) {
772 					csr |= MUSB_RXCSR_AUTOCLEAR;
773 					musb_writew(epio, MUSB_RXCSR, csr);
774 					csr |= MUSB_RXCSR_DMAENAB;
775 					musb_writew(epio, MUSB_RXCSR, csr);
776 
777 					/*
778 					 * this special sequence (enabling and then
779 					 * disabling MUSB_RXCSR_DMAMODE) is required
780 					 * to get DMAReq to activate
781 					 */
782 					musb_writew(epio, MUSB_RXCSR,
783 						csr | MUSB_RXCSR_DMAMODE);
784 					musb_writew(epio, MUSB_RXCSR, csr);
785 
786 				} else {
787 					if (!musb_ep->hb_mult &&
788 						musb_ep->hw_ep->rx_double_buffered)
789 						csr |= MUSB_RXCSR_AUTOCLEAR;
790 					csr |= MUSB_RXCSR_DMAENAB;
791 					musb_writew(epio, MUSB_RXCSR, csr);
792 				}
793 
794 				if (request->actual < request->length) {
795 					int transfer_size = 0;
796 					if (use_mode_1) {
797 						transfer_size = min(request->length - request->actual,
798 								channel->max_len);
799 						musb_ep->dma->desired_mode = 1;
800 					} else {
801 						transfer_size = min(request->length - request->actual,
802 								(unsigned)len);
803 						musb_ep->dma->desired_mode = 0;
804 					}
805 
806 					use_dma = c->channel_program(
807 							channel,
808 							musb_ep->packet_sz,
809 							channel->desired_mode,
810 							request->dma
811 							+ request->actual,
812 							transfer_size);
813 				}
814 
815 				if (use_dma)
816 					return;
817 			}
818 #elif defined(CONFIG_USB_UX500_DMA)
819 			if ((is_buffer_mapped(req)) &&
820 				(request->actual < request->length)) {
821 
822 				struct dma_controller *c;
823 				struct dma_channel *channel;
824 				int transfer_size = 0;
825 
826 				c = musb->dma_controller;
827 				channel = musb_ep->dma;
828 
829 				/* In case first packet is short */
830 				if (len < musb_ep->packet_sz)
831 					transfer_size = len;
832 				else if (request->short_not_ok)
833 					transfer_size =	min(request->length -
834 							request->actual,
835 							channel->max_len);
836 				else
837 					transfer_size = min(request->length -
838 							request->actual,
839 							(unsigned)len);
840 
841 				csr &= ~MUSB_RXCSR_DMAMODE;
842 				csr |= (MUSB_RXCSR_DMAENAB |
843 					MUSB_RXCSR_AUTOCLEAR);
844 
845 				musb_writew(epio, MUSB_RXCSR, csr);
846 
847 				if (transfer_size <= musb_ep->packet_sz) {
848 					musb_ep->dma->desired_mode = 0;
849 				} else {
850 					musb_ep->dma->desired_mode = 1;
851 					/* Mode must be set after DMAENAB */
852 					csr |= MUSB_RXCSR_DMAMODE;
853 					musb_writew(epio, MUSB_RXCSR, csr);
854 				}
855 
856 				if (c->channel_program(channel,
857 							musb_ep->packet_sz,
858 							channel->desired_mode,
859 							request->dma
860 							+ request->actual,
861 							transfer_size))
					return;
864 			}
865 #endif	/* Mentor's DMA */
866 
867 			fifo_count = request->length - request->actual;
868 			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
869 					musb_ep->end_point.name,
870 					len, fifo_count,
871 					musb_ep->packet_sz);
872 
873 			fifo_count = min_t(unsigned, len, fifo_count);
874 
875 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
876 			if (tusb_dma_omap() && is_buffer_mapped(req)) {
877 				struct dma_controller *c = musb->dma_controller;
878 				struct dma_channel *channel = musb_ep->dma;
879 				u32 dma_addr = request->dma + request->actual;
880 				int ret;
881 
882 				ret = c->channel_program(channel,
883 						musb_ep->packet_sz,
884 						channel->desired_mode,
885 						dma_addr,
886 						fifo_count);
887 				if (ret)
888 					return;
889 			}
890 #endif
891 			/*
892 			 * Unmap the dma buffer back to cpu if dma channel
893 			 * programming fails. This buffer is mapped if the
894 			 * channel allocation is successful
895 			 */
			if (is_buffer_mapped(req)) {
897 				unmap_dma_buffer(req, musb);
898 
899 				/*
900 				 * Clear DMAENAB and AUTOCLEAR for the
901 				 * PIO mode transfer
902 				 */
903 				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
904 				musb_writew(epio, MUSB_RXCSR, csr);
905 			}
906 
907 			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
908 					(request->buf + request->actual));
909 			request->actual += fifo_count;
910 
911 			/* REVISIT if we left anything in the fifo, flush
912 			 * it and report -EOVERFLOW
913 			 */
914 
915 			/* ack the read! */
916 			csr |= MUSB_RXCSR_P_WZC_BITS;
917 			csr &= ~MUSB_RXCSR_RXPKTRDY;
918 			musb_writew(epio, MUSB_RXCSR, csr);
919 		}
920 	}
921 
	/* reached the end, or short packet detected */
923 	if (request->actual == request->length || len < musb_ep->packet_sz)
924 		musb_g_giveback(musb_ep, request, 0);
925 }
926 
927 /*
928  * Data ready for a request; called from IRQ
929  */
930 void musb_g_rx(struct musb *musb, u8 epnum)
931 {
932 	u16			csr;
933 	struct musb_request	*req;
934 	struct usb_request	*request;
935 	void __iomem		*mbase = musb->mregs;
936 	struct musb_ep		*musb_ep;
937 	void __iomem		*epio = musb->endpoints[epnum].regs;
938 	struct dma_channel	*dma;
939 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
940 
941 	if (hw_ep->is_shared_fifo)
942 		musb_ep = &hw_ep->ep_in;
943 	else
944 		musb_ep = &hw_ep->ep_out;
945 
946 	musb_ep_select(mbase, epnum);
947 
948 	req = next_request(musb_ep);
949 	if (!req)
950 		return;
951 
952 	request = &req->request;
953 
954 	csr = musb_readw(epio, MUSB_RXCSR);
955 	dma = is_dma_capable() ? musb_ep->dma : NULL;
956 
957 	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
958 			csr, dma ? " (dma)" : "", request);
959 
960 	if (csr & MUSB_RXCSR_P_SENTSTALL) {
961 		csr |= MUSB_RXCSR_P_WZC_BITS;
962 		csr &= ~MUSB_RXCSR_P_SENTSTALL;
963 		musb_writew(epio, MUSB_RXCSR, csr);
964 		return;
965 	}
966 
967 	if (csr & MUSB_RXCSR_P_OVERRUN) {
968 		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
969 		csr &= ~MUSB_RXCSR_P_OVERRUN;
970 		musb_writew(epio, MUSB_RXCSR, csr);
971 
972 		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
973 		if (request->status == -EINPROGRESS)
974 			request->status = -EOVERFLOW;
975 	}
976 	if (csr & MUSB_RXCSR_INCOMPRX) {
977 		/* REVISIT not necessarily an error */
978 		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
979 	}
980 
981 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
982 		/* "should not happen"; likely RXPKTRDY pending for DMA */
983 		dev_dbg(musb->controller, "%s busy, csr %04x\n",
984 			musb_ep->end_point.name, csr);
985 		return;
986 	}
987 
988 	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
989 		csr &= ~(MUSB_RXCSR_AUTOCLEAR
990 				| MUSB_RXCSR_DMAENAB
991 				| MUSB_RXCSR_DMAMODE);
992 		musb_writew(epio, MUSB_RXCSR,
993 			MUSB_RXCSR_P_WZC_BITS | csr);
994 
995 		request->actual += musb_ep->dma->actual_len;
996 
997 		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
998 			epnum, csr,
999 			musb_readw(epio, MUSB_RXCSR),
1000 			musb_ep->dma->actual_len, request);
1001 
1002 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
1003 	defined(CONFIG_USB_UX500_DMA)
1004 		/* Autoclear doesn't clear RxPktRdy for short packets */
1005 		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
1006 				|| (dma->actual_len
1007 					& (musb_ep->packet_sz - 1))) {
1008 			/* ack the read! */
1009 			csr &= ~MUSB_RXCSR_RXPKTRDY;
1010 			musb_writew(epio, MUSB_RXCSR, csr);
1011 		}
1012 
		/* incomplete, and not short? wait for next OUT packet */
1014 		if ((request->actual < request->length)
1015 				&& (musb_ep->dma->actual_len
1016 					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
1020 			csr = musb_readw(epio, MUSB_RXCSR);
1021 			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
1022 				hw_ep->rx_double_buffered)
1023 				goto exit;
1024 			return;
1025 		}
1026 #endif
1027 		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and re-acquired after some time. During
		 * this period the INDEX register could get
		 * changed by the gadget_queue function, especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers.
		 */
1036 		musb_ep_select(mbase, epnum);
1037 
1038 		req = next_request(musb_ep);
1039 		if (!req)
1040 			return;
1041 	}
1042 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
1043 	defined(CONFIG_USB_UX500_DMA)
1044 exit:
1045 #endif
1046 	/* Analyze request */
1047 	rxstate(musb, req);
1048 }
1049 
1050 /* ------------------------------------------------------------ */
1051 
1052 static int musb_gadget_enable(struct usb_ep *ep,
1053 			const struct usb_endpoint_descriptor *desc)
1054 {
1055 	unsigned long		flags;
1056 	struct musb_ep		*musb_ep;
1057 	struct musb_hw_ep	*hw_ep;
1058 	void __iomem		*regs;
1059 	struct musb		*musb;
1060 	void __iomem	*mbase;
1061 	u8		epnum;
1062 	u16		csr;
1063 	unsigned	tmp;
1064 	int		status = -EINVAL;
1065 
1066 	if (!ep || !desc)
1067 		return -EINVAL;
1068 
1069 	musb_ep = to_musb_ep(ep);
1070 	hw_ep = musb_ep->hw_ep;
1071 	regs = hw_ep->regs;
1072 	musb = musb_ep->musb;
1073 	mbase = musb->mregs;
1074 	epnum = musb_ep->current_epnum;
1075 
1076 	spin_lock_irqsave(&musb->lock, flags);
1077 
1078 	if (musb_ep->desc) {
1079 		status = -EBUSY;
1080 		goto fail;
1081 	}
1082 	musb_ep->type = usb_endpoint_type(desc);
1083 
1084 	/* check direction and (later) maxpacket size against endpoint */
1085 	if (usb_endpoint_num(desc) != epnum)
1086 		goto fail;
1087 
1088 	/* REVISIT this rules out high bandwidth periodic transfers */
1089 	tmp = usb_endpoint_maxp(desc);
1090 	if (tmp & ~0x07ff) {
1091 		int ok;
1092 
1093 		if (usb_endpoint_dir_in(desc))
1094 			ok = musb->hb_iso_tx;
1095 		else
1096 			ok = musb->hb_iso_rx;
1097 
1098 		if (!ok) {
1099 			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1100 			goto fail;
1101 		}
1102 		musb_ep->hb_mult = (tmp >> 11) & 3;
1103 	} else {
1104 		musb_ep->hb_mult = 0;
1105 	}
1106 
1107 	musb_ep->packet_sz = tmp & 0x7ff;
1108 	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
1109 
1110 	/* enable the interrupts for the endpoint, set the endpoint
1111 	 * packet size (or fail), set the mode, clear the fifo
1112 	 */
1113 	musb_ep_select(mbase, epnum);
1114 	if (usb_endpoint_dir_in(desc)) {
1115 		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1116 
1117 		if (hw_ep->is_shared_fifo)
1118 			musb_ep->is_in = 1;
1119 		if (!musb_ep->is_in)
1120 			goto fail;
1121 
1122 		if (tmp > hw_ep->max_packet_sz_tx) {
1123 			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1124 			goto fail;
1125 		}
1126 
1127 		int_txe |= (1 << epnum);
1128 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
1129 
1130 		/* REVISIT if can_bulk_split(), use by updating "tmp";
1131 		 * likewise high bandwidth periodic tx
1132 		 */
1133 		/* Set TXMAXP with the FIFO size of the endpoint
1134 		 * to disable double buffering mode.
1135 		 */
1136 		if (musb->double_buffer_not_ok)
1137 			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1138 		else
1139 			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1140 					| (musb_ep->hb_mult << 11));
1141 
1142 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1143 		if (musb_readw(regs, MUSB_TXCSR)
1144 				& MUSB_TXCSR_FIFONOTEMPTY)
1145 			csr |= MUSB_TXCSR_FLUSHFIFO;
1146 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1147 			csr |= MUSB_TXCSR_P_ISO;
1148 
1149 		/* set twice in case of double buffering */
1150 		musb_writew(regs, MUSB_TXCSR, csr);
1151 		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1152 		musb_writew(regs, MUSB_TXCSR, csr);
1153 
1154 	} else {
1155 		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1156 
1157 		if (hw_ep->is_shared_fifo)
1158 			musb_ep->is_in = 0;
1159 		if (musb_ep->is_in)
1160 			goto fail;
1161 
1162 		if (tmp > hw_ep->max_packet_sz_rx) {
1163 			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1164 			goto fail;
1165 		}
1166 
1167 		int_rxe |= (1 << epnum);
1168 		musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1169 
1170 		/* REVISIT if can_bulk_combine() use by updating "tmp"
1171 		 * likewise high bandwidth periodic rx
1172 		 */
1173 		/* Set RXMAXP with the FIFO size of the endpoint
1174 		 * to disable double buffering mode.
1175 		 */
1176 		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
1178 		else
1179 			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1180 					| (musb_ep->hb_mult << 11));
1181 
1182 		/* force shared fifo to OUT-only mode */
1183 		if (hw_ep->is_shared_fifo) {
1184 			csr = musb_readw(regs, MUSB_TXCSR);
1185 			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1186 			musb_writew(regs, MUSB_TXCSR, csr);
1187 		}
1188 
1189 		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1190 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1191 			csr |= MUSB_RXCSR_P_ISO;
1192 		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1193 			csr |= MUSB_RXCSR_DISNYET;
1194 
1195 		/* set twice in case of double buffering */
1196 		musb_writew(regs, MUSB_RXCSR, csr);
1197 		musb_writew(regs, MUSB_RXCSR, csr);
1198 	}
1199 
1200 	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
1201 	 * for some reason you run out of channels here.
1202 	 */
1203 	if (is_dma_capable() && musb->dma_controller) {
1204 		struct dma_controller	*c = musb->dma_controller;
1205 
1206 		musb_ep->dma = c->channel_alloc(c, hw_ep,
1207 				(desc->bEndpointAddress & USB_DIR_IN));
1208 	} else
1209 		musb_ep->dma = NULL;
1210 
1211 	musb_ep->desc = desc;
1212 	musb_ep->busy = 0;
1213 	musb_ep->wedged = 0;
1214 	status = 0;
1215 
1216 	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1217 			musb_driver_name, musb_ep->end_point.name,
1218 			({ char *s; switch (musb_ep->type) {
1219 			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
1220 			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
1221 			default:			s = "iso"; break;
1222 			}; s; }),
1223 			musb_ep->is_in ? "IN" : "OUT",
1224 			musb_ep->dma ? "dma, " : "",
1225 			musb_ep->packet_sz);
1226 
1227 	schedule_work(&musb->irq_work);
1228 
1229 fail:
1230 	spin_unlock_irqrestore(&musb->lock, flags);
1231 	return status;
1232 }
1233 
1234 /*
 * Disable an endpoint, flushing all queued requests.
1236  */
1237 static int musb_gadget_disable(struct usb_ep *ep)
1238 {
1239 	unsigned long	flags;
1240 	struct musb	*musb;
1241 	u8		epnum;
1242 	struct musb_ep	*musb_ep;
1243 	void __iomem	*epio;
1244 	int		status = 0;
1245 
1246 	musb_ep = to_musb_ep(ep);
1247 	musb = musb_ep->musb;
1248 	epnum = musb_ep->current_epnum;
1249 	epio = musb->endpoints[epnum].regs;
1250 
1251 	spin_lock_irqsave(&musb->lock, flags);
1252 	musb_ep_select(musb->mregs, epnum);
1253 
1254 	/* zero the endpoint sizes */
1255 	if (musb_ep->is_in) {
1256 		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1257 		int_txe &= ~(1 << epnum);
1258 		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1259 		musb_writew(epio, MUSB_TXMAXP, 0);
1260 	} else {
1261 		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1262 		int_rxe &= ~(1 << epnum);
1263 		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1264 		musb_writew(epio, MUSB_RXMAXP, 0);
1265 	}
1266 
1267 	musb_ep->desc = NULL;
1268 #ifndef __UBOOT__
1269 	musb_ep->end_point.desc = NULL;
1270 #endif
1271 
1272 	/* abort all pending DMA and requests */
1273 	nuke(musb_ep, -ESHUTDOWN);
1274 
1275 	schedule_work(&musb->irq_work);
1276 
1277 	spin_unlock_irqrestore(&(musb->lock), flags);
1278 
1279 	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1280 
1281 	return status;
1282 }
1283 
1284 /*
1285  * Allocate a request for an endpoint.
1286  * Reused by ep0 code.
1287  */
1288 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1289 {
1290 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1291 	struct musb		*musb = musb_ep->musb;
1292 	struct musb_request	*request = NULL;
1293 
1294 	request = kzalloc(sizeof *request, gfp_flags);
1295 	if (!request) {
1296 		dev_dbg(musb->controller, "not enough memory\n");
1297 		return NULL;
1298 	}
1299 
1300 	request->request.dma = DMA_ADDR_INVALID;
1301 	request->epnum = musb_ep->current_epnum;
1302 	request->ep = musb_ep;
1303 
1304 	return &request->request;
1305 }
1306 
1307 /*
1308  * Free a request
1309  * Reused by ep0 code.
1310  */
1311 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1312 {
1313 	kfree(to_musb_request(req));
1314 }
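
/*
 * Usage sketch (illustrative only; my_queue, my_complete, buf and len
 * are hypothetical names): gadget drivers reach the two helpers above
 * through the standard gadget API, then hand the request to
 * usb_ep_queue(), which lands in musb_gadget_queue() below;
 * usb_ep_free_request() returns it afterwards.
 *
 *	static int my_queue(struct usb_ep *ep, void *buf, unsigned len)
 *	{
 *		struct usb_request *req;
 *
 *		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *		if (!req)
 *			return -ENOMEM;
 *		req->buf = buf;
 *		req->length = len;
 *		req->complete = my_complete;	// runs from musb_g_giveback()
 *		return usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 */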
1315 
1316 static LIST_HEAD(buffers);
1317 
1318 struct free_record {
1319 	struct list_head	list;
1320 	struct device		*dev;
1321 	unsigned		bytes;
1322 	dma_addr_t		dma;
1323 };
1324 
1325 /*
1326  * Context: controller locked, IRQs blocked.
1327  */
1328 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1329 {
1330 	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1331 		req->tx ? "TX/IN" : "RX/OUT",
1332 		&req->request, req->request.length, req->epnum);
1333 
1334 	musb_ep_select(musb->mregs, req->epnum);
1335 	if (req->tx)
1336 		txstate(musb, req);
1337 	else
1338 		rxstate(musb, req);
1339 }
1340 
1341 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1342 			gfp_t gfp_flags)
1343 {
1344 	struct musb_ep		*musb_ep;
1345 	struct musb_request	*request;
1346 	struct musb		*musb;
1347 	int			status = 0;
1348 	unsigned long		lockflags;
1349 
1350 	if (!ep || !req)
1351 		return -EINVAL;
1352 	if (!req->buf)
1353 		return -ENODATA;
1354 
1355 	musb_ep = to_musb_ep(ep);
1356 	musb = musb_ep->musb;
1357 
1358 	request = to_musb_request(req);
1359 	request->musb = musb;
1360 
1361 	if (request->ep != musb_ep)
1362 		return -EINVAL;
1363 
1364 	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1365 
1366 	/* request is mine now... */
1367 	request->request.actual = 0;
1368 	request->request.status = -EINPROGRESS;
1369 	request->epnum = musb_ep->current_epnum;
1370 	request->tx = musb_ep->is_in;
1371 
1372 	map_dma_buffer(request, musb, musb_ep);
1373 
1374 	spin_lock_irqsave(&musb->lock, lockflags);
1375 
1376 	/* don't queue if the ep is down */
1377 	if (!musb_ep->desc) {
1378 		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1379 				req, ep->name, "disabled");
1380 		status = -ESHUTDOWN;
1381 		goto cleanup;
1382 	}
1383 
1384 	/* add request to the list */
1385 	list_add_tail(&request->list, &musb_ep->req_list);
1386 
	/* if this is the head of the queue, start i/o ... */
1388 	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1389 		musb_ep_restart(musb, request);
1390 
1391 cleanup:
1392 	spin_unlock_irqrestore(&musb->lock, lockflags);
1393 	return status;
1394 }
1395 
1396 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1397 {
1398 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1399 	struct musb_request	*req = to_musb_request(request);
1400 	struct musb_request	*r;
1401 	unsigned long		flags;
1402 	int			status = 0;
1403 	struct musb		*musb = musb_ep->musb;
1404 
1405 	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1406 		return -EINVAL;
1407 
1408 	spin_lock_irqsave(&musb->lock, flags);
1409 
1410 	list_for_each_entry(r, &musb_ep->req_list, list) {
1411 		if (r == req)
1412 			break;
1413 	}
1414 	if (r != req) {
1415 		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1416 		status = -EINVAL;
1417 		goto done;
1418 	}
1419 
1420 	/* if the hardware doesn't have the request, easy ... */
1421 	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1422 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1423 
1424 	/* ... else abort the dma transfer ... */
1425 	else if (is_dma_capable() && musb_ep->dma) {
1426 		struct dma_controller	*c = musb->dma_controller;
1427 
1428 		musb_ep_select(musb->mregs, musb_ep->current_epnum);
1429 		if (c->channel_abort)
1430 			status = c->channel_abort(musb_ep->dma);
1431 		else
1432 			status = -EBUSY;
1433 		if (status == 0)
1434 			musb_g_giveback(musb_ep, request, -ECONNRESET);
1435 	} else {
1436 		/* NOTE: by sticking to easily tested hardware/driver states,
1437 		 * we leave counting of in-flight packets imprecise.
1438 		 */
1439 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1440 	}
1441 
1442 done:
1443 	spin_unlock_irqrestore(&musb->lock, flags);
1444 	return status;
1445 }
1446 
1447 /*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
1450  *
1451  * exported to ep0 code
1452  */
1453 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1454 {
1455 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1456 	u8			epnum = musb_ep->current_epnum;
1457 	struct musb		*musb = musb_ep->musb;
1458 	void __iomem		*epio = musb->endpoints[epnum].regs;
1459 	void __iomem		*mbase;
1460 	unsigned long		flags;
1461 	u16			csr;
1462 	struct musb_request	*request;
1463 	int			status = 0;
1464 
1465 	if (!ep)
1466 		return -EINVAL;
1467 	mbase = musb->mregs;
1468 
1469 	spin_lock_irqsave(&musb->lock, flags);
1470 
	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1472 		status = -EINVAL;
1473 		goto done;
1474 	}
1475 
1476 	musb_ep_select(mbase, epnum);
1477 
1478 	request = next_request(musb_ep);
1479 	if (value) {
1480 		if (request) {
1481 			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1482 			    ep->name);
1483 			status = -EAGAIN;
1484 			goto done;
1485 		}
1486 		/* Cannot portably stall with non-empty FIFO */
1487 		if (musb_ep->is_in) {
1488 			csr = musb_readw(epio, MUSB_TXCSR);
1489 			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1490 				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1491 				status = -EAGAIN;
1492 				goto done;
1493 			}
1494 		}
1495 	} else
1496 		musb_ep->wedged = 0;
1497 
1498 	/* set/clear the stall and toggle bits */
1499 	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1500 	if (musb_ep->is_in) {
1501 		csr = musb_readw(epio, MUSB_TXCSR);
1502 		csr |= MUSB_TXCSR_P_WZC_BITS
1503 			| MUSB_TXCSR_CLRDATATOG;
1504 		if (value)
1505 			csr |= MUSB_TXCSR_P_SENDSTALL;
1506 		else
1507 			csr &= ~(MUSB_TXCSR_P_SENDSTALL
1508 				| MUSB_TXCSR_P_SENTSTALL);
1509 		csr &= ~MUSB_TXCSR_TXPKTRDY;
1510 		musb_writew(epio, MUSB_TXCSR, csr);
1511 	} else {
1512 		csr = musb_readw(epio, MUSB_RXCSR);
1513 		csr |= MUSB_RXCSR_P_WZC_BITS
1514 			| MUSB_RXCSR_FLUSHFIFO
1515 			| MUSB_RXCSR_CLRDATATOG;
1516 		if (value)
1517 			csr |= MUSB_RXCSR_P_SENDSTALL;
1518 		else
1519 			csr &= ~(MUSB_RXCSR_P_SENDSTALL
1520 				| MUSB_RXCSR_P_SENTSTALL);
1521 		musb_writew(epio, MUSB_RXCSR, csr);
1522 	}
1523 
1524 	/* maybe start the first request in the queue */
1525 	if (!musb_ep->busy && !value && request) {
1526 		dev_dbg(musb->controller, "restarting the request\n");
1527 		musb_ep_restart(musb, request);
1528 	}
1529 
1530 done:
1531 	spin_unlock_irqrestore(&musb->lock, flags);
1532 	return status;
1533 }
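
/*
 * Note (illustrative): function drivers reach musb_gadget_set_halt()
 * through usb_ep_set_halt(ep) and usb_ep_clear_halt(ep); a mass-storage
 * function, for example, stalls a bulk endpoint this way to report a
 * protocol error, and the host later clears the halt with a standard
 * CLEAR_FEATURE(ENDPOINT_HALT) request handled by the ep0 code.
 */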
1534 
1535 #ifndef __UBOOT__
1536 /*
 * Set the halt feature, ignoring subsequent clear-halt requests
1538  */
1539 static int musb_gadget_set_wedge(struct usb_ep *ep)
1540 {
1541 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1542 
1543 	if (!ep)
1544 		return -EINVAL;
1545 
1546 	musb_ep->wedged = 1;
1547 
1548 	return usb_ep_set_halt(ep);
1549 }
1550 #endif
1551 
1552 static int musb_gadget_fifo_status(struct usb_ep *ep)
1553 {
1554 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1555 	void __iomem		*epio = musb_ep->hw_ep->regs;
1556 	int			retval = -EINVAL;
1557 
1558 	if (musb_ep->desc && !musb_ep->is_in) {
1559 		struct musb		*musb = musb_ep->musb;
1560 		int			epnum = musb_ep->current_epnum;
1561 		void __iomem		*mbase = musb->mregs;
1562 		unsigned long		flags;
1563 
1564 		spin_lock_irqsave(&musb->lock, flags);
1565 
1566 		musb_ep_select(mbase, epnum);
1567 		/* FIXME return zero unless RXPKTRDY is set */
1568 		retval = musb_readw(epio, MUSB_RXCOUNT);
1569 
1570 		spin_unlock_irqrestore(&musb->lock, flags);
1571 	}
1572 	return retval;
1573 }
1574 
1575 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1576 {
1577 	struct musb_ep	*musb_ep = to_musb_ep(ep);
1578 	struct musb	*musb = musb_ep->musb;
1579 	u8		epnum = musb_ep->current_epnum;
1580 	void __iomem	*epio = musb->endpoints[epnum].regs;
1581 	void __iomem	*mbase;
1582 	unsigned long	flags;
1583 	u16		csr, int_txe;
1584 
1585 	mbase = musb->mregs;
1586 
1587 	spin_lock_irqsave(&musb->lock, flags);
1588 	musb_ep_select(mbase, (u8) epnum);
1589 
1590 	/* disable interrupts */
1591 	int_txe = musb_readw(mbase, MUSB_INTRTXE);
1592 	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1593 
1594 	if (musb_ep->is_in) {
1595 		csr = musb_readw(epio, MUSB_TXCSR);
1596 		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1597 			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1598 			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already loaded packets.
1602 			 */
1603 			csr &= ~MUSB_TXCSR_TXPKTRDY;
1604 			musb_writew(epio, MUSB_TXCSR, csr);
1605 			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1606 			musb_writew(epio, MUSB_TXCSR, csr);
1607 		}
1608 	} else {
1609 		csr = musb_readw(epio, MUSB_RXCSR);
1610 		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1611 		musb_writew(epio, MUSB_RXCSR, csr);
1612 		musb_writew(epio, MUSB_RXCSR, csr);
1613 	}
1614 
1615 	/* re-enable interrupt */
1616 	musb_writew(mbase, MUSB_INTRTXE, int_txe);
1617 	spin_unlock_irqrestore(&musb->lock, flags);
1618 }
1619 
1620 static const struct usb_ep_ops musb_ep_ops = {
1621 	.enable		= musb_gadget_enable,
1622 	.disable	= musb_gadget_disable,
1623 	.alloc_request	= musb_alloc_request,
1624 	.free_request	= musb_free_request,
1625 	.queue		= musb_gadget_queue,
1626 	.dequeue	= musb_gadget_dequeue,
1627 	.set_halt	= musb_gadget_set_halt,
1628 #ifndef __UBOOT__
1629 	.set_wedge	= musb_gadget_set_wedge,
1630 #endif
1631 	.fifo_status	= musb_gadget_fifo_status,
1632 	.fifo_flush	= musb_gadget_fifo_flush
1633 };
1634 
1635 /* ----------------------------------------------------------------------- */
1636 
1637 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1638 {
1639 	struct musb	*musb = gadget_to_musb(gadget);
1640 
1641 	return (int)musb_readw(musb->mregs, MUSB_FRAME);
1642 }
1643 
1644 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1645 {
1646 #ifndef __UBOOT__
1647 	struct musb	*musb = gadget_to_musb(gadget);
1648 	void __iomem	*mregs = musb->mregs;
1649 	unsigned long	flags;
1650 	int		status = -EINVAL;
1651 	u8		power, devctl;
1652 	int		retries;
1653 
1654 	spin_lock_irqsave(&musb->lock, flags);
1655 
1656 	switch (musb->xceiv->state) {
1657 	case OTG_STATE_B_PERIPHERAL:
1658 		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1659 		 * that's part of the standard usb 1.1 state machine, and
1660 		 * doesn't affect OTG transitions.
1661 		 */
1662 		if (musb->may_wakeup && musb->is_suspended)
1663 			break;
1664 		goto done;
1665 	case OTG_STATE_B_IDLE:
1666 		/* Start SRP ... OTG not required. */
1667 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1668 		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1669 		devctl |= MUSB_DEVCTL_SESSION;
1670 		musb_writeb(mregs, MUSB_DEVCTL, devctl);
1671 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1672 		retries = 100;
1673 		while (!(devctl & MUSB_DEVCTL_SESSION)) {
1674 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1675 			if (retries-- < 1)
1676 				break;
1677 		}
1678 		retries = 10000;
1679 		while (devctl & MUSB_DEVCTL_SESSION) {
1680 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1681 			if (retries-- < 1)
1682 				break;
1683 		}
1684 
1685 		spin_unlock_irqrestore(&musb->lock, flags);
1686 		otg_start_srp(musb->xceiv->otg);
1687 		spin_lock_irqsave(&musb->lock, flags);
1688 
1689 		/* Block idling for at least 1s */
1690 		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1000));
1692 
1693 		status = 0;
1694 		goto done;
1695 	default:
1696 		dev_dbg(musb->controller, "Unhandled wake: %s\n",
1697 			otg_state_string(musb->xceiv->state));
1698 		goto done;
1699 	}
1700 
1701 	status = 0;
1702 
1703 	power = musb_readb(mregs, MUSB_POWER);
1704 	power |= MUSB_POWER_RESUME;
1705 	musb_writeb(mregs, MUSB_POWER, power);
1706 	dev_dbg(musb->controller, "issue wakeup\n");
1707 
1708 	/* FIXME do this next chunk in a timer callback, no udelay */
1709 	mdelay(2);
1710 
1711 	power = musb_readb(mregs, MUSB_POWER);
1712 	power &= ~MUSB_POWER_RESUME;
1713 	musb_writeb(mregs, MUSB_POWER, power);
1714 done:
1715 	spin_unlock_irqrestore(&musb->lock, flags);
1716 	return status;
1717 #else
1718 	return 0;
1719 #endif
1720 }
1721 
1722 static int
1723 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1724 {
1725 	struct musb	*musb = gadget_to_musb(gadget);
1726 
1727 	musb->is_self_powered = !!is_selfpowered;
1728 	return 0;
1729 }
1730 
1731 static void musb_pullup(struct musb *musb, int is_on)
1732 {
1733 	u8 power;
1734 
1735 	power = musb_readb(musb->mregs, MUSB_POWER);
1736 	if (is_on)
1737 		power |= MUSB_POWER_SOFTCONN;
1738 	else
1739 		power &= ~MUSB_POWER_SOFTCONN;
1740 
1741 	/* FIXME if on, HdrcStart; if off, HdrcStop */
1742 
1743 	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1744 		is_on ? "on" : "off");
1745 	musb_writeb(musb->mregs, MUSB_POWER, power);
1746 }
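
/*
 * Note (illustrative): usb_gadget_connect() and usb_gadget_disconnect()
 * arrive here by way of musb_gadget_pullup() below; SOFTCONN is what
 * actually presents or removes the D+ pull-up as seen by the host.
 */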
1747 
1748 #if 0
1749 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1750 {
1751 	dev_dbg(musb->controller, "<= %s =>\n", __func__);
1752 
1753 	/*
1754 	 * FIXME iff driver's softconnect flag is set (as it is during probe,
1755 	 * though that can clear it), just musb_pullup().
1756 	 */
1757 
1758 	return -EINVAL;
1759 }
1760 #endif
1761 
1762 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1763 {
1764 #ifndef __UBOOT__
1765 	struct musb	*musb = gadget_to_musb(gadget);
1766 
1767 	if (!musb->xceiv->set_power)
1768 		return -EOPNOTSUPP;
1769 	return usb_phy_set_power(musb->xceiv, mA);
1770 #else
1771 	return 0;
1772 #endif
1773 }
1774 
1775 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1776 {
1777 	struct musb	*musb = gadget_to_musb(gadget);
1778 	unsigned long	flags;
1779 
1780 	is_on = !!is_on;
1781 
1782 	pm_runtime_get_sync(musb->controller);
1783 
1784 	/* NOTE: this assumes we are sensing vbus; we'd rather
1785 	 * not pullup unless the B-session is active.
1786 	 */
1787 	spin_lock_irqsave(&musb->lock, flags);
1788 	if (is_on != musb->softconnect) {
1789 		musb->softconnect = is_on;
1790 		musb_pullup(musb, is_on);
1791 	}
1792 	spin_unlock_irqrestore(&musb->lock, flags);
1793 
1794 	pm_runtime_put(musb->controller);
1795 
1796 	return 0;
1797 }
1798 
1799 #ifndef __UBOOT__
1800 static int musb_gadget_start(struct usb_gadget *g,
1801 		struct usb_gadget_driver *driver);
1802 static int musb_gadget_stop(struct usb_gadget *g,
1803 		struct usb_gadget_driver *driver);
1804 #endif
1805 
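/* The gadget core dispatches through these ops: usb_gadget_connect() and
 * usb_gadget_disconnect(), for instance, land in .pullup, and the composite
 * driver calls .vbus_draw when the host selects or clears a configuration.
 */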
1806 static const struct usb_gadget_ops musb_gadget_operations = {
1807 	.get_frame		= musb_gadget_get_frame,
1808 	.wakeup			= musb_gadget_wakeup,
1809 	.set_selfpowered	= musb_gadget_set_self_powered,
1810 	/* .vbus_session		= musb_gadget_vbus_session, */
1811 	.vbus_draw		= musb_gadget_vbus_draw,
1812 	.pullup			= musb_gadget_pullup,
1813 #ifndef __UBOOT__
1814 	.udc_start		= musb_gadget_start,
1815 	.udc_stop		= musb_gadget_stop,
1816 #endif
1817 };
1818 
1819 /* ----------------------------------------------------------------------- */
1820 
1821 /* Registration */
1822 
1823 /* Only this registration code "knows" the rule (from USB standards)
1824  * about there being only one external upstream port.  It assumes
1825  * all peripheral ports are external...
1826  */
1827 
1828 #ifndef __UBOOT__
1829 static void musb_gadget_release(struct device *dev)
1830 {
1831 	/* kref_put(WHAT) */
1832 	dev_dbg(dev, "%s\n", __func__);
1833 }
1834 #endif
1835 
1836 
1837 static void __devinit
1838 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1839 {
1840 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1841 
1842 	memset(ep, 0, sizeof *ep);
1843 
1844 	ep->current_epnum = epnum;
1845 	ep->musb = musb;
1846 	ep->hw_ep = hw_ep;
1847 	ep->is_in = is_in;
1848 
1849 	INIT_LIST_HEAD(&ep->req_list);
1850 
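	/* shared-FIFO endpoints serve both directions and so get an
	 * undirected name ("ep1"); split-FIFO ones are named per
	 * direction ("ep1in"/"ep1out"), as gadget drivers expect
	 */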
1851 	sprintf(ep->name, "ep%d%s", epnum,
1852 			(!epnum || hw_ep->is_shared_fifo) ? "" : (
1853 				is_in ? "in" : "out"));
1854 	ep->end_point.name = ep->name;
1855 	INIT_LIST_HEAD(&ep->end_point.ep_list);
1856 	if (!epnum) {
1857 		ep->end_point.maxpacket = 64;
1858 		ep->end_point.ops = &musb_g_ep0_ops;
1859 		musb->g.ep0 = &ep->end_point;
1860 	} else {
1861 		if (is_in)
1862 			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1863 		else
1864 			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1865 		ep->end_point.ops = &musb_ep_ops;
1866 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1867 	}
1868 }
1869 
1870 /*
1871  * Initialize the endpoints exposed to peripheral drivers, with backlinks
1872  * to the rest of the driver state.
1873  */
1874 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1875 {
1876 	u8			epnum;
1877 	struct musb_hw_ep	*hw_ep;
1878 	unsigned		count = 0;
1879 
1880 	/* initialize endpoint list just once */
1881 	INIT_LIST_HEAD(&(musb->g.ep_list));
1882 
1883 	for (epnum = 0, hw_ep = musb->endpoints;
1884 			epnum < musb->nr_endpoints;
1885 			epnum++, hw_ep++) {
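		/* an endpoint with a shared FIFO is exposed once, via its
		 * ep_in side, rather than as a separate IN/OUT pair
		 */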
1886 		if (hw_ep->is_shared_fifo /* || !epnum */) {
1887 			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1888 			count++;
1889 		} else {
1890 			if (hw_ep->max_packet_sz_tx) {
1891 				init_peripheral_ep(musb, &hw_ep->ep_in,
1892 							epnum, 1);
1893 				count++;
1894 			}
1895 			if (hw_ep->max_packet_sz_rx) {
1896 				init_peripheral_ep(musb, &hw_ep->ep_out,
1897 							epnum, 0);
1898 				count++;
1899 			}
1900 		}
1901 	}
1902 }
1903 
1904 /* called once during driver setup to initialize and link into
1905  * the driver model; memory is zeroed.
1906  */
1907 int __devinit musb_gadget_setup(struct musb *musb)
1908 {
1909 	int status;
1910 
1911 	/* REVISIT minor race:  if (erroneously) setting up two
1912 	 * musb peripherals at the same time, only the bus lock
1913 	 * is probably held.
1914 	 */
1915 
1916 	musb->g.ops = &musb_gadget_operations;
1917 #ifndef __UBOOT__
1918 	musb->g.max_speed = USB_SPEED_HIGH;
1919 #endif
1920 	musb->g.speed = USB_SPEED_UNKNOWN;
1921 
1922 #ifndef __UBOOT__
1923 	/* this "gadget" abstracts/virtualizes the controller */
1924 	dev_set_name(&musb->g.dev, "gadget");
1925 	musb->g.dev.parent = musb->controller;
1926 	musb->g.dev.dma_mask = musb->controller->dma_mask;
1927 	musb->g.dev.release = musb_gadget_release;
1928 #endif
1929 	musb->g.name = musb_driver_name;
1930 
1931 #ifndef __UBOOT__
1932 	if (is_otg_enabled(musb))
1933 		musb->g.is_otg = 1;
1934 #endif
1935 
1936 	musb_g_init_endpoints(musb);
1937 
1938 	musb->is_active = 0;
1939 	musb_platform_try_idle(musb, 0);
1940 
1941 #ifndef __UBOOT__
1942 	status = device_register(&musb->g.dev);
1943 	if (status != 0) {
1944 		put_device(&musb->g.dev);
1945 		return status;
1946 	}
1947 	status = usb_add_gadget_udc(musb->controller, &musb->g);
1948 	if (status)
1949 		goto err;
1950 #endif
1951 
1952 	return 0;
1953 #ifndef __UBOOT__
1954 err:
1955 	musb->g.dev.parent = NULL;
1956 	device_unregister(&musb->g.dev);
1957 	return status;
1958 #endif
1959 }
1960 
1961 void musb_gadget_cleanup(struct musb *musb)
1962 {
1963 #ifndef __UBOOT__
1964 	usb_del_gadget_udc(&musb->g);
1965 	if (musb->g.dev.parent)
1966 		device_unregister(&musb->g.dev);
1967 #endif
1968 }
1969 
1970 /*
1971  * Register the gadget driver. Used by gadget drivers when
1972  * registering themselves with the controller.
1973  *
 * -EINVAL something went wrong (not the driver's fault)
1975  * -EBUSY another gadget is already using the controller
1976  * -ENOMEM no memory to perform the operation
1977  *
1978  * @param driver the gadget driver
1979  * @return <0 if error, 0 if everything is fine
1980  */
1981 #ifndef __UBOOT__
1982 static int musb_gadget_start(struct usb_gadget *g,
1983 		struct usb_gadget_driver *driver)
1984 #else
1985 int musb_gadget_start(struct usb_gadget *g,
1986 		struct usb_gadget_driver *driver)
1987 #endif
1988 {
1989 	struct musb		*musb = gadget_to_musb(g);
1990 #ifndef __UBOOT__
1991 	struct usb_otg		*otg = musb->xceiv->otg;
1992 #endif
1993 	unsigned long		flags;
1994 	int			retval = -EINVAL;
1995 
1996 #ifndef __UBOOT__
1997 	if (driver->max_speed < USB_SPEED_HIGH)
1998 		goto err0;
1999 #endif
2000 
2001 	pm_runtime_get_sync(musb->controller);
2002 
2003 #ifndef __UBOOT__
2004 	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
2005 #endif
2006 
2007 	musb->softconnect = 0;
2008 	musb->gadget_driver = driver;
2009 
2010 	spin_lock_irqsave(&musb->lock, flags);
2011 	musb->is_active = 1;
2012 
2013 #ifndef __UBOOT__
2014 	otg_set_peripheral(otg, &musb->g);
2015 	musb->xceiv->state = OTG_STATE_B_IDLE;
2016 
2017 	/*
2018 	 * FIXME this ignores the softconnect flag.  Drivers are
2019 	 * allowed hold the peripheral inactive until for example
2020 	 * userspace hooks up printer hardware or DSP codecs, so
2021 	 * hosts only see fully functional devices.
2022 	 */
2023 
2024 	if (!is_otg_enabled(musb))
2025 #endif
2026 		musb_start(musb);
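	/* note: under __UBOOT__ the is_otg_enabled() test above is
	 * compiled out, so musb_start() runs unconditionally here
	 */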
2027 
2028 	spin_unlock_irqrestore(&musb->lock, flags);
2029 
2030 #ifndef __UBOOT__
2031 	if (is_otg_enabled(musb)) {
2032 		struct usb_hcd	*hcd = musb_to_hcd(musb);
2033 
2034 		dev_dbg(musb->controller, "OTG startup...\n");
2035 
2036 		/* REVISIT:  funcall to other code, which also
2037 		 * handles power budgeting ... this way also
2038 		 * ensures HdrcStart is indirectly called.
2039 		 */
		retval = usb_add_hcd(hcd, 0, 0);
2041 		if (retval < 0) {
2042 			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
2043 			goto err2;
2044 		}
2045 
2046 		if ((musb->xceiv->last_event == USB_EVENT_ID)
2047 					&& otg->set_vbus)
2048 			otg_set_vbus(otg, 1);
2049 
2050 		hcd->self.uses_pio_for_control = 1;
2051 	}
2052 	if (musb->xceiv->last_event == USB_EVENT_NONE)
2053 		pm_runtime_put(musb->controller);
2054 #endif
2055 
2056 	return 0;
2057 
2058 #ifndef __UBOOT__
2059 err2:
2060 	if (!is_otg_enabled(musb))
2061 		musb_stop(musb);
2062 err0:
2063 	return retval;
2064 #endif
2065 }
2066 
2067 #ifndef __UBOOT__
2068 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
2069 {
2070 	int			i;
2071 	struct musb_hw_ep	*hw_ep;
2072 
2073 	/* don't disconnect if it's not connected */
2074 	if (musb->g.speed == USB_SPEED_UNKNOWN)
2075 		driver = NULL;
2076 	else
2077 		musb->g.speed = USB_SPEED_UNKNOWN;
2078 
2079 	/* deactivate the hardware */
2080 	if (musb->softconnect) {
2081 		musb->softconnect = 0;
2082 		musb_pullup(musb, 0);
2083 	}
2084 	musb_stop(musb);
2085 
2086 	/* killing any outstanding requests will quiesce the driver;
2087 	 * then report disconnect
2088 	 */
2089 	if (driver) {
2090 		for (i = 0, hw_ep = musb->endpoints;
2091 				i < musb->nr_endpoints;
2092 				i++, hw_ep++) {
2093 			musb_ep_select(musb->mregs, i);
2094 			if (hw_ep->is_shared_fifo /* || !epnum */) {
2095 				nuke(&hw_ep->ep_in, -ESHUTDOWN);
2096 			} else {
2097 				if (hw_ep->max_packet_sz_tx)
2098 					nuke(&hw_ep->ep_in, -ESHUTDOWN);
2099 				if (hw_ep->max_packet_sz_rx)
2100 					nuke(&hw_ep->ep_out, -ESHUTDOWN);
2101 			}
2102 		}
2103 	}
2104 }
2105 
2106 /*
2107  * Unregister the gadget driver. Used by gadget drivers when
2108  * unregistering themselves from the controller.
2109  *
2110  * @param driver the gadget driver to unregister
2111  */
2112 static int musb_gadget_stop(struct usb_gadget *g,
2113 		struct usb_gadget_driver *driver)
2114 {
2115 	struct musb	*musb = gadget_to_musb(g);
2116 	unsigned long	flags;
2117 
2118 	if (musb->xceiv->last_event == USB_EVENT_NONE)
2119 		pm_runtime_get_sync(musb->controller);
2120 
2121 	/*
2122 	 * REVISIT always use otg_set_peripheral() here too;
2123 	 * this needs to shut down the OTG engine.
2124 	 */
2125 
2126 	spin_lock_irqsave(&musb->lock, flags);
2127 
2128 	musb_hnp_stop(musb);
2129 
2130 	(void) musb_gadget_vbus_draw(&musb->g, 0);
2131 
2132 	musb->xceiv->state = OTG_STATE_UNDEFINED;
2133 	stop_activity(musb, driver);
2134 	otg_set_peripheral(musb->xceiv->otg, NULL);
2135 
2136 	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2137 
2138 	musb->is_active = 0;
2139 	musb_platform_try_idle(musb, 0);
2140 	spin_unlock_irqrestore(&musb->lock, flags);
2141 
2142 	if (is_otg_enabled(musb)) {
2143 		usb_remove_hcd(musb_to_hcd(musb));
2144 		/* FIXME we need to be able to register another
2145 		 * gadget driver here and have everything work;
2146 		 * that currently misbehaves.
2147 		 */
2148 	}
2149 
2150 	if (!is_otg_enabled(musb))
2151 		musb_stop(musb);
2152 
2153 	pm_runtime_put(musb->controller);
2154 
2155 	return 0;
2156 }
2157 #endif
2158 
2159 /* ----------------------------------------------------------------------- */
2160 
2161 /* lifecycle operations called through plat_uds.c */
2162 
2163 void musb_g_resume(struct musb *musb)
2164 {
2165 #ifndef __UBOOT__
2166 	musb->is_suspended = 0;
2167 	switch (musb->xceiv->state) {
2168 	case OTG_STATE_B_IDLE:
2169 		break;
2170 	case OTG_STATE_B_WAIT_ACON:
2171 	case OTG_STATE_B_PERIPHERAL:
2172 		musb->is_active = 1;
2173 		if (musb->gadget_driver && musb->gadget_driver->resume) {
2174 			spin_unlock(&musb->lock);
2175 			musb->gadget_driver->resume(&musb->g);
2176 			spin_lock(&musb->lock);
2177 		}
2178 		break;
2179 	default:
2180 		WARNING("unhandled RESUME transition (%s)\n",
2181 				otg_state_string(musb->xceiv->state));
2182 	}
2183 #endif
2184 }
2185 
2186 /* called when SOF packets stop for 3+ msec */
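/* (per USB 2.0 section 7.1.7.6, 3 ms of continuous bus idle means suspend) */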
2187 void musb_g_suspend(struct musb *musb)
2188 {
2189 #ifndef __UBOOT__
2190 	u8	devctl;
2191 
2192 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2193 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
2194 
2195 	switch (musb->xceiv->state) {
2196 	case OTG_STATE_B_IDLE:
2197 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2198 			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2199 		break;
2200 	case OTG_STATE_B_PERIPHERAL:
2201 		musb->is_suspended = 1;
2202 		if (musb->gadget_driver && musb->gadget_driver->suspend) {
2203 			spin_unlock(&musb->lock);
2204 			musb->gadget_driver->suspend(&musb->g);
2205 			spin_lock(&musb->lock);
2206 		}
2207 		break;
2208 	default:
2209 		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2210 		 * A_PERIPHERAL may need care too
2211 		 */
2212 		WARNING("unhandled SUSPEND transition (%s)\n",
2213 				otg_state_string(musb->xceiv->state));
2214 	}
2215 #endif
2216 }
2217 
/* Called during SRP (OTG Session Request Protocol) */
2219 void musb_g_wakeup(struct musb *musb)
2220 {
2221 	musb_gadget_wakeup(&musb->g);
2222 }
2223 
2224 /* called when VBUS drops below session threshold, and in other cases */
2225 void musb_g_disconnect(struct musb *musb)
2226 {
2227 	void __iomem	*mregs = musb->mregs;
2228 	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
2229 
2230 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
2231 
	/* clear HR (the DevCtl host-request bit); keep only the session bit */
2233 	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2234 
2235 	/* don't draw vbus until new b-default session */
2236 	(void) musb_gadget_vbus_draw(&musb->g, 0);
2237 
2238 	musb->g.speed = USB_SPEED_UNKNOWN;
2239 	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2240 		spin_unlock(&musb->lock);
2241 		musb->gadget_driver->disconnect(&musb->g);
2242 		spin_lock(&musb->lock);
2243 	}
2244 
2245 #ifndef __UBOOT__
2246 	switch (musb->xceiv->state) {
2247 	default:
2248 		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2249 			otg_state_string(musb->xceiv->state));
2250 		musb->xceiv->state = OTG_STATE_A_IDLE;
2251 		MUSB_HST_MODE(musb);
2252 		break;
2253 	case OTG_STATE_A_PERIPHERAL:
2254 		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2255 		MUSB_HST_MODE(musb);
2256 		break;
2257 	case OTG_STATE_B_WAIT_ACON:
2258 	case OTG_STATE_B_HOST:
2259 	case OTG_STATE_B_PERIPHERAL:
2260 	case OTG_STATE_B_IDLE:
2261 		musb->xceiv->state = OTG_STATE_B_IDLE;
2262 		break;
2263 	case OTG_STATE_B_SRP_INIT:
2264 		break;
2265 	}
2266 #endif
2267 
2268 	musb->is_active = 0;
2269 }
2270 
2271 void musb_g_reset(struct musb *musb)
2272 __releases(musb->lock)
2273 __acquires(musb->lock)
2274 {
2275 	void __iomem	*mbase = musb->mregs;
2276 	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
2277 	u8		power;
2278 
2279 #ifndef __UBOOT__
2280 	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2281 			(devctl & MUSB_DEVCTL_BDEVICE)
2282 				? "B-Device" : "A-Device",
2283 			musb_readb(mbase, MUSB_FADDR),
2284 			musb->gadget_driver
2285 				? musb->gadget_driver->driver.name
2286 				: NULL
2287 			);
2288 #endif
2289 
	/* report disconnect, if we didn't already (flushing EP state);
	 * otherwise just clear HR
	 */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

2299 	/* what speed did we negotiate? */
2300 	power = musb_readb(mbase, MUSB_POWER);
2301 	musb->g.speed = (power & MUSB_POWER_HSMODE)
2302 			? USB_SPEED_HIGH : USB_SPEED_FULL;
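	/* the controller sets POWER.HSMode only if the high-speed chirp
	 * handshake during the preceding reset succeeded
	 */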
2303 
2304 	/* start in USB_STATE_DEFAULT */
2305 	musb->is_active = 1;
2306 	musb->is_suspended = 0;
2307 	MUSB_DEV_MODE(musb);
2308 	musb->address = 0;
2309 	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2310 
2311 	musb->may_wakeup = 0;
2312 	musb->g.b_hnp_enable = 0;
2313 	musb->g.a_alt_hnp_support = 0;
2314 	musb->g.a_hnp_support = 0;
2315 
2316 #ifndef __UBOOT__
2317 	/* Normal reset, as B-Device;
2318 	 * or else after HNP, as A-Device
2319 	 */
2320 	if (devctl & MUSB_DEVCTL_BDEVICE) {
2321 		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2322 		musb->g.is_a_peripheral = 0;
2323 	} else if (is_otg_enabled(musb)) {
2324 		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2325 		musb->g.is_a_peripheral = 1;
2326 	} else
2327 		WARN_ON(1);
2328 
2329 	/* start with default limits on VBUS power draw */
2330 	(void) musb_gadget_vbus_draw(&musb->g,
2331 			is_otg_enabled(musb) ? 8 : 100);
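	/* the 8 mA OTG figure matches the OTG supplement's minimum VBUS
	 * supply guarantee; 100 mA is the USB 2.0 ceiling for an
	 * unconfigured device
	 */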
2332 #endif
2333 }
2334