xref: /openbmc/linux/drivers/usb/musb/musb_gadget.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  * MUSB OTG driver peripheral support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
26  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/moduleparam.h>
44 #include <linux/stat.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/slab.h>
47 
48 #include "musb_core.h"
49 
50 
51 /* MUSB PERIPHERAL status 3-mar-2006:
52  *
53  * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
54  *   Minor glitches:
55  *
56  *     + remote wakeup to Linux hosts works, but USBCV failures were
57  *       seen in one test run (operator error?)
58  *     + endpoint halt tests -- in both usbtest and usbcv -- seem
59  *       to break when dma is enabled ... is something wrongly
60  *       clearing SENDSTALL?
61  *
62  * - Mass storage behaved ok when last tested.  Network traffic patterns
63  *   (with lots of short transfers etc) need retesting; they turn up the
64  *   worst cases of the DMA, since short packets are typical but are not
65  *   required.
66  *
67  * - TX/IN
68  *     + both pio and dma behave well with network and g_zero tests
69  *     + no cppi throughput issues other than no-hw-queueing
70  *     + failed with FLAT_REG (DaVinci)
71  *     + seems to behave with double buffering, PIO -and- CPPI
72  *     + with gadgetfs + AIO, requests got lost?
73  *
74  * - RX/OUT
75  *     + both pio and dma behave well with network and g_zero tests
76  *     + dma is slow in typical case (short_not_ok is clear)
77  *     + double buffering ok with PIO
78  *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
79  *     + request lossage observed with gadgetfs
80  *
81  * - ISO not tested ... might work, but only weakly isochronous
82  *
83  * - Gadget driver disabling of softconnect during bind() is ignored, so
84  *   drivers can't hold off host requests until userspace is ready.
85  *   (Workaround:  they can turn it off later.)
86  *
87  * - PORTABILITY (assumes PIO works):
88  *     + DaVinci, basically works with cppi dma
89  *     + OMAP 2430, ditto with mentor dma
90  *     + TUSB 6010, platform-specific dma in the works
91  */
92 
93 /* ----------------------------------------------------------------------- */
94 
95 /* Map the buffer for dma */
96 
97 static inline void map_dma_buffer(struct musb_request *request,
98 				struct musb *musb)
99 {
100 	if (request->request.dma == DMA_ADDR_INVALID) {
101 		request->request.dma = dma_map_single(
102 				musb->controller,
103 				request->request.buf,
104 				request->request.length,
105 				request->tx
106 					? DMA_TO_DEVICE
107 					: DMA_FROM_DEVICE);
108 		request->mapped = 1;
109 	} else {
110 		dma_sync_single_for_device(musb->controller,
111 			request->request.dma,
112 			request->request.length,
113 			request->tx
114 				? DMA_TO_DEVICE
115 				: DMA_FROM_DEVICE);
116 		request->mapped = 0;
117 	}
118 }
119 
120 /* Unmap the buffer from dma and maps it back to cpu */
121 static inline void unmap_dma_buffer(struct musb_request *request,
122 				struct musb *musb)
123 {
124 	if (request->request.dma == DMA_ADDR_INVALID) {
125 		DBG(20, "not unmapping a never mapped buffer\n");
126 		return;
127 	}
128 	if (request->mapped) {
129 		dma_unmap_single(musb->controller,
130 			request->request.dma,
131 			request->request.length,
132 			request->tx
133 				? DMA_TO_DEVICE
134 				: DMA_FROM_DEVICE);
135 		request->request.dma = DMA_ADDR_INVALID;
136 		request->mapped = 0;
137 	} else {
138 		dma_sync_single_for_cpu(musb->controller,
139 			request->request.dma,
140 			request->request.length,
141 			request->tx
142 				? DMA_TO_DEVICE
143 				: DMA_FROM_DEVICE);
144 
145 	}
146 }
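
/*
 * A usage sketch (illustrative only, not a call sequence copied from this
 * file): the two helpers above pair up around each transfer whenever a
 * DMA channel is in use.
 *
 *	map_dma_buffer(req, musb);	(before programming the channel)
 *	... DMA runs; the channel interrupt signals completion ...
 *	unmap_dma_buffer(req, musb);	(before giving the buffer back)
 *
 * The request.dma == DMA_ADDR_INVALID test distinguishes buffers this
 * driver mapped itself (mapped and unmapped here) from buffers the gadget
 * driver already mapped (only synced here).
 */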
147 
148 /*
149  * Immediately complete a request.
150  * @param ep the endpoint the request was queued to
151  * @param request the request to complete
152  * @param status the status to complete the request with
153  * Context: controller locked, IRQs blocked.
154  */
155 void musb_g_giveback(
156 	struct musb_ep		*ep,
157 	struct usb_request	*request,
158 	int			status)
159 __releases(ep->musb->lock)
160 __acquires(ep->musb->lock)
161 {
162 	struct musb_request	*req;
163 	struct musb		*musb;
164 	int			busy = ep->busy;
165 
166 	req = to_musb_request(request);
167 
168 	list_del(&request->list);
169 	if (req->request.status == -EINPROGRESS)
170 		req->request.status = status;
171 	musb = req->musb;
172 
173 	ep->busy = 1;
174 	spin_unlock(&musb->lock);
175 	if (is_dma_capable() && ep->dma)
176 		unmap_dma_buffer(req, musb);
177 	if (request->status == 0)
178 		DBG(5, "%s done request %p,  %d/%d\n",
179 				ep->end_point.name, request,
180 				req->request.actual, req->request.length);
181 	else
182 		DBG(2, "%s request %p, %d/%d fault %d\n",
183 				ep->end_point.name, request,
184 				req->request.actual, req->request.length,
185 				request->status);
186 	req->request.complete(&req->ep->end_point, &req->request);
187 	spin_lock(&musb->lock);
188 	ep->busy = busy;
189 }
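
/*
 * The complete() callback invoked above is supplied by the gadget driver.
 * A minimal sketch of such a handler -- the name my_complete is
 * hypothetical, nothing in this driver defines it:
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		if (req->status == 0)
 *			pr_debug("%s: %u/%u bytes done\n", ep->name,
 *					req->actual, req->length);
 *		else
 *			pr_debug("%s: failed, status %d\n", ep->name,
 *					req->status);
 *	}
 *
 * Since the lock is dropped around the callback, complete() may requeue
 * this request or queue new ones immediately.
 */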
190 
191 /* ----------------------------------------------------------------------- */
192 
193 /*
194  * Abort requests queued to an endpoint, completing each with the status.
195  * Synchronous.  Caller has locked the controller, blocked IRQs, and selected this ep.
196  */
197 static void nuke(struct musb_ep *ep, const int status)
198 {
199 	struct musb_request	*req = NULL;
200 	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
201 
202 	ep->busy = 1;
203 
204 	if (is_dma_capable() && ep->dma) {
205 		struct dma_controller	*c = ep->musb->dma_controller;
206 		int value;
207 
208 		if (ep->is_in) {
209 			/*
210 			 * The programming guide says that we must not clear
211 			 * the DMAMODE bit before DMAENAB, so we only
212 			 * clear it in the second write...
213 			 */
214 			musb_writew(epio, MUSB_TXCSR,
215 				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
216 			musb_writew(epio, MUSB_TXCSR,
217 					0 | MUSB_TXCSR_FLUSHFIFO);
218 		} else {
219 			musb_writew(epio, MUSB_RXCSR,
220 					0 | MUSB_RXCSR_FLUSHFIFO);
221 			musb_writew(epio, MUSB_RXCSR,
222 					0 | MUSB_RXCSR_FLUSHFIFO);
223 		}
224 
225 		value = c->channel_abort(ep->dma);
226 		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
227 		c->channel_release(ep->dma);
228 		ep->dma = NULL;
229 	}
230 
231 	while (!list_empty(&(ep->req_list))) {
232 		req = container_of(ep->req_list.next, struct musb_request,
233 				request.list);
234 		musb_g_giveback(ep, &req->request, status);
235 	}
236 }
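
/*
 * For example, musb_gadget_disable() below nukes with -ESHUTDOWN when an
 * endpoint goes down, while dequeueing a single request reports
 * -ECONNRESET instead.
 */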
237 
238 /* ----------------------------------------------------------------------- */
239 
240 /* Data transfers - pure PIO, pure DMA, or mixed mode */
241 
242 /*
243  * This assumes the separate CPPI engine is responding to DMA requests
244  * from the usb core ... sequenced a bit differently from mentor dma.
245  */
246 
247 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
248 {
249 	if (can_bulk_split(musb, ep->type))
250 		return ep->hw_ep->max_packet_sz_tx;
251 	else
252 		return ep->packet_sz;
253 }
254 
255 
256 #ifdef CONFIG_USB_INVENTRA_DMA
257 
258 /* Peripheral tx (IN) using Mentor DMA works as follows:
259 	Only mode 0 is used for transfers <= wPktSize,
260 	mode 1 is used for larger transfers,
261 
262 	One of the following happens:
263 	- Host sends IN token which causes an endpoint interrupt
264 		-> TxAvail
265 			-> if DMA is currently busy, exit.
266 			-> if queue is non-empty, txstate().
267 
268 	- Request is queued by the gadget driver.
269 		-> if queue was previously empty, txstate()
270 
271 	txstate()
272 		-> start
273 		  /\	-> setup DMA
274 		  |     (data is transferred to the FIFO, then sent out when
275 		  |	IN token(s) are received from the Host.)
276 		  |		-> DMA interrupt on completion
277 		  |		   calls TxAvail.
278 		  |		      -> stop DMA, ~DMAENAB,
279 		  |		      -> set TxPktRdy for last short pkt or zlp
280 		  |		      -> Complete Request
281 		  |		      -> Continue next request (call txstate)
282 		  |___________________________________|
283 
284  * Non-Mentor DMA engines can of course work differently, such as by
285  * upleveling from irq-per-packet to irq-per-buffer.
286  */
287 
288 #endif
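
/*
 * In code, the mode choice the diagram implies reduces to a size test; a
 * condensed sketch of what txstate() does below for Inventra DMA:
 *
 *	if (request_size < musb_ep->packet_sz)
 *		musb_ep->dma->desired_mode = 0;	(at most one packet per irq)
 *	else
 *		musb_ep->dma->desired_mode = 1;	(whole buffer, then one irq)
 */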
289 
290 /*
291  * An endpoint is transmitting data. This can be called either from
292  * the IRQ routine or from ep.queue() to kickstart a request on an
293  * endpoint.
294  *
295  * Context: controller locked, IRQs blocked, endpoint selected
296  */
297 static void txstate(struct musb *musb, struct musb_request *req)
298 {
299 	u8			epnum = req->epnum;
300 	struct musb_ep		*musb_ep;
301 	void __iomem		*epio = musb->endpoints[epnum].regs;
302 	struct usb_request	*request;
303 	u16			fifo_count = 0, csr;
304 	int			use_dma = 0;
305 
306 	musb_ep = req->ep;
307 
308 	/* we shouldn't get here while DMA is active ... but we do ... */
309 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
310 		DBG(4, "dma pending...\n");
311 		return;
312 	}
313 
314 	/* read TXCSR before */
315 	csr = musb_readw(epio, MUSB_TXCSR);
316 
317 	request = &req->request;
318 	fifo_count = min(max_ep_writesize(musb, musb_ep),
319 			(int)(request->length - request->actual));
320 
321 	if (csr & MUSB_TXCSR_TXPKTRDY) {
322 		DBG(5, "%s old packet still ready, txcsr %03x\n",
323 				musb_ep->end_point.name, csr);
324 		return;
325 	}
326 
327 	if (csr & MUSB_TXCSR_P_SENDSTALL) {
328 		DBG(5, "%s stalling, txcsr %03x\n",
329 				musb_ep->end_point.name, csr);
330 		return;
331 	}
332 
333 	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
334 			epnum, musb_ep->packet_sz, fifo_count,
335 			csr);
336 
337 #ifndef	CONFIG_MUSB_PIO_ONLY
338 	if (is_dma_capable() && musb_ep->dma) {
339 		struct dma_controller	*c = musb->dma_controller;
340 		size_t request_size;
341 
342 		/* setup DMA, then program endpoint CSR */
343 		request_size = min_t(size_t, request->length - request->actual,
344 					musb_ep->dma->max_len);
345 
346 		use_dma = (request->dma != DMA_ADDR_INVALID);
347 
348 		/* MUSB_TXCSR_P_ISO is still set correctly */
349 
350 #ifdef CONFIG_USB_INVENTRA_DMA
351 		{
352 			if (request_size < musb_ep->packet_sz)
353 				musb_ep->dma->desired_mode = 0;
354 			else
355 				musb_ep->dma->desired_mode = 1;
356 
357 			use_dma = use_dma && c->channel_program(
358 					musb_ep->dma, musb_ep->packet_sz,
359 					musb_ep->dma->desired_mode,
360 					request->dma + request->actual, request_size);
361 			if (use_dma) {
362 				if (musb_ep->dma->desired_mode == 0) {
363 					/*
364 					 * We must not clear the DMAMODE bit
365 					 * before the DMAENAB bit -- and the
366 					 * latter doesn't always get cleared
367 					 * before we get here...
368 					 */
369 					csr &= ~(MUSB_TXCSR_AUTOSET
370 						| MUSB_TXCSR_DMAENAB);
371 					musb_writew(epio, MUSB_TXCSR, csr
372 						| MUSB_TXCSR_P_WZC_BITS);
373 					csr &= ~MUSB_TXCSR_DMAMODE;
374 					csr |= (MUSB_TXCSR_DMAENAB |
375 							MUSB_TXCSR_MODE);
376 					/* against programming guide */
377 				} else {
378 					csr |= (MUSB_TXCSR_DMAENAB
379 							| MUSB_TXCSR_DMAMODE
380 							| MUSB_TXCSR_MODE);
381 					if (!musb_ep->hb_mult)
382 						csr |= MUSB_TXCSR_AUTOSET;
383 				}
384 				csr &= ~MUSB_TXCSR_P_UNDERRUN;
385 
386 				musb_writew(epio, MUSB_TXCSR, csr);
387 			}
388 		}
389 
390 #elif defined(CONFIG_USB_TI_CPPI_DMA)
391 		/* program endpoint CSR first, then setup DMA */
392 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
393 		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
394 		       MUSB_TXCSR_MODE;
395 		musb_writew(epio, MUSB_TXCSR,
396 			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
397 				| csr);
398 
399 		/* ensure writebuffer is empty */
400 		csr = musb_readw(epio, MUSB_TXCSR);
401 
402 		/* NOTE host side sets DMAENAB later than this; both are
403 		 * OK since the transfer dma glue (between CPPI and Mentor
404 		 * fifos) just tells CPPI it could start.  Data only moves
405 		 * to the USB TX fifo when both fifos are ready.
406 		 */
407 
408 		/* "mode" is irrelevant here; handle terminating ZLPs like
409 		 * PIO does, since the hardware RNDIS mode seems unreliable
410 		 * except for the last-packet-is-already-short case.
411 		 */
412 		use_dma = use_dma && c->channel_program(
413 				musb_ep->dma, musb_ep->packet_sz,
414 				0,
415 				request->dma + request->actual,
416 				request_size);
417 		if (!use_dma) {
418 			c->channel_release(musb_ep->dma);
419 			musb_ep->dma = NULL;
420 			csr &= ~MUSB_TXCSR_DMAENAB;
421 			musb_writew(epio, MUSB_TXCSR, csr);
422 			/* invariant: request->buf is non-null */
423 		}
424 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
425 		use_dma = use_dma && c->channel_program(
426 				musb_ep->dma, musb_ep->packet_sz,
427 				request->zero,
428 				request->dma + request->actual,
429 				request_size);
430 #endif
431 	}
432 #endif
433 
434 	if (!use_dma) {
435 		/*
436 		 * Unmap the dma buffer back to cpu if dma channel
437 		 * programming fails
438 		 */
439 		if (is_dma_capable() && musb_ep->dma)
440 			unmap_dma_buffer(req, musb);
441 
442 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
443 				(u8 *) (request->buf + request->actual));
444 		request->actual += fifo_count;
445 		csr |= MUSB_TXCSR_TXPKTRDY;
446 		csr &= ~MUSB_TXCSR_P_UNDERRUN;
447 		musb_writew(epio, MUSB_TXCSR, csr);
448 	}
449 
450 	/* host may already have the data when this message shows... */
451 	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
452 			musb_ep->end_point.name, use_dma ? "dma" : "pio",
453 			request->actual, request->length,
454 			musb_readw(epio, MUSB_TXCSR),
455 			fifo_count,
456 			musb_readw(epio, MUSB_TXMAXP));
457 }
458 
459 /*
460  * FIFO state update (e.g. data ready).
461  * Called from IRQ,  with controller locked.
462  */
463 void musb_g_tx(struct musb *musb, u8 epnum)
464 {
465 	u16			csr;
466 	struct usb_request	*request;
467 	u8 __iomem		*mbase = musb->mregs;
468 	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
469 	void __iomem		*epio = musb->endpoints[epnum].regs;
470 	struct dma_channel	*dma;
471 
472 	musb_ep_select(mbase, epnum);
473 	request = next_request(musb_ep);
474 
475 	csr = musb_readw(epio, MUSB_TXCSR);
476 	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
477 
478 	dma = is_dma_capable() ? musb_ep->dma : NULL;
479 
480 	/*
481 	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
482 	 * probably rates reporting as a host error.
483 	 */
484 	if (csr & MUSB_TXCSR_P_SENTSTALL) {
485 		csr |=	MUSB_TXCSR_P_WZC_BITS;
486 		csr &= ~MUSB_TXCSR_P_SENTSTALL;
487 		musb_writew(epio, MUSB_TXCSR, csr);
488 		return;
489 	}
490 
491 	if (csr & MUSB_TXCSR_P_UNDERRUN) {
492 		/* We NAKed, no big deal... little reason to care. */
493 		csr |=	 MUSB_TXCSR_P_WZC_BITS;
494 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
495 		musb_writew(epio, MUSB_TXCSR, csr);
496 		DBG(20, "underrun on ep%d, req %p\n", epnum, request);
497 	}
498 
499 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
500 		/*
501 		 * SHOULD NOT HAPPEN... has with CPPI though, after
502 		 * changing SENDSTALL (and other cases); harmless?
503 		 */
504 		DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
505 		return;
506 	}
507 
508 	if (request) {
509 		u8	is_dma = 0;
510 
511 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
512 			is_dma = 1;
513 			csr |= MUSB_TXCSR_P_WZC_BITS;
514 			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
515 				 MUSB_TXCSR_TXPKTRDY);
516 			musb_writew(epio, MUSB_TXCSR, csr);
517 			/* Ensure writebuffer is empty. */
518 			csr = musb_readw(epio, MUSB_TXCSR);
519 			request->actual += musb_ep->dma->actual_len;
520 			DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
521 				epnum, csr, musb_ep->dma->actual_len, request);
522 		}
523 
524 		/*
525 		 * First, maybe a terminating short packet. Some DMA
526 		 * engines might handle this by themselves.
527 		 */
528 		if ((request->zero && request->length
529 			&& (request->length % musb_ep->packet_sz == 0)
530 			&& (request->actual == request->length))
531 #ifdef CONFIG_USB_INVENTRA_DMA
532 			|| (is_dma && (!dma->desired_mode ||
533 				(request->actual &
534 					(musb_ep->packet_sz - 1))))
535 #endif
536 		) {
537 			/*
538 			 * On DMA completion, FIFO may not be
539 			 * available yet...
540 			 */
541 			if (csr & MUSB_TXCSR_TXPKTRDY)
542 				return;
543 
544 			DBG(4, "sending zero pkt\n");
545 			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
546 					| MUSB_TXCSR_TXPKTRDY);
547 			request->zero = 0;
548 		}
549 
550 		if (request->actual == request->length) {
551 			musb_g_giveback(musb_ep, request, 0);
552 			request = musb_ep->desc ? next_request(musb_ep) : NULL;
553 			if (!request) {
554 				DBG(4, "%s idle now\n",
555 					musb_ep->end_point.name);
556 				return;
557 			}
558 		}
559 
560 		txstate(musb, to_musb_request(request));
561 	}
562 }
563 
564 /* ------------------------------------------------------------ */
565 
566 #ifdef CONFIG_USB_INVENTRA_DMA
567 
568 /* Peripheral rx (OUT) using Mentor DMA works as follows:
569 	- Only mode 0 is used.
570 
571 	- Request is queued by the gadget class driver.
572 		-> if queue was previously empty, rxstate()
573 
574 	- Host sends OUT token which causes an endpoint interrupt
575 	  /\      -> RxReady
576 	  |	      -> if request queued, call rxstate
577 	  |		/\	-> setup DMA
578 	  |		|	     -> DMA interrupt on completion
579 	  |		|		-> RxReady
580 	  |		|		      -> stop DMA
581 	  |		|		      -> ack the read
582 	  |		|		      -> if data recd = max expected
583 	  |		|				by the request, or host
584 	  |		|				sent a short packet,
585 	  |		|				complete the request,
586 	  |		|				and start the next one.
587 	  |		|_____________________________________|
588 	  |					 else just wait for the host
589 	  |					    to send the next OUT token.
590 	  |__________________________________________________|
591 
592  * Non-Mentor DMA engines can of course work differently.
593  */
594 
595 #endif
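
/*
 * Note the "only mode 0" statement above describes the default build;
 * rxstate() below still computes a desired_mode with the same size test
 * as the TX side (sketch):
 *
 *	if (transfer_size <= musb_ep->packet_sz)
 *		musb_ep->dma->desired_mode = 0;
 *	else
 *		musb_ep->dma->desired_mode = 1;
 */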
596 
597 /*
598  * Context: controller locked, IRQs blocked, endpoint selected
599  */
600 static void rxstate(struct musb *musb, struct musb_request *req)
601 {
602 	const u8		epnum = req->epnum;
603 	struct usb_request	*request = &req->request;
604 	struct musb_ep		*musb_ep;
605 	void __iomem		*epio = musb->endpoints[epnum].regs;
606 	unsigned		fifo_count = 0;
607 	u16			len;
608 	u16			csr = musb_readw(epio, MUSB_RXCSR);
609 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
610 
611 	if (hw_ep->is_shared_fifo)
612 		musb_ep = &hw_ep->ep_in;
613 	else
614 		musb_ep = &hw_ep->ep_out;
615 
616 	len = musb_ep->packet_sz;
617 
618 	/* We shouldn't get here while DMA is active, but we do... */
619 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
620 		DBG(4, "DMA pending...\n");
621 		return;
622 	}
623 
624 	if (csr & MUSB_RXCSR_P_SENDSTALL) {
625 		DBG(5, "%s stalling, RXCSR %04x\n",
626 		    musb_ep->end_point.name, csr);
627 		return;
628 	}
629 
630 	if (is_cppi_enabled() && musb_ep->dma) {
631 		struct dma_controller	*c = musb->dma_controller;
632 		struct dma_channel	*channel = musb_ep->dma;
633 
634 		/* NOTE:  CPPI won't actually stop advancing the DMA
635 		 * queue after short packet transfers, so this is almost
636 		 * always going to run as IRQ-per-packet DMA so that
637 		 * faults will be handled correctly.
638 		 */
639 		if (c->channel_program(channel,
640 				musb_ep->packet_sz,
641 				!request->short_not_ok,
642 				request->dma + request->actual,
643 				request->length - request->actual)) {
644 
645 			/* make sure that if an rxpkt arrived after the irq,
646 			 * the cppi engine will be ready to take it as soon
647 			 * as DMA is enabled
648 			 */
649 			csr &= ~(MUSB_RXCSR_AUTOCLEAR
650 					| MUSB_RXCSR_DMAMODE);
651 			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
652 			musb_writew(epio, MUSB_RXCSR, csr);
653 			return;
654 		}
655 	}
656 
657 	if (csr & MUSB_RXCSR_RXPKTRDY) {
658 		len = musb_readw(epio, MUSB_RXCOUNT);
659 		if (request->actual < request->length) {
660 #ifdef CONFIG_USB_INVENTRA_DMA
661 			if (is_dma_capable() && musb_ep->dma) {
662 				struct dma_controller	*c;
663 				struct dma_channel	*channel;
664 				int			use_dma = 0;
665 
666 				c = musb->dma_controller;
667 				channel = musb_ep->dma;
668 
669 	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
670 	 * mode 0 only. So we do not get endpoint interrupts due to DMA
671 	 * completion. We only get interrupts from DMA controller.
672 	 *
673 	 * We could operate in DMA mode 1 if we knew the size of the transfer
674 	 * in advance. For mass storage class, request->length = what the host
675 	 * sends, so that'd work.  But for pretty much everything else,
676 	 * request->length is routinely more than what the host sends. For
677 	 * most of these gadgets, end of transfer is signified either by a short
678 	 * packet, or by filling the last byte of the buffer.  (Sending extra
679 	 * data in that last packet should trigger an overflow fault.)  But in
680 	 * mode 1, we don't get a DMA completion interrupt for short packets.
681 	 *
682 	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
683 	 * to get endpoint interrupt on every DMA req, but that didn't seem
684 	 * to work reliably.
685 	 *
686 	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
687 	 * then becomes usable as a runtime "use mode 1" hint...
688 	 */
689 
690 				csr |= MUSB_RXCSR_DMAENAB;
691 #ifdef USE_MODE1
692 				csr |= MUSB_RXCSR_AUTOCLEAR;
693 				/* csr |= MUSB_RXCSR_DMAMODE; */
694 
695 				/* this special sequence (enabling and then
696 				 * disabling MUSB_RXCSR_DMAMODE) is required
697 				 * to get DMAReq to activate
698 				 */
699 				musb_writew(epio, MUSB_RXCSR,
700 					csr | MUSB_RXCSR_DMAMODE);
701 #else
702 				if (!musb_ep->hb_mult &&
703 					musb_ep->hw_ep->rx_double_buffered)
704 					csr |= MUSB_RXCSR_AUTOCLEAR;
705 #endif
706 				musb_writew(epio, MUSB_RXCSR, csr);
707 
708 				if (request->actual < request->length) {
709 					int transfer_size = 0;
710 #ifdef USE_MODE1
711 					transfer_size = min(request->length - request->actual,
712 							channel->max_len);
713 #else
714 					transfer_size = min(request->length - request->actual,
715 							(unsigned)len);
716 #endif
717 					if (transfer_size <= musb_ep->packet_sz)
718 						musb_ep->dma->desired_mode = 0;
719 					else
720 						musb_ep->dma->desired_mode = 1;
721 
722 					use_dma = c->channel_program(
723 							channel,
724 							musb_ep->packet_sz,
725 							channel->desired_mode,
726 							request->dma
727 							+ request->actual,
728 							transfer_size);
729 				}
730 
731 				if (use_dma)
732 					return;
733 			}
734 #endif	/* Mentor's DMA */
735 
736 			fifo_count = request->length - request->actual;
737 			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
738 					musb_ep->end_point.name,
739 					len, fifo_count,
740 					musb_ep->packet_sz);
741 
742 			fifo_count = min_t(unsigned, len, fifo_count);
743 
744 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
745 			if (tusb_dma_omap() && musb_ep->dma) {
746 				struct dma_controller *c = musb->dma_controller;
747 				struct dma_channel *channel = musb_ep->dma;
748 				u32 dma_addr = request->dma + request->actual;
749 				int ret;
750 
751 				ret = c->channel_program(channel,
752 						musb_ep->packet_sz,
753 						channel->desired_mode,
754 						dma_addr,
755 						fifo_count);
756 				if (ret)
757 					return;
758 			}
759 #endif
760 			/*
761 			 * Unmap the dma buffer back to cpu if dma channel
762 			 * programming fails. This buffer is mapped if the
763 			 * channel allocation is successful
764 			 */
765 			if (is_dma_capable() && musb_ep->dma) {
766 				unmap_dma_buffer(req, musb);
767 
768 				/*
769 				 * Clear DMAENAB and AUTOCLEAR for the
770 				 * PIO mode transfer
771 				 */
772 				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
773 				musb_writew(epio, MUSB_RXCSR, csr);
774 			}
775 
776 			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
777 					(request->buf + request->actual));
778 			request->actual += fifo_count;
779 
780 			/* REVISIT if we left anything in the fifo, flush
781 			 * it and report -EOVERFLOW
782 			 */
783 
784 			/* ack the read! */
785 			csr |= MUSB_RXCSR_P_WZC_BITS;
786 			csr &= ~MUSB_RXCSR_RXPKTRDY;
787 			musb_writew(epio, MUSB_RXCSR, csr);
788 		}
789 	}
790 
791 	/* reached the end, or a short packet was detected */
792 	if (request->actual == request->length || len < musb_ep->packet_sz)
793 		musb_g_giveback(musb_ep, request, 0);
794 }
795 
796 /*
797  * Data ready for a request; called from IRQ
798  */
799 void musb_g_rx(struct musb *musb, u8 epnum)
800 {
801 	u16			csr;
802 	struct usb_request	*request;
803 	void __iomem		*mbase = musb->mregs;
804 	struct musb_ep		*musb_ep;
805 	void __iomem		*epio = musb->endpoints[epnum].regs;
806 	struct dma_channel	*dma;
807 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
808 
809 	if (hw_ep->is_shared_fifo)
810 		musb_ep = &hw_ep->ep_in;
811 	else
812 		musb_ep = &hw_ep->ep_out;
813 
814 	musb_ep_select(mbase, epnum);
815 
816 	request = next_request(musb_ep);
817 	if (!request)
818 		return;
819 
820 	csr = musb_readw(epio, MUSB_RXCSR);
821 	dma = is_dma_capable() ? musb_ep->dma : NULL;
822 
823 	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
824 			csr, dma ? " (dma)" : "", request);
825 
826 	if (csr & MUSB_RXCSR_P_SENTSTALL) {
827 		csr |= MUSB_RXCSR_P_WZC_BITS;
828 		csr &= ~MUSB_RXCSR_P_SENTSTALL;
829 		musb_writew(epio, MUSB_RXCSR, csr);
830 		return;
831 	}
832 
833 	if (csr & MUSB_RXCSR_P_OVERRUN) {
834 		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
835 		csr &= ~MUSB_RXCSR_P_OVERRUN;
836 		musb_writew(epio, MUSB_RXCSR, csr);
837 
838 		DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
839 		if (request->status == -EINPROGRESS)
840 			request->status = -EOVERFLOW;
841 	}
842 	if (csr & MUSB_RXCSR_INCOMPRX) {
843 		/* REVISIT not necessarily an error */
844 		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
845 	}
846 
847 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
848 		/* "should not happen"; likely RXPKTRDY pending for DMA */
849 		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
850 			"%s busy, csr %04x\n",
851 			musb_ep->end_point.name, csr);
852 		return;
853 	}
854 
855 	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
856 		csr &= ~(MUSB_RXCSR_AUTOCLEAR
857 				| MUSB_RXCSR_DMAENAB
858 				| MUSB_RXCSR_DMAMODE);
859 		musb_writew(epio, MUSB_RXCSR,
860 			MUSB_RXCSR_P_WZC_BITS | csr);
861 
862 		request->actual += musb_ep->dma->actual_len;
863 
864 		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
865 			epnum, csr,
866 			musb_readw(epio, MUSB_RXCSR),
867 			musb_ep->dma->actual_len, request);
868 
869 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
870 		/* Autoclear doesn't clear RxPktRdy for short packets */
871 		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
872 				|| (dma->actual_len
873 					& (musb_ep->packet_sz - 1))) {
874 			/* ack the read! */
875 			csr &= ~MUSB_RXCSR_RXPKTRDY;
876 			musb_writew(epio, MUSB_RXCSR, csr);
877 		}
878 
879 		/* incomplete, and not short? wait for next IN packet */
880 		if ((request->actual < request->length)
881 				&& (musb_ep->dma->actual_len
882 					== musb_ep->packet_sz)) {
883 			/* In the double-buffered case, continue to unload the
884 			 * FIFO if another Rx packet is already in it.
885 			 */
886 			csr = musb_readw(epio, MUSB_RXCSR);
887 			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
888 				hw_ep->rx_double_buffered)
889 				goto exit;
890 			return;
891 		}
892 #endif
893 		musb_g_giveback(musb_ep, request, 0);
894 
895 		request = next_request(musb_ep);
896 		if (!request)
897 			return;
898 	}
899 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
900 exit:
901 #endif
902 	/* Analyze request */
903 	rxstate(musb, to_musb_request(request));
904 }
905 
906 /* ------------------------------------------------------------ */
907 
908 static int musb_gadget_enable(struct usb_ep *ep,
909 			const struct usb_endpoint_descriptor *desc)
910 {
911 	unsigned long		flags;
912 	struct musb_ep		*musb_ep;
913 	struct musb_hw_ep	*hw_ep;
914 	void __iomem		*regs;
915 	struct musb		*musb;
916 	void __iomem	*mbase;
917 	u8		epnum;
918 	u16		csr;
919 	unsigned	tmp;
920 	int		status = -EINVAL;
921 
922 	if (!ep || !desc)
923 		return -EINVAL;
924 
925 	musb_ep = to_musb_ep(ep);
926 	hw_ep = musb_ep->hw_ep;
927 	regs = hw_ep->regs;
928 	musb = musb_ep->musb;
929 	mbase = musb->mregs;
930 	epnum = musb_ep->current_epnum;
931 
932 	spin_lock_irqsave(&musb->lock, flags);
933 
934 	if (musb_ep->desc) {
935 		status = -EBUSY;
936 		goto fail;
937 	}
938 	musb_ep->type = usb_endpoint_type(desc);
939 
940 	/* check direction and (later) maxpacket size against endpoint */
941 	if (usb_endpoint_num(desc) != epnum)
942 		goto fail;
943 
944 	/* REVISIT this rules out high bandwidth periodic transfers */
945 	tmp = le16_to_cpu(desc->wMaxPacketSize);
946 	if (tmp & ~0x07ff) {
947 		int ok;
948 
949 		if (usb_endpoint_dir_in(desc))
950 			ok = musb->hb_iso_tx;
951 		else
952 			ok = musb->hb_iso_rx;
953 
954 		if (!ok) {
955 			DBG(4, "%s: ISO high bandwidth not supported\n", __func__);
956 			goto fail;
957 		}
958 		musb_ep->hb_mult = (tmp >> 11) & 3;
959 	} else {
960 		musb_ep->hb_mult = 0;
961 	}
962 
963 	musb_ep->packet_sz = tmp & 0x7ff;
964 	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
965 
966 	/* enable the interrupts for the endpoint, set the endpoint
967 	 * packet size (or fail), set the mode, clear the fifo
968 	 */
969 	musb_ep_select(mbase, epnum);
970 	if (usb_endpoint_dir_in(desc)) {
971 		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
972 
973 		if (hw_ep->is_shared_fifo)
974 			musb_ep->is_in = 1;
975 		if (!musb_ep->is_in)
976 			goto fail;
977 
978 		if (tmp > hw_ep->max_packet_sz_tx) {
979 			DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
980 			goto fail;
981 		}
982 
983 		int_txe |= (1 << epnum);
984 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
985 
986 		/* REVISIT if can_bulk_split(), use by updating "tmp";
987 		 * likewise high bandwidth periodic tx
988 		 */
989 		/* Set TXMAXP with the maxpacket size of the endpoint
990 		 * (and the high bandwidth multiplier, if any).
991 		 */
992 		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
993 
994 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
995 		if (musb_readw(regs, MUSB_TXCSR)
996 				& MUSB_TXCSR_FIFONOTEMPTY)
997 			csr |= MUSB_TXCSR_FLUSHFIFO;
998 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
999 			csr |= MUSB_TXCSR_P_ISO;
1000 
1001 		/* set twice in case of double buffering */
1002 		musb_writew(regs, MUSB_TXCSR, csr);
1003 		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1004 		musb_writew(regs, MUSB_TXCSR, csr);
1005 
1006 	} else {
1007 		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1008 
1009 		if (hw_ep->is_shared_fifo)
1010 			musb_ep->is_in = 0;
1011 		if (musb_ep->is_in)
1012 			goto fail;
1013 
1014 		if (tmp > hw_ep->max_packet_sz_rx) {
1015 			DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
1016 			goto fail;
1017 		}
1018 
1019 		int_rxe |= (1 << epnum);
1020 		musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1021 
1022 		/* REVISIT if can_bulk_combine() use by updating "tmp"
1023 		 * likewise high bandwidth periodic rx
1024 		 */
1025 		/* Set RXMAXP with the maxpacket size of the endpoint
1026 		 * (and the high bandwidth multiplier, if any).
1027 		 */
1028 		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
1029 
1030 		/* force shared fifo to OUT-only mode */
1031 		if (hw_ep->is_shared_fifo) {
1032 			csr = musb_readw(regs, MUSB_TXCSR);
1033 			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1034 			musb_writew(regs, MUSB_TXCSR, csr);
1035 		}
1036 
1037 		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1038 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1039 			csr |= MUSB_RXCSR_P_ISO;
1040 		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1041 			csr |= MUSB_RXCSR_DISNYET;
1042 
1043 		/* set twice in case of double buffering */
1044 		musb_writew(regs, MUSB_RXCSR, csr);
1045 		musb_writew(regs, MUSB_RXCSR, csr);
1046 	}
1047 
1048 	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
1049 	 * you run out of channels here for some reason.
1050 	 */
1051 	if (is_dma_capable() && musb->dma_controller) {
1052 		struct dma_controller	*c = musb->dma_controller;
1053 
1054 		musb_ep->dma = c->channel_alloc(c, hw_ep,
1055 				(desc->bEndpointAddress & USB_DIR_IN));
1056 	} else
1057 		musb_ep->dma = NULL;
1058 
1059 	musb_ep->desc = desc;
1060 	musb_ep->busy = 0;
1061 	musb_ep->wedged = 0;
1062 	status = 0;
1063 
1064 	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1065 			musb_driver_name, musb_ep->end_point.name,
1066 			({ char *s; switch (musb_ep->type) {
1067 			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
1068 			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
1069 			default:			s = "iso"; break;
1070 			}; s; }),
1071 			musb_ep->is_in ? "IN" : "OUT",
1072 			musb_ep->dma ? "dma, " : "",
1073 			musb_ep->packet_sz);
1074 
1075 	schedule_work(&musb->irq_work);
1076 
1077 fail:
1078 	spin_unlock_irqrestore(&musb->lock, flags);
1079 	return status;
1080 }
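
/*
 * Gadget drivers reach this through usb_ep_enable().  A minimal,
 * hypothetical call site (the descriptor values are only an example):
 *
 *	static struct usb_endpoint_descriptor bulk_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= cpu_to_le16(512),
 *	};
 *
 *	status = usb_ep_enable(ep, &bulk_in_desc);
 *	if (status < 0)
 *		return status;
 */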
1081 
1082 /*
1083  * Disable an endpoint, flushing all queued requests.
1084  */
1085 static int musb_gadget_disable(struct usb_ep *ep)
1086 {
1087 	unsigned long	flags;
1088 	struct musb	*musb;
1089 	u8		epnum;
1090 	struct musb_ep	*musb_ep;
1091 	void __iomem	*epio;
1092 	int		status = 0;
1093 
1094 	musb_ep = to_musb_ep(ep);
1095 	musb = musb_ep->musb;
1096 	epnum = musb_ep->current_epnum;
1097 	epio = musb->endpoints[epnum].regs;
1098 
1099 	spin_lock_irqsave(&musb->lock, flags);
1100 	musb_ep_select(musb->mregs, epnum);
1101 
1102 	/* zero the endpoint sizes */
1103 	if (musb_ep->is_in) {
1104 		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1105 		int_txe &= ~(1 << epnum);
1106 		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1107 		musb_writew(epio, MUSB_TXMAXP, 0);
1108 	} else {
1109 		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1110 		int_rxe &= ~(1 << epnum);
1111 		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1112 		musb_writew(epio, MUSB_RXMAXP, 0);
1113 	}
1114 
1115 	musb_ep->desc = NULL;
1116 
1117 	/* abort all pending DMA and requests */
1118 	nuke(musb_ep, -ESHUTDOWN);
1119 
1120 	schedule_work(&musb->irq_work);
1121 
1122 	spin_unlock_irqrestore(&(musb->lock), flags);
1123 
1124 	DBG(2, "%s\n", musb_ep->end_point.name);
1125 
1126 	return status;
1127 }
1128 
1129 /*
1130  * Allocate a request for an endpoint.
1131  * Reused by ep0 code.
1132  */
1133 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1134 {
1135 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1136 	struct musb_request	*request = NULL;
1137 
1138 	request = kzalloc(sizeof *request, gfp_flags);
1139 	if (!request)
1140 		return NULL;
1141 	INIT_LIST_HEAD(&request->request.list);
1142 	request->request.dma = DMA_ADDR_INVALID;
1143 	request->epnum = musb_ep->current_epnum;
1144 	request->ep = musb_ep;
1145 
1146 	return &request->request;
1147 }
1148 
1149 /*
1150  * Free a request
1151  * Reused by ep0 code.
1152  */
1153 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1154 {
1155 	kfree(to_musb_request(req));
1156 }
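
/*
 * A typical (hypothetical) round trip through these hooks, as seen from a
 * gadget driver; my_complete is the driver's own completion handler:
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *	...
 *	usb_ep_free_request(ep, req);
 */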
1157 
1158 static LIST_HEAD(buffers);
1159 
1160 struct free_record {
1161 	struct list_head	list;
1162 	struct device		*dev;
1163 	unsigned		bytes;
1164 	dma_addr_t		dma;
1165 };
1166 
1167 /*
1168  * Context: controller locked, IRQs blocked.
1169  */
1170 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1171 {
1172 	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1173 		req->tx ? "TX/IN" : "RX/OUT",
1174 		&req->request, req->request.length, req->epnum);
1175 
1176 	musb_ep_select(musb->mregs, req->epnum);
1177 	if (req->tx)
1178 		txstate(musb, req);
1179 	else
1180 		rxstate(musb, req);
1181 }
1182 
1183 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1184 			gfp_t gfp_flags)
1185 {
1186 	struct musb_ep		*musb_ep;
1187 	struct musb_request	*request;
1188 	struct musb		*musb;
1189 	int			status = 0;
1190 	unsigned long		lockflags;
1191 
1192 	if (!ep || !req)
1193 		return -EINVAL;
1194 	if (!req->buf)
1195 		return -ENODATA;
1196 
1197 	musb_ep = to_musb_ep(ep);
1198 	musb = musb_ep->musb;
1199 
1200 	request = to_musb_request(req);
1201 	request->musb = musb;
1202 
1203 	if (request->ep != musb_ep)
1204 		return -EINVAL;
1205 
1206 	DBG(4, "<== to %s request=%p\n", ep->name, req);
1207 
1208 	/* request is mine now... */
1209 	request->request.actual = 0;
1210 	request->request.status = -EINPROGRESS;
1211 	request->epnum = musb_ep->current_epnum;
1212 	request->tx = musb_ep->is_in;
1213 
1214 	if (is_dma_capable() && musb_ep->dma)
1215 		map_dma_buffer(request, musb);
1216 	else
1217 		request->mapped = 0;
1218 
1219 	spin_lock_irqsave(&musb->lock, lockflags);
1220 
1221 	/* don't queue if the ep is down */
1222 	if (!musb_ep->desc) {
1223 		DBG(4, "req %p queued to %s while ep %s\n",
1224 				req, ep->name, "disabled");
1225 		status = -ESHUTDOWN;
1226 		goto cleanup;
1227 	}
1228 
1229 	/* add request to the list */
1230 	list_add_tail(&(request->request.list), &(musb_ep->req_list));
1231 
1232 	/* if this is the head of the queue, start i/o ... */
1233 	if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
1234 		musb_ep_restart(musb, request);
1235 
1236 cleanup:
1237 	spin_unlock_irqrestore(&musb->lock, lockflags);
1238 	return status;
1239 }
1240 
1241 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1242 {
1243 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1244 	struct usb_request	*r;
1245 	unsigned long		flags;
1246 	int			status = 0;
1247 	struct musb		*musb = musb_ep->musb;
1248 
1249 	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1250 		return -EINVAL;
1251 
1252 	spin_lock_irqsave(&musb->lock, flags);
1253 
1254 	list_for_each_entry(r, &musb_ep->req_list, list) {
1255 		if (r == request)
1256 			break;
1257 	}
1258 	if (r != request) {
1259 		DBG(3, "request %p not queued to %s\n", request, ep->name);
1260 		status = -EINVAL;
1261 		goto done;
1262 	}
1263 
1264 	/* if the hardware doesn't have the request, easy ... */
1265 	if (musb_ep->req_list.next != &request->list || musb_ep->busy)
1266 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1267 
1268 	/* ... else abort the dma transfer ... */
1269 	else if (is_dma_capable() && musb_ep->dma) {
1270 		struct dma_controller	*c = musb->dma_controller;
1271 
1272 		musb_ep_select(musb->mregs, musb_ep->current_epnum);
1273 		if (c->channel_abort)
1274 			status = c->channel_abort(musb_ep->dma);
1275 		else
1276 			status = -EBUSY;
1277 		if (status == 0)
1278 			musb_g_giveback(musb_ep, request, -ECONNRESET);
1279 	} else {
1280 		/* NOTE: by sticking to easily tested hardware/driver states,
1281 		 * we leave counting of in-flight packets imprecise.
1282 		 */
1283 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1284 	}
1285 
1286 done:
1287 	spin_unlock_irqrestore(&musb->lock, flags);
1288 	return status;
1289 }
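
/*
 * Unlinking is likewise driven through the standard wrapper; a gadget
 * driver canceling an outstanding request (sketch) would call:
 *
 *	usb_ep_dequeue(ep, req);
 *
 * after which req completes with status -ECONNRESET.
 */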
1290 
1291 /*
1292  * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1293  * data but will queue requests.
1294  *
1295  * exported to ep0 code
1296  */
1297 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1298 {
1299 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1300 	u8			epnum = musb_ep->current_epnum;
1301 	struct musb		*musb = musb_ep->musb;
1302 	void __iomem		*epio = musb->endpoints[epnum].regs;
1303 	void __iomem		*mbase;
1304 	unsigned long		flags;
1305 	u16			csr;
1306 	struct musb_request	*request;
1307 	int			status = 0;
1308 
1309 	if (!ep)
1310 		return -EINVAL;
1311 	mbase = musb->mregs;
1312 
1313 	spin_lock_irqsave(&musb->lock, flags);
1314 
1315 	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1316 		status = -EINVAL;
1317 		goto done;
1318 	}
1319 
1320 	musb_ep_select(mbase, epnum);
1321 
1322 	request = to_musb_request(next_request(musb_ep));
1323 	if (value) {
1324 		if (request) {
1325 			DBG(3, "request in progress, cannot halt %s\n",
1326 			    ep->name);
1327 			status = -EAGAIN;
1328 			goto done;
1329 		}
1330 		/* Cannot portably stall with non-empty FIFO */
1331 		if (musb_ep->is_in) {
1332 			csr = musb_readw(epio, MUSB_TXCSR);
1333 			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1334 				DBG(3, "FIFO busy, cannot halt %s\n", ep->name);
1335 				status = -EAGAIN;
1336 				goto done;
1337 			}
1338 		}
1339 	} else
1340 		musb_ep->wedged = 0;
1341 
1342 	/* set/clear the stall and toggle bits */
1343 	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1344 	if (musb_ep->is_in) {
1345 		csr = musb_readw(epio, MUSB_TXCSR);
1346 		csr |= MUSB_TXCSR_P_WZC_BITS
1347 			| MUSB_TXCSR_CLRDATATOG;
1348 		if (value)
1349 			csr |= MUSB_TXCSR_P_SENDSTALL;
1350 		else
1351 			csr &= ~(MUSB_TXCSR_P_SENDSTALL
1352 				| MUSB_TXCSR_P_SENTSTALL);
1353 		csr &= ~MUSB_TXCSR_TXPKTRDY;
1354 		musb_writew(epio, MUSB_TXCSR, csr);
1355 	} else {
1356 		csr = musb_readw(epio, MUSB_RXCSR);
1357 		csr |= MUSB_RXCSR_P_WZC_BITS
1358 			| MUSB_RXCSR_FLUSHFIFO
1359 			| MUSB_RXCSR_CLRDATATOG;
1360 		if (value)
1361 			csr |= MUSB_RXCSR_P_SENDSTALL;
1362 		else
1363 			csr &= ~(MUSB_RXCSR_P_SENDSTALL
1364 				| MUSB_RXCSR_P_SENTSTALL);
1365 		musb_writew(epio, MUSB_RXCSR, csr);
1366 	}
1367 
1368 	/* maybe start the first request in the queue */
1369 	if (!musb_ep->busy && !value && request) {
1370 		DBG(3, "restarting the request\n");
1371 		musb_ep_restart(musb, request);
1372 	}
1373 
1374 done:
1375 	spin_unlock_irqrestore(&musb->lock, flags);
1376 	return status;
1377 }
1378 
1379 /*
1380  * Set the halt feature; clear-halt requests from the host are ignored
1381  */
1382 static int musb_gadget_set_wedge(struct usb_ep *ep)
1383 {
1384 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1385 
1386 	if (!ep)
1387 		return -EINVAL;
1388 
1389 	musb_ep->wedged = 1;
1390 
1391 	return usb_ep_set_halt(ep);
1392 }
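
/*
 * From the gadget driver's side (sketch): usb_ep_set_halt(ep) stalls the
 * endpoint but lets the host clear it with CLEAR_FEATURE(HALT), while
 * usb_ep_set_wedge(ep) keeps it stalled until the driver itself calls
 * usb_ep_clear_halt(ep).
 */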
1393 
1394 static int musb_gadget_fifo_status(struct usb_ep *ep)
1395 {
1396 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1397 	void __iomem		*epio = musb_ep->hw_ep->regs;
1398 	int			retval = -EINVAL;
1399 
1400 	if (musb_ep->desc && !musb_ep->is_in) {
1401 		struct musb		*musb = musb_ep->musb;
1402 		int			epnum = musb_ep->current_epnum;
1403 		void __iomem		*mbase = musb->mregs;
1404 		unsigned long		flags;
1405 
1406 		spin_lock_irqsave(&musb->lock, flags);
1407 
1408 		musb_ep_select(mbase, epnum);
1409 		/* FIXME return zero unless RXPKTRDY is set */
1410 		retval = musb_readw(epio, MUSB_RXCOUNT);
1411 
1412 		spin_unlock_irqrestore(&musb->lock, flags);
1413 	}
1414 	return retval;
1415 }
1416 
1417 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1418 {
1419 	struct musb_ep	*musb_ep = to_musb_ep(ep);
1420 	struct musb	*musb = musb_ep->musb;
1421 	u8		epnum = musb_ep->current_epnum;
1422 	void __iomem	*epio = musb->endpoints[epnum].regs;
1423 	void __iomem	*mbase;
1424 	unsigned long	flags;
1425 	u16		csr, int_txe;
1426 
1427 	mbase = musb->mregs;
1428 
1429 	spin_lock_irqsave(&musb->lock, flags);
1430 	musb_ep_select(mbase, (u8) epnum);
1431 
1432 	/* disable interrupts */
1433 	int_txe = musb_readw(mbase, MUSB_INTRTXE);
1434 	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1435 
1436 	if (musb_ep->is_in) {
1437 		csr = musb_readw(epio, MUSB_TXCSR);
1438 		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1439 			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1440 			musb_writew(epio, MUSB_TXCSR, csr);
1441 			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1442 			musb_writew(epio, MUSB_TXCSR, csr);
1443 		}
1444 	} else {
1445 		csr = musb_readw(epio, MUSB_RXCSR);
1446 		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1447 		musb_writew(epio, MUSB_RXCSR, csr);
1448 		musb_writew(epio, MUSB_RXCSR, csr);
1449 	}
1450 
1451 	/* re-enable interrupt */
1452 	musb_writew(mbase, MUSB_INTRTXE, int_txe);
1453 	spin_unlock_irqrestore(&musb->lock, flags);
1454 }
1455 
1456 static const struct usb_ep_ops musb_ep_ops = {
1457 	.enable		= musb_gadget_enable,
1458 	.disable	= musb_gadget_disable,
1459 	.alloc_request	= musb_alloc_request,
1460 	.free_request	= musb_free_request,
1461 	.queue		= musb_gadget_queue,
1462 	.dequeue	= musb_gadget_dequeue,
1463 	.set_halt	= musb_gadget_set_halt,
1464 	.set_wedge	= musb_gadget_set_wedge,
1465 	.fifo_status	= musb_gadget_fifo_status,
1466 	.fifo_flush	= musb_gadget_fifo_flush
1467 };
1468 
1469 /* ----------------------------------------------------------------------- */
1470 
1471 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1472 {
1473 	struct musb	*musb = gadget_to_musb(gadget);
1474 
1475 	return (int)musb_readw(musb->mregs, MUSB_FRAME);
1476 }
1477 
1478 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1479 {
1480 	struct musb	*musb = gadget_to_musb(gadget);
1481 	void __iomem	*mregs = musb->mregs;
1482 	unsigned long	flags;
1483 	int		status = -EINVAL;
1484 	u8		power, devctl;
1485 	int		retries;
1486 
1487 	spin_lock_irqsave(&musb->lock, flags);
1488 
1489 	switch (musb->xceiv->state) {
1490 	case OTG_STATE_B_PERIPHERAL:
1491 		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1492 		 * that's part of the standard usb 1.1 state machine, and
1493 		 * doesn't affect OTG transitions.
1494 		 */
1495 		if (musb->may_wakeup && musb->is_suspended)
1496 			break;
1497 		goto done;
1498 	case OTG_STATE_B_IDLE:
1499 		/* Start SRP ... OTG not required. */
1500 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1501 		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
1502 		devctl |= MUSB_DEVCTL_SESSION;
1503 		musb_writeb(mregs, MUSB_DEVCTL, devctl);
1504 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1505 		retries = 100;
1506 		while (!(devctl & MUSB_DEVCTL_SESSION)) {
1507 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1508 			if (retries-- < 1)
1509 				break;
1510 		}
1511 		retries = 10000;
1512 		while (devctl & MUSB_DEVCTL_SESSION) {
1513 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1514 			if (retries-- < 1)
1515 				break;
1516 		}
1517 
1518 		/* Block idling for at least 1s */
1519 		musb_platform_try_idle(musb,
1520 			jiffies + msecs_to_jiffies(1000));
1521 
1522 		status = 0;
1523 		goto done;
1524 	default:
1525 		DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
1526 		goto done;
1527 	}
1528 
1529 	status = 0;
1530 
1531 	power = musb_readb(mregs, MUSB_POWER);
1532 	power |= MUSB_POWER_RESUME;
1533 	musb_writeb(mregs, MUSB_POWER, power);
1534 	DBG(2, "issue wakeup\n");
1535 
1536 	/* FIXME do this next chunk in a timer callback, without mdelay */
1537 	mdelay(2);
1538 
1539 	power = musb_readb(mregs, MUSB_POWER);
1540 	power &= ~MUSB_POWER_RESUME;
1541 	musb_writeb(mregs, MUSB_POWER, power);
1542 done:
1543 	spin_unlock_irqrestore(&musb->lock, flags);
1544 	return status;
1545 }
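
/*
 * A gadget driver triggers the resume path above with usb_gadget_wakeup(),
 * typically once the host has suspended the link with remote wakeup
 * enabled (hypothetical sketch; the flag is state the driver tracks):
 *
 *	if (my_remote_wakeup_enabled)
 *		usb_gadget_wakeup(gadget);
 */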
1546 
1547 static int
1548 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1549 {
1550 	struct musb	*musb = gadget_to_musb(gadget);
1551 
1552 	musb->is_self_powered = !!is_selfpowered;
1553 	return 0;
1554 }
1555 
1556 static void musb_pullup(struct musb *musb, int is_on)
1557 {
1558 	u8 power;
1559 
1560 	power = musb_readb(musb->mregs, MUSB_POWER);
1561 	if (is_on)
1562 		power |= MUSB_POWER_SOFTCONN;
1563 	else
1564 		power &= ~MUSB_POWER_SOFTCONN;
1565 
1566 	/* FIXME if on, HdrcStart; if off, HdrcStop */
1567 
1568 	DBG(3, "gadget %s D+ pullup %s\n",
1569 		musb->gadget_driver->function, is_on ? "on" : "off");
1570 	musb_writeb(musb->mregs, MUSB_POWER, power);
1571 }
1572 
1573 #if 0
1574 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1575 {
1576 	DBG(2, "<= %s =>\n", __func__);
1577 
1578 	/*
1579 	 * FIXME iff driver's softconnect flag is set (as it is during probe,
1580 	 * though that can clear it), just musb_pullup().
1581 	 */
1582 
1583 	return -EINVAL;
1584 }
1585 #endif
1586 
1587 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1588 {
1589 	struct musb	*musb = gadget_to_musb(gadget);
1590 
1591 	if (!musb->xceiv->set_power)
1592 		return -EOPNOTSUPP;
1593 	return otg_set_power(musb->xceiv, mA);
1594 }
1595 
1596 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1597 {
1598 	struct musb	*musb = gadget_to_musb(gadget);
1599 	unsigned long	flags;
1600 
1601 	is_on = !!is_on;
1602 
1603 	/* NOTE: this assumes we are sensing vbus; we'd rather
1604 	 * not pullup unless the B-session is active.
1605 	 */
1606 	spin_lock_irqsave(&musb->lock, flags);
1607 	if (is_on != musb->softconnect) {
1608 		musb->softconnect = is_on;
1609 		musb_pullup(musb, is_on);
1610 	}
1611 	spin_unlock_irqrestore(&musb->lock, flags);
1612 	return 0;
1613 }
1614 
1615 static const struct usb_gadget_ops musb_gadget_operations = {
1616 	.get_frame		= musb_gadget_get_frame,
1617 	.wakeup			= musb_gadget_wakeup,
1618 	.set_selfpowered	= musb_gadget_set_self_powered,
1619 	/* .vbus_session		= musb_gadget_vbus_session, */
1620 	.vbus_draw		= musb_gadget_vbus_draw,
1621 	.pullup			= musb_gadget_pullup,
1622 };
1623 
1624 /* ----------------------------------------------------------------------- */
1625 
1626 /* Registration */
1627 
1628 /* Only this registration code "knows" the rule (from USB standards)
1629  * about there being only one external upstream port.  It assumes
1630  * all peripheral ports are external...
1631  */
1632 static struct musb *the_gadget;
1633 
1634 static void musb_gadget_release(struct device *dev)
1635 {
1636 	/* kref_put(WHAT) */
1637 	dev_dbg(dev, "%s\n", __func__);
1638 }
1639 
1640 
1641 static void __init
1642 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1643 {
1644 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1645 
1646 	memset(ep, 0, sizeof *ep);
1647 
1648 	ep->current_epnum = epnum;
1649 	ep->musb = musb;
1650 	ep->hw_ep = hw_ep;
1651 	ep->is_in = is_in;
1652 
1653 	INIT_LIST_HEAD(&ep->req_list);
1654 
1655 	sprintf(ep->name, "ep%d%s", epnum,
1656 			(!epnum || hw_ep->is_shared_fifo) ? "" : (
1657 				is_in ? "in" : "out"));
1658 	ep->end_point.name = ep->name;
1659 	INIT_LIST_HEAD(&ep->end_point.ep_list);
1660 	if (!epnum) {
1661 		ep->end_point.maxpacket = 64;
1662 		ep->end_point.ops = &musb_g_ep0_ops;
1663 		musb->g.ep0 = &ep->end_point;
1664 	} else {
1665 		if (is_in)
1666 			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1667 		else
1668 			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1669 		ep->end_point.ops = &musb_ep_ops;
1670 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1671 	}
1672 }
1673 
1674 /*
1675  * Initialize the endpoints exposed to peripheral drivers, with backlinks
1676  * to the rest of the driver state.
1677  */
1678 static inline void __init musb_g_init_endpoints(struct musb *musb)
1679 {
1680 	u8			epnum;
1681 	struct musb_hw_ep	*hw_ep;
1682 	unsigned		count = 0;
1683 
1684 	/* initialize endpoint list just once */
1685 	INIT_LIST_HEAD(&(musb->g.ep_list));
1686 
1687 	for (epnum = 0, hw_ep = musb->endpoints;
1688 			epnum < musb->nr_endpoints;
1689 			epnum++, hw_ep++) {
1690 		if (hw_ep->is_shared_fifo /* || !epnum */) {
1691 			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1692 			count++;
1693 		} else {
1694 			if (hw_ep->max_packet_sz_tx) {
1695 				init_peripheral_ep(musb, &hw_ep->ep_in,
1696 							epnum, 1);
1697 				count++;
1698 			}
1699 			if (hw_ep->max_packet_sz_rx) {
1700 				init_peripheral_ep(musb, &hw_ep->ep_out,
1701 							epnum, 0);
1702 				count++;
1703 			}
1704 		}
1705 	}
1706 }
1707 
1708 /* called once during driver setup to initialize and link into
1709  * the driver model; memory is zeroed.
1710  */
1711 int __init musb_gadget_setup(struct musb *musb)
1712 {
1713 	int status;
1714 
1715 	/* REVISIT minor race:  if (erroneously) setting up two
1716 	 * musb peripherals at the same time, only the bus lock
1717 	 * is probably held.
1718 	 */
1719 	if (the_gadget)
1720 		return -EBUSY;
1721 	the_gadget = musb;
1722 
1723 	musb->g.ops = &musb_gadget_operations;
1724 	musb->g.is_dualspeed = 1;
1725 	musb->g.speed = USB_SPEED_UNKNOWN;
1726 
1727 	/* this "gadget" abstracts/virtualizes the controller */
1728 	dev_set_name(&musb->g.dev, "gadget");
1729 	musb->g.dev.parent = musb->controller;
1730 	musb->g.dev.dma_mask = musb->controller->dma_mask;
1731 	musb->g.dev.release = musb_gadget_release;
1732 	musb->g.name = musb_driver_name;
1733 
1734 	if (is_otg_enabled(musb))
1735 		musb->g.is_otg = 1;
1736 
1737 	musb_g_init_endpoints(musb);
1738 
1739 	musb->is_active = 0;
1740 	musb_platform_try_idle(musb, 0);
1741 
1742 	status = device_register(&musb->g.dev);
1743 	if (status != 0) {
1744 		put_device(&musb->g.dev);
1745 		the_gadget = NULL;
1746 	}
1747 	return status;
1748 }
1749 
1750 void musb_gadget_cleanup(struct musb *musb)
1751 {
1752 	if (musb != the_gadget)
1753 		return;
1754 
1755 	device_unregister(&musb->g.dev);
1756 	the_gadget = NULL;
1757 }
1758 
1759 /*
1760  * Register the gadget driver. Used by gadget drivers when
1761  * registering themselves with the controller.
1762  *
1763  * -EINVAL invalid driver or missing callbacks
1764  * -EBUSY another gadget is already using the controller
1765  * -ENOMEM no memory to perform the operation
1766  *
1767  * @param driver the gadget driver
1768  * @param bind the driver's bind function
1769  * @return <0 if error, 0 if everything is fine
1770  */
1771 int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1772 		int (*bind)(struct usb_gadget *))
1773 {
1774 	int retval;
1775 	unsigned long flags;
1776 	struct musb *musb = the_gadget;
1777 
1778 	if (!driver
1779 			|| driver->speed != USB_SPEED_HIGH
1780 			|| !bind || !driver->setup)
1781 		return -EINVAL;
1782 
1783 	/* driver must be initialized to support peripheral mode */
1784 	if (!musb) {
1785 		DBG(1, "%s, no dev??\n", __func__);
1786 		return -ENODEV;
1787 	}
1788 
1789 	DBG(3, "registering driver %s\n", driver->function);
1790 	spin_lock_irqsave(&musb->lock, flags);
1791 
1792 	if (musb->gadget_driver) {
1793 		DBG(1, "%s is already bound to %s\n",
1794 				musb_driver_name,
1795 				musb->gadget_driver->driver.name);
1796 		retval = -EBUSY;
1797 	} else {
1798 		musb->gadget_driver = driver;
1799 		musb->g.dev.driver = &driver->driver;
1800 		driver->driver.bus = NULL;
1801 		musb->softconnect = 1;
1802 		retval = 0;
1803 	}
1804 
1805 	spin_unlock_irqrestore(&musb->lock, flags);
1806 
1807 	if (retval == 0) {
1808 		retval = bind(&musb->g);
1809 		if (retval != 0) {
1810 			DBG(3, "bind to driver %s failed --> %d\n",
1811 					driver->driver.name, retval);
1812 			musb->gadget_driver = NULL;
1813 			musb->g.dev.driver = NULL;
1814 			return retval;
1815 		}
1816 		spin_lock_irqsave(&musb->lock, flags);
1817 
1818 		otg_set_peripheral(musb->xceiv, &musb->g);
1819 		musb->xceiv->state = OTG_STATE_B_IDLE;
1820 		musb->is_active = 1;
1821 
1822 		/* FIXME this ignores the softconnect flag.  Drivers are
1823 		 * allowed to hold the peripheral inactive until, for example,
1824 		 * userspace hooks up printer hardware or DSP codecs, so
1825 		 * hosts only see fully functional devices.
1826 		 */
1827 
1828 		if (!is_otg_enabled(musb))
1829 			musb_start(musb);
1830 
1831 		otg_set_peripheral(musb->xceiv, &musb->g);
1832 
1833 		spin_unlock_irqrestore(&musb->lock, flags);
1834 
1835 		if (is_otg_enabled(musb)) {
1836 			struct usb_hcd	*hcd = musb_to_hcd(musb);
1837 
1838 			DBG(3, "OTG startup...\n");
1839 
1840 			/* REVISIT:  funcall to other code, which also
1841 			 * handles power budgeting ... this way also
1842 			 * ensures HdrcStart is indirectly called.
1843 			 */
1844 			retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1845 			if (retval < 0) {
1846 				DBG(1, "add_hcd failed, %d\n", retval);
1847 				spin_lock_irqsave(&musb->lock, flags);
1848 				otg_set_peripheral(musb->xceiv, NULL);
1849 				musb->gadget_driver = NULL;
1850 				musb->g.dev.driver = NULL;
1851 				spin_unlock_irqrestore(&musb->lock, flags);
1852 			} else {
1853 				hcd->self.uses_pio_for_control = 1;
1854 			}
1855 		}
1856 	}
1857 
1858 	return retval;
1859 }
1860 EXPORT_SYMBOL(usb_gadget_probe_driver);
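
/* Example (illustrative sketch, not part of this driver): a minimal
 * high-speed gadget driver registering itself through the call above;
 * the structure, callbacks, and names below are hypothetical:
 *
 *	static int example_bind(struct usb_gadget *gadget)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_unbind(struct usb_gadget *gadget)
 *	{
 *	}
 *
 *	static int example_setup(struct usb_gadget *gadget,
 *			const struct usb_ctrlrequest *ctrl)
 *	{
 *		return -EOPNOTSUPP;
 *	}
 *
 *	static void example_disconnect(struct usb_gadget *gadget)
 *	{
 *	}
 *
 *	static struct usb_gadget_driver example_driver = {
 *		.function	= "example",
 *		.speed		= USB_SPEED_HIGH,
 *		.unbind		= example_unbind,
 *		.setup		= example_setup,
 *		.disconnect	= example_disconnect,
 *		.driver		= {
 *			.name	= "example",
 *			.owner	= THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return usb_gadget_probe_driver(&example_driver, example_bind);
 *	}
 */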

static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}

		spin_unlock(&musb->lock);
		driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	unsigned long	flags;
	int		retval = 0;
	struct musb	*musb = the_gadget;

	if (!driver || !driver->unbind || !musb)
		return -EINVAL;

	/* REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

#ifdef	CONFIG_USB_MUSB_OTG
	musb_hnp_stop(musb);
#endif

	if (musb->gadget_driver == driver) {

		(void) musb_gadget_vbus_draw(&musb->g, 0);

		musb->xceiv->state = OTG_STATE_UNDEFINED;
		stop_activity(musb, driver);
		otg_set_peripheral(musb->xceiv, NULL);

		DBG(3, "unregistering driver %s\n", driver->function);
		spin_unlock_irqrestore(&musb->lock, flags);
		driver->unbind(&musb->g);
		spin_lock_irqsave(&musb->lock, flags);

		musb->gadget_driver = NULL;
		musb->g.dev.driver = NULL;

		musb->is_active = 0;
		musb_platform_try_idle(musb, 0);
	} else
		retval = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb) && retval == 0) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
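
/* Example (illustrative sketch, not part of this driver): the matching
 * module exit path for the hypothetical example_driver sketched above;
 * note that unregistering requires a non-NULL ->unbind callback:
 *
 *	static void __exit example_exit(void)
 *	{
 *		usb_gadget_unregister_driver(&example_driver);
 *	}
 */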


/* ----------------------------------------------------------------------- */

/* lifecycle operations called through musb_core.c */

void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				otg_state_string(musb));
	}
}

/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	DBG(3, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb));
	}
}
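
/* Example (illustrative sketch, not part of this driver): the suspend and
 * resume hooks above invoke the bound function driver with musb->lock
 * dropped, so the callback may re-enter gadget API functions; these hooks
 * run from the controller's interrupt path, so the callbacks must not
 * sleep.  Names below are hypothetical, wired up through the .suspend and
 * .resume members of struct usb_gadget_driver:
 *
 *	static void example_suspend(struct usb_gadget *gadget)
 *	{
 *	}
 *
 *	static void example_resume(struct usb_gadget *gadget)
 *	{
 *	}
 */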

/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}

/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	DBG(3, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->state) {
	default:
#ifdef	CONFIG_USB_MUSB_OTG
		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
#endif
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}

void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	DBG(3, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw (in mA):
	 * 8 mA before configuration in OTG mode, else 100 mA
	 */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
}

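/* Example (illustrative sketch, not part of this driver): after reset, the
 * speed negotiated here is visible to the function driver as gadget->speed,
 * typically used to size bulk endpoints (512-byte maxpacket at high speed,
 * 64 at full speed).  Extending the hypothetical example_setup() sketched
 * earlier:
 *
 *	static int example_setup(struct usb_gadget *gadget,
 *			const struct usb_ctrlrequest *ctrl)
 *	{
 *		u16 maxpacket;
 *
 *		maxpacket = (gadget->speed == USB_SPEED_HIGH) ? 512 : 64;
 *		return example_handle_request(gadget, ctrl, maxpacket);
 *	}
 *
 * where example_handle_request() is likewise hypothetical.
 */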