xref: /openbmc/linux/drivers/usb/musb/musb_gadget.c (revision e2f1cf25)
1 /*
2  * MUSB OTG driver peripheral support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
26  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
45 
46 #include "musb_core.h"
47 
48 
49 /* ----------------------------------------------------------------------- */
50 
/* True iff this request's buffer is currently DMA-mapped (either by us
 * or pre-mapped by the gadget driver) on a DMA-capable platform.
 */
#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))
53 
54 /* Maps the buffer to dma  */
55 
56 static inline void map_dma_buffer(struct musb_request *request,
57 			struct musb *musb, struct musb_ep *musb_ep)
58 {
59 	int compatible = true;
60 	struct dma_controller *dma = musb->dma_controller;
61 
62 	request->map_state = UN_MAPPED;
63 
64 	if (!is_dma_capable() || !musb_ep->dma)
65 		return;
66 
67 	/* Check if DMA engine can handle this request.
68 	 * DMA code must reject the USB request explicitly.
69 	 * Default behaviour is to map the request.
70 	 */
71 	if (dma->is_compatible)
72 		compatible = dma->is_compatible(musb_ep->dma,
73 				musb_ep->packet_sz, request->request.buf,
74 				request->request.length);
75 	if (!compatible)
76 		return;
77 
78 	if (request->request.dma == DMA_ADDR_INVALID) {
79 		dma_addr_t dma_addr;
80 		int ret;
81 
82 		dma_addr = dma_map_single(
83 				musb->controller,
84 				request->request.buf,
85 				request->request.length,
86 				request->tx
87 					? DMA_TO_DEVICE
88 					: DMA_FROM_DEVICE);
89 		ret = dma_mapping_error(musb->controller, dma_addr);
90 		if (ret)
91 			return;
92 
93 		request->request.dma = dma_addr;
94 		request->map_state = MUSB_MAPPED;
95 	} else {
96 		dma_sync_single_for_device(musb->controller,
97 			request->request.dma,
98 			request->request.length,
99 			request->tx
100 				? DMA_TO_DEVICE
101 				: DMA_FROM_DEVICE);
102 		request->map_state = PRE_MAPPED;
103 	}
104 }
105 
106 /* Unmap the buffer from dma and maps it back to cpu */
107 static inline void unmap_dma_buffer(struct musb_request *request,
108 				struct musb *musb)
109 {
110 	struct musb_ep *musb_ep = request->ep;
111 
112 	if (!is_buffer_mapped(request) || !musb_ep->dma)
113 		return;
114 
115 	if (request->request.dma == DMA_ADDR_INVALID) {
116 		dev_vdbg(musb->controller,
117 				"not unmapping a never mapped buffer\n");
118 		return;
119 	}
120 	if (request->map_state == MUSB_MAPPED) {
121 		dma_unmap_single(musb->controller,
122 			request->request.dma,
123 			request->request.length,
124 			request->tx
125 				? DMA_TO_DEVICE
126 				: DMA_FROM_DEVICE);
127 		request->request.dma = DMA_ADDR_INVALID;
128 	} else { /* PRE_MAPPED */
129 		dma_sync_single_for_cpu(musb->controller,
130 			request->request.dma,
131 			request->request.length,
132 			request->tx
133 				? DMA_TO_DEVICE
134 				: DMA_FROM_DEVICE);
135 	}
136 	request->map_state = UN_MAPPED;
137 }
138 
/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued on
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 *
 * Drops musb->lock around the gadget driver's completion callback, so
 * callers must revalidate endpoint/queue state (and reselect INDEX)
 * after this returns.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;	/* restored on exit */

	req = to_musb_request(request);

	list_del(&req->list);
	/* don't overwrite a status already set (e.g. by a cancel) */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	/* mark busy so nothing restarts this ep while we're unlocked */
	ep->busy = 1;
	spin_unlock(&musb->lock);

	/* only unmap buffers that carry a valid DMA mapping */
	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
183 
184 /* ----------------------------------------------------------------------- */
185 
/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 *
 * Flushes the FIFO, aborts and releases any DMA channel, then gives
 * back every queued request with @status.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	/* keep giveback callbacks from kicking off new transfers */
	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			/* written twice, presumably to cover double-buffered
			 * FIFOs — NOTE(review): confirm against MUSB docs */
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	/* complete every pending request with the abort status */
	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}
231 
232 /* ----------------------------------------------------------------------- */
233 
234 /* Data transfers - pure PIO, pure DMA, or mixed mode */
235 
236 /*
237  * This assumes the separate CPPI engine is responding to DMA requests
238  * from the usb core ... sequenced a bit differently from mentor dma.
239  */
240 
241 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
242 {
243 	if (can_bulk_split(musb, ep->type))
244 		return ep->hw_ep->max_packet_sz_tx;
245 	else
246 		return ep->packet_sz;
247 }
248 
/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Tries DMA first (when the buffer is mapped and a channel programs
 * successfully), otherwise falls back to PIO for one packet.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	/* never write more than one max-size chunk, or what remains */
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
			/* mode 0 for a final short packet, mode 1 otherwise */
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
						(musb_ep->hb_mult &&
						 can_bulk_split(musb,
						    musb_ep->type)))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#endif
		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
				MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
						~MUSB_TXCSR_P_UNDERRUN) | csr);

			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);

			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 */
			/*
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				/* channel refused: release it and drop to PIO */
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: request->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		/* PIO: copy one chunk into the FIFO and arm TXPKTRDY */
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
437 
438 /*
439  * FIFO state update (e.g. data ready).
440  * Called from IRQ,  with controller locked.
441  */
442 void musb_g_tx(struct musb *musb, u8 epnum)
443 {
444 	u16			csr;
445 	struct musb_request	*req;
446 	struct usb_request	*request;
447 	u8 __iomem		*mbase = musb->mregs;
448 	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
449 	void __iomem		*epio = musb->endpoints[epnum].regs;
450 	struct dma_channel	*dma;
451 
452 	musb_ep_select(mbase, epnum);
453 	req = next_request(musb_ep);
454 	request = &req->request;
455 
456 	csr = musb_readw(epio, MUSB_TXCSR);
457 	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
458 
459 	dma = is_dma_capable() ? musb_ep->dma : NULL;
460 
461 	/*
462 	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
463 	 * probably rates reporting as a host error.
464 	 */
465 	if (csr & MUSB_TXCSR_P_SENTSTALL) {
466 		csr |=	MUSB_TXCSR_P_WZC_BITS;
467 		csr &= ~MUSB_TXCSR_P_SENTSTALL;
468 		musb_writew(epio, MUSB_TXCSR, csr);
469 		return;
470 	}
471 
472 	if (csr & MUSB_TXCSR_P_UNDERRUN) {
473 		/* We NAKed, no big deal... little reason to care. */
474 		csr |=	 MUSB_TXCSR_P_WZC_BITS;
475 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
476 		musb_writew(epio, MUSB_TXCSR, csr);
477 		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
478 				epnum, request);
479 	}
480 
481 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
482 		/*
483 		 * SHOULD NOT HAPPEN... has with CPPI though, after
484 		 * changing SENDSTALL (and other cases); harmless?
485 		 */
486 		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
487 		return;
488 	}
489 
490 	if (request) {
491 		u8	is_dma = 0;
492 		bool	short_packet = false;
493 
494 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
495 			is_dma = 1;
496 			csr |= MUSB_TXCSR_P_WZC_BITS;
497 			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
498 				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
499 			musb_writew(epio, MUSB_TXCSR, csr);
500 			/* Ensure writebuffer is empty. */
501 			csr = musb_readw(epio, MUSB_TXCSR);
502 			request->actual += musb_ep->dma->actual_len;
503 			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
504 				epnum, csr, musb_ep->dma->actual_len, request);
505 		}
506 
507 		/*
508 		 * First, maybe a terminating short packet. Some DMA
509 		 * engines might handle this by themselves.
510 		 */
511 		if ((request->zero && request->length)
512 			&& (request->length % musb_ep->packet_sz == 0)
513 			&& (request->actual == request->length))
514 				short_packet = true;
515 
516 		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
517 			(is_dma && (!dma->desired_mode ||
518 				(request->actual &
519 					(musb_ep->packet_sz - 1)))))
520 				short_packet = true;
521 
522 		if (short_packet) {
523 			/*
524 			 * On DMA completion, FIFO may not be
525 			 * available yet...
526 			 */
527 			if (csr & MUSB_TXCSR_TXPKTRDY)
528 				return;
529 
530 			dev_dbg(musb->controller, "sending zero pkt\n");
531 			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
532 					| MUSB_TXCSR_TXPKTRDY);
533 			request->zero = 0;
534 		}
535 
536 		if (request->actual == request->length) {
537 			musb_g_giveback(musb_ep, request, 0);
538 			/*
539 			 * In the giveback function the MUSB lock is
540 			 * released and acquired after sometime. During
541 			 * this time period the INDEX register could get
542 			 * changed by the gadget_queue function especially
543 			 * on SMP systems. Reselect the INDEX to be sure
544 			 * we are reading/modifying the right registers
545 			 */
546 			musb_ep_select(mbase, epnum);
547 			req = musb_ep->desc ? next_request(musb_ep) : NULL;
548 			if (!req) {
549 				dev_dbg(musb->controller, "%s idle now\n",
550 					musb_ep->end_point.name);
551 				return;
552 			}
553 		}
554 
555 		txstate(musb, req);
556 	}
557 }
558 
559 /* ------------------------------------------------------------ */
560 
/*
 * Fill the current RX request from the endpoint FIFO — by DMA when a
 * mapped buffer and a willing channel exist, otherwise by PIO — and
 * give the request back once full or ended by a short packet.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	/* a shared-FIFO hw endpoint keeps its state on the ep_in side */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most these gadgets, end of transfer is signified either by a short
	 * packet, or filling the last byte of the buffer.  (Sending extra
	 * data in that last packet should trigger an overflow fault.)  But
	 * in mode 1, we don't get DMA completion interrupt for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size =	min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			len = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			 if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
830 
/*
 * Data ready for a request; called from IRQ
 *
 * Acks RX status (stall, overrun), winds down a finished DMA transfer,
 * gives back completed requests, then runs rxstate() to continue or
 * start unloading the FIFO into the next request.
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	/* shared-FIFO hw endpoints keep their state on the ep_in side */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	/* nothing queued: nothing to do */
	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		/* DMA finished: turn it off and account the bytes */
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
 			 * there is Rx packet in FIFO.
 			 **/
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after sometime. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}
953 
954 /* ------------------------------------------------------------ */
955 
/*
 * usb_ep_ops.enable: configure one hardware endpoint per @desc
 * (direction, transfer type, maxpacket), unmask its interrupt, and try
 * to grab a DMA channel for it.
 *
 * Returns 0 on success, -EBUSY if the endpoint is already enabled,
 * -EINVAL on bad arguments or unsupported packet sizes.
 * Context: takes and releases musb->lock with IRQs disabled.
 */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	/* already enabled? */
	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		/* bits 12:11 of wMaxPacketSize request additional
		 * transactions per microframe; only allowed when the
		 * core was built with high-bandwidth ISO support
		 */
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	/* worst-case bytes per microframe: maxpacket * (hb_mult + 1) */
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok) {
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		} else {
			if (can_bulk_split(musb, musb_ep->type))
				musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
							musb_ep->packet_sz) - 1;
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));
		}

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		/* NOTE(review): writes the TX fifo size into RXMAXP --
		 * looks suspicious; confirm whether this relies on
		 * symmetric TX/RX FIFO sizing for this config.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			} s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	/* let the irq worker re-evaluate controller state */
	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
1139 
/*
 * Disable an endpoint flushing all requests queued.
 *
 * Masks the endpoint's interrupt, zeroes its MAXP register, clears the
 * descriptor so queue() rejects new requests, then completes every
 * pending request with -ESHUTDOWN via nuke().
 * Context: takes musb->lock with IRQs disabled; always returns 0.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	/* point the indexed register window at this endpoint */
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		/* mask this endpoint's TX interrupt, clear TXMAXP */
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		/* mask this endpoint's RX interrupt, clear RXMAXP */
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* mark the endpoint unconfigured */
	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	/* let the irq worker re-evaluate controller state */
	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}
1185 
1186 /*
1187  * Allocate a request for an endpoint.
1188  * Reused by ep0 code.
1189  */
1190 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1191 {
1192 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1193 	struct musb		*musb = musb_ep->musb;
1194 	struct musb_request	*request = NULL;
1195 
1196 	request = kzalloc(sizeof *request, gfp_flags);
1197 	if (!request) {
1198 		dev_dbg(musb->controller, "not enough memory\n");
1199 		return NULL;
1200 	}
1201 
1202 	request->request.dma = DMA_ADDR_INVALID;
1203 	request->epnum = musb_ep->current_epnum;
1204 	request->ep = musb_ep;
1205 
1206 	return &request->request;
1207 }
1208 
/*
 * Free a request previously obtained from musb_alloc_request().
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *request = to_musb_request(req);

	kfree(request);
}
1217 
static LIST_HEAD(buffers);

/* Bookkeeping entry for a buffer to be released later (device, size and
 * DMA handle). NOTE(review): neither 'buffers' nor this struct is
 * referenced anywhere in the visible part of this file -- possibly dead
 * code; confirm against the rest of the file before removing.
 */
struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};
1226 
1227 /*
1228  * Context: controller locked, IRQs blocked.
1229  */
1230 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1231 {
1232 	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1233 		req->tx ? "TX/IN" : "RX/OUT",
1234 		&req->request, req->request.length, req->epnum);
1235 
1236 	musb_ep_select(musb->mregs, req->epnum);
1237 	if (req->tx)
1238 		txstate(musb, req);
1239 	else
1240 		rxstate(musb, req);
1241 }
1242 
/*
 * usb_ep_ops.queue: accept a request, map it for DMA, append it to the
 * endpoint's queue, and start I/O immediately when it becomes the head
 * of an otherwise idle endpoint.
 *
 * Returns 0 on success, -EINVAL on bad arguments or wrong endpoint,
 * -ENODATA without a buffer, -ESHUTDOWN when the endpoint is disabled.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	/* done before taking the lock; may set up a DMA mapping */
	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		/* undo the mapping done above before bailing out */
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* it this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
1298 
1299 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1300 {
1301 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1302 	struct musb_request	*req = to_musb_request(request);
1303 	struct musb_request	*r;
1304 	unsigned long		flags;
1305 	int			status = 0;
1306 	struct musb		*musb = musb_ep->musb;
1307 
1308 	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1309 		return -EINVAL;
1310 
1311 	spin_lock_irqsave(&musb->lock, flags);
1312 
1313 	list_for_each_entry(r, &musb_ep->req_list, list) {
1314 		if (r == req)
1315 			break;
1316 	}
1317 	if (r != req) {
1318 		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1319 		status = -EINVAL;
1320 		goto done;
1321 	}
1322 
1323 	/* if the hardware doesn't have the request, easy ... */
1324 	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1325 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1326 
1327 	/* ... else abort the dma transfer ... */
1328 	else if (is_dma_capable() && musb_ep->dma) {
1329 		struct dma_controller	*c = musb->dma_controller;
1330 
1331 		musb_ep_select(musb->mregs, musb_ep->current_epnum);
1332 		if (c->channel_abort)
1333 			status = c->channel_abort(musb_ep->dma);
1334 		else
1335 			status = -EBUSY;
1336 		if (status == 0)
1337 			musb_g_giveback(musb_ep, request, -ECONNRESET);
1338 	} else {
1339 		/* NOTE: by sticking to easily tested hardware/driver states,
1340 		 * we leave counting of in-flight packets imprecise.
1341 		 */
1342 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1343 	}
1344 
1345 done:
1346 	spin_unlock_irqrestore(&musb->lock, flags);
1347 	return status;
1348 }
1349 
1350 /*
1351  * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
1352  * data but will queue requests.
1353  *
1354  * exported to ep0 code
1355  */
1356 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1357 {
1358 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1359 	u8			epnum = musb_ep->current_epnum;
1360 	struct musb		*musb = musb_ep->musb;
1361 	void __iomem		*epio = musb->endpoints[epnum].regs;
1362 	void __iomem		*mbase;
1363 	unsigned long		flags;
1364 	u16			csr;
1365 	struct musb_request	*request;
1366 	int			status = 0;
1367 
1368 	if (!ep)
1369 		return -EINVAL;
1370 	mbase = musb->mregs;
1371 
1372 	spin_lock_irqsave(&musb->lock, flags);
1373 
1374 	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1375 		status = -EINVAL;
1376 		goto done;
1377 	}
1378 
1379 	musb_ep_select(mbase, epnum);
1380 
1381 	request = next_request(musb_ep);
1382 	if (value) {
1383 		if (request) {
1384 			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1385 			    ep->name);
1386 			status = -EAGAIN;
1387 			goto done;
1388 		}
1389 		/* Cannot portably stall with non-empty FIFO */
1390 		if (musb_ep->is_in) {
1391 			csr = musb_readw(epio, MUSB_TXCSR);
1392 			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1393 				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1394 				status = -EAGAIN;
1395 				goto done;
1396 			}
1397 		}
1398 	} else
1399 		musb_ep->wedged = 0;
1400 
1401 	/* set/clear the stall and toggle bits */
1402 	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1403 	if (musb_ep->is_in) {
1404 		csr = musb_readw(epio, MUSB_TXCSR);
1405 		csr |= MUSB_TXCSR_P_WZC_BITS
1406 			| MUSB_TXCSR_CLRDATATOG;
1407 		if (value)
1408 			csr |= MUSB_TXCSR_P_SENDSTALL;
1409 		else
1410 			csr &= ~(MUSB_TXCSR_P_SENDSTALL
1411 				| MUSB_TXCSR_P_SENTSTALL);
1412 		csr &= ~MUSB_TXCSR_TXPKTRDY;
1413 		musb_writew(epio, MUSB_TXCSR, csr);
1414 	} else {
1415 		csr = musb_readw(epio, MUSB_RXCSR);
1416 		csr |= MUSB_RXCSR_P_WZC_BITS
1417 			| MUSB_RXCSR_FLUSHFIFO
1418 			| MUSB_RXCSR_CLRDATATOG;
1419 		if (value)
1420 			csr |= MUSB_RXCSR_P_SENDSTALL;
1421 		else
1422 			csr &= ~(MUSB_RXCSR_P_SENDSTALL
1423 				| MUSB_RXCSR_P_SENTSTALL);
1424 		musb_writew(epio, MUSB_RXCSR, csr);
1425 	}
1426 
1427 	/* maybe start the first request in the queue */
1428 	if (!musb_ep->busy && !value && request) {
1429 		dev_dbg(musb->controller, "restarting the request\n");
1430 		musb_ep_restart(musb, request);
1431 	}
1432 
1433 done:
1434 	spin_unlock_irqrestore(&musb->lock, flags);
1435 	return status;
1436 }
1437 
1438 /*
1439  * Sets the halt feature with the clear requests ignored
1440  */
1441 static int musb_gadget_set_wedge(struct usb_ep *ep)
1442 {
1443 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1444 
1445 	if (!ep)
1446 		return -EINVAL;
1447 
1448 	musb_ep->wedged = 1;
1449 
1450 	return usb_ep_set_halt(ep);
1451 }
1452 
1453 static int musb_gadget_fifo_status(struct usb_ep *ep)
1454 {
1455 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1456 	void __iomem		*epio = musb_ep->hw_ep->regs;
1457 	int			retval = -EINVAL;
1458 
1459 	if (musb_ep->desc && !musb_ep->is_in) {
1460 		struct musb		*musb = musb_ep->musb;
1461 		int			epnum = musb_ep->current_epnum;
1462 		void __iomem		*mbase = musb->mregs;
1463 		unsigned long		flags;
1464 
1465 		spin_lock_irqsave(&musb->lock, flags);
1466 
1467 		musb_ep_select(mbase, epnum);
1468 		/* FIXME return zero unless RXPKTRDY is set */
1469 		retval = musb_readw(epio, MUSB_RXCOUNT);
1470 
1471 		spin_unlock_irqrestore(&musb->lock, flags);
1472 	}
1473 	return retval;
1474 }
1475 
/*
 * usb_ep_ops.fifo_flush: discard any data sitting in the endpoint FIFO.
 * The endpoint's TX interrupt is masked around the flush so a
 * completion IRQ can't race with the CSR writes.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes controller
			 * to interrupt current FIFO loading, but not flushing
			 * the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		/* written twice in case the endpoint is double buffered */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
1519 
/* Peripheral-mode operations for the data endpoints; ep0 is wired to
 * musb_g_ep0_ops instead (see init_peripheral_ep()).
 */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
1532 
1533 /* ----------------------------------------------------------------------- */
1534 
1535 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1536 {
1537 	struct musb	*musb = gadget_to_musb(gadget);
1538 
1539 	return (int)musb_readw(musb->mregs, MUSB_FRAME);
1540 }
1541 
/*
 * usb_gadget_ops.wakeup: signal the host from the peripheral side.
 *
 * In B_PERIPHERAL state this pulses the RESUME bit for ~2ms (remote
 * wakeup; only attempted when suspended with may_wakeup granted). In
 * B_IDLE it raises SESSION to request SRP and defers to the
 * transceiver. Returns 0 on success, -EINVAL otherwise.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		/* busy-wait for SESSION to latch in the hardware ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then for the hardware to drop it again */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* hand off to the transceiver with the lock dropped */
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		goto done;
	}

	status = 0;

	/* drive resume signalling on the bus */
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
1615 
1616 static int
1617 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1618 {
1619 	gadget->is_selfpowered = !!is_selfpowered;
1620 	return 0;
1621 }
1622 
1623 static void musb_pullup(struct musb *musb, int is_on)
1624 {
1625 	u8 power;
1626 
1627 	power = musb_readb(musb->mregs, MUSB_POWER);
1628 	if (is_on)
1629 		power |= MUSB_POWER_SOFTCONN;
1630 	else
1631 		power &= ~MUSB_POWER_SOFTCONN;
1632 
1633 	/* FIXME if on, HdrcStart; if off, HdrcStop */
1634 
1635 	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1636 		is_on ? "on" : "off");
1637 	musb_writeb(musb->mregs, MUSB_POWER, power);
1638 }
1639 
#if 0
/* Disabled stub: not wired into musb_gadget_operations (its
 * .vbus_session entry is commented out). NOTE(review): this body
 * references 'musb', which is not in scope here -- it would need
 * gadget_to_musb(gadget) before it could ever be re-enabled.
 */
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif
1653 
1654 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1655 {
1656 	struct musb	*musb = gadget_to_musb(gadget);
1657 
1658 	if (!musb->xceiv->set_power)
1659 		return -EOPNOTSUPP;
1660 	return usb_phy_set_power(musb->xceiv, mA);
1661 }
1662 
1663 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1664 {
1665 	struct musb	*musb = gadget_to_musb(gadget);
1666 	unsigned long	flags;
1667 
1668 	is_on = !!is_on;
1669 
1670 	pm_runtime_get_sync(musb->controller);
1671 
1672 	/* NOTE: this assumes we are sensing vbus; we'd rather
1673 	 * not pullup unless the B-session is active.
1674 	 */
1675 	spin_lock_irqsave(&musb->lock, flags);
1676 	if (is_on != musb->softconnect) {
1677 		musb->softconnect = is_on;
1678 		musb_pullup(musb, is_on);
1679 	}
1680 	spin_unlock_irqrestore(&musb->lock, flags);
1681 
1682 	pm_runtime_put(musb->controller);
1683 
1684 	return 0;
1685 }
1686 
/* forward declarations; udc_start/udc_stop are defined further below */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g);

/* gadget-level operations handed to the UDC core */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session		= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
};
1701 
1702 /* ----------------------------------------------------------------------- */
1703 
1704 /* Registration */
1705 
1706 /* Only this registration code "knows" the rule (from USB standards)
1707  * about there being only one external upstream port.  It assumes
1708  * all peripheral ports are external...
1709  */
1710 
1711 static void
1712 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1713 {
1714 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1715 
1716 	memset(ep, 0, sizeof *ep);
1717 
1718 	ep->current_epnum = epnum;
1719 	ep->musb = musb;
1720 	ep->hw_ep = hw_ep;
1721 	ep->is_in = is_in;
1722 
1723 	INIT_LIST_HEAD(&ep->req_list);
1724 
1725 	sprintf(ep->name, "ep%d%s", epnum,
1726 			(!epnum || hw_ep->is_shared_fifo) ? "" : (
1727 				is_in ? "in" : "out"));
1728 	ep->end_point.name = ep->name;
1729 	INIT_LIST_HEAD(&ep->end_point.ep_list);
1730 	if (!epnum) {
1731 		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
1732 		ep->end_point.ops = &musb_g_ep0_ops;
1733 		musb->g.ep0 = &ep->end_point;
1734 	} else {
1735 		if (is_in)
1736 			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
1737 		else
1738 			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
1739 		ep->end_point.ops = &musb_ep_ops;
1740 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1741 	}
1742 }
1743 
1744 /*
1745  * Initialize the endpoints exposed to peripheral drivers, with backlinks
1746  * to the rest of the driver state.
1747  */
1748 static inline void musb_g_init_endpoints(struct musb *musb)
1749 {
1750 	u8			epnum;
1751 	struct musb_hw_ep	*hw_ep;
1752 	unsigned		count = 0;
1753 
1754 	/* initialize endpoint list just once */
1755 	INIT_LIST_HEAD(&(musb->g.ep_list));
1756 
1757 	for (epnum = 0, hw_ep = musb->endpoints;
1758 			epnum < musb->nr_endpoints;
1759 			epnum++, hw_ep++) {
1760 		if (hw_ep->is_shared_fifo /* || !epnum */) {
1761 			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1762 			count++;
1763 		} else {
1764 			if (hw_ep->max_packet_sz_tx) {
1765 				init_peripheral_ep(musb, &hw_ep->ep_in,
1766 							epnum, 1);
1767 				count++;
1768 			}
1769 			if (hw_ep->max_packet_sz_rx) {
1770 				init_peripheral_ep(musb, &hw_ep->ep_out,
1771 							epnum, 0);
1772 				count++;
1773 			}
1774 		}
1775 	}
1776 }
1777 
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* start as an idle B-device in peripheral mode */
	MUSB_DEV_MODE(musb);
	musb->xceiv->otg->default_a = 0;
	musb->xceiv->otg->state = OTG_STATE_B_IDLE;

	/* this "gadget" abstracts/virtualizes the controller */
	musb->g.name = musb_driver_name;
#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
	musb->g.is_otg = 1;
#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
	musb->g.is_otg = 0;
#endif

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	/* NOTE(review): this unregisters a device after
	 * usb_add_gadget_udc() failed to add it -- verify this error
	 * path against the UDC core's cleanup expectations.
	 */
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}
1821 
1822 void musb_gadget_cleanup(struct musb *musb)
1823 {
1824 	if (musb->port_mode == MUSB_PORT_MODE_HOST)
1825 		return;
1826 	usb_del_gadget_udc(&musb->g);
1827 }
1828 
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller (udc_start hook).
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb		*musb = gadget_to_musb(g);
	struct usb_otg		*otg = musb->xceiv->otg;
	unsigned long		flags;
	int			retval = 0;

	/* this hardware requires a high-speed-capable driver */
	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}

	/* keep the controller powered while starting it up */
	pm_runtime_get_sync(musb->controller);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_start(musb);

	/* REVISIT:  funcall to other code, which also
	 * handles power budgeting ... this way also
	 * ensures HdrcStart is indirectly called.
	 */
	if (musb->xceiv->last_event == USB_EVENT_ID)
		musb_platform_set_vbus(musb, 1);

	/* balance the get_sync above unless a cable event is pending;
	 * musb_gadget_stop() re-takes the reference in that case
	 */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);

	return 0;

err:
	return retval;
}
1882 
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller (udc_stop hook).
 * Stops the controller, detaches from the OTG transceiver, and drops
 * the VBUS current budget.
 */
static int musb_gadget_stop(struct usb_gadget *g)
{
	struct musb	*musb = gadget_to_musb(g);
	unsigned long	flags;

	/* re-take the pm reference musb_gadget_start() dropped when no
	 * cable event was pending
	 */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	/* stop drawing VBUS current */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
	musb_stop(musb);
	otg_set_peripheral(musb->xceiv->otg, NULL);

	musb->is_active = 0;
	musb->gadget_driver = NULL;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	/*
	 * FIXME we need to be able to register another
	 * gadget driver here and have everything work;
	 * that currently misbehaves.
	 */

	pm_runtime_put(musb->controller);

	return 0;
}
1927 
1928 /* ----------------------------------------------------------------------- */
1929 
1930 /* lifecycle operations called through plat_uds.c */
1931 
/*
 * Resume signalling seen on the bus: clear the suspended flag and, when
 * a gadget driver is bound, forward its ->resume() callback with the
 * musb lock dropped around the call.
 */
void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
	}
}
1952 
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_IDLE:
		/* a valid VBUS means a session just started; enter
		 * B_PERIPHERAL instead of treating this as suspend
		 */
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		/* forward to the gadget driver, lock dropped */
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
	}
}
1982 
1983 /* Called during SRP */
1984 void musb_g_wakeup(struct musb *musb)
1985 {
1986 	musb_gadget_wakeup(&musb->g);
1987 }
1988 
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	/* notify the bound gadget driver, with the lock dropped */
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	/* fall back to the idle state matching our OTG role */
	switch (musb->xceiv->otg->state) {
	default:
		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
2033 
/*
 * Bus reset seen in peripheral mode: report it to the gadget driver
 * (unless the speed was still unknown), latch the negotiated speed,
 * and re-enter the default state with address 0.
 * Called with musb->lock held; dropped around the udc_reset call.
 */
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	dev_dbg(musb->controller, "<== %s driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report reset, if we didn't already (flushing EP state) */
	if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&musb->lock);
		usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
		spin_lock(&musb->lock);
	}

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	/* reset clears remote wakeup grant and HNP feature flags */
	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (!musb->g.is_otg) {
		/* USB device controllers that are not OTG compatible
		 * may not have DEVCTL register in silicon.
		 * In that case, do not rely on devctl for setting
		 * peripheral mode.
		 */
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else {
		musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	}

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g, 8);
}
2101