xref: /openbmc/linux/drivers/usb/musb/musb_host.c (revision d236d361)
1 /*
2  * MUSB OTG driver host support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
26  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/delay.h>
39 #include <linux/sched.h>
40 #include <linux/slab.h>
41 #include <linux/errno.h>
42 #include <linux/list.h>
43 #include <linux/dma-mapping.h>
44 
45 #include "musb_core.h"
46 #include "musb_host.h"
47 #include "musb_trace.h"
48 
49 /* MUSB HOST status 22-mar-2006
50  *
51  * - There's still lots of partial code duplication for fault paths, so
52  *   they aren't handled as consistently as they need to be.
53  *
54  * - PIO mostly behaved when last tested.
55  *     + including ep0, with all usbtest cases 9, 10
56  *     + usbtest 14 (ep0out) doesn't seem to run at all
57  *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
58  *       configurations, but otherwise double buffering passes basic tests.
59  *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
60  *
61  * - DMA (CPPI) ... partially behaves, not currently recommended
62  *     + about 1/15 the speed of typical EHCI implementations (PCI)
63  *     + RX, all too often reqpkt seems to misbehave after tx
64  *     + TX, no known issues (other than evident silicon issue)
65  *
66  * - DMA (Mentor/OMAP) ...has at least toggle update problems
67  *
68  * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
69  *   starvation ... nothing yet for TX, interrupt, or bulk.
70  *
71  * - Not tested with HNP, but some SRP paths seem to behave.
72  *
73  * NOTE 24-August-2006:
74  *
75  * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
76  *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
77  *   mostly works, except that with "usbnet" it's easy to trigger cases
78  *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
79  *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
80  *   although ARP RX wins.  (That test was done with a full speed link.)
81  */
82 
83 
84 /*
85  * NOTE on endpoint usage:
86  *
87  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
88  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
89  * (Yes, bulk _could_ use more of the endpoints than that, and would even
90  * benefit from it.)
91  *
92  * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
93  * So far that scheduling is both dumb and optimistic:  the endpoint will be
94  * "claimed" until its software queue is no longer refilled.  No multiplexing
95  * of transfers between endpoints, or anything clever.
96  */
97 
98 struct musb *hcd_to_musb(struct usb_hcd *hcd)
99 {
100 	return *(struct musb **) hcd->hcd_priv;
101 }
102 
103 
104 static void musb_ep_program(struct musb *musb, u8 epnum,
105 			struct urb *urb, int is_out,
106 			u8 *buf, u32 offset, u32 len);
107 
108 /*
109  * Clear TX fifo. Needed to avoid BABBLE errors.
110  */
111 static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112 {
113 	struct musb	*musb = ep->musb;
114 	void __iomem	*epio = ep->regs;
115 	u16		csr;
116 	int		retries = 1000;
117 
118 	csr = musb_readw(epio, MUSB_TXCSR);
119 	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
120 		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
121 		musb_writew(epio, MUSB_TXCSR, csr);
122 		csr = musb_readw(epio, MUSB_TXCSR);
123 
124 		/*
125 		 * FIXME: sometimes the tx fifo flush failed, it has been
126 		 * observed during device disconnect on AM335x.
127 		 *
128 		 * To reproduce the issue, ensure tx urb(s) are queued when
129 		 * unplug the usb device which is connected to AM335x usb
130 		 * host port.
131 		 *
132 		 * I found using a usb-ethernet device and running iperf
133 		 * (client on AM335x) has very high chance to trigger it.
134 		 *
135 		 * Better to turn on musb_dbg() in musb_cleanup_urb() with
136 		 * CPPI enabled to see the issue when aborting the tx channel.
137 		 */
138 		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
139 				"Could not flush host TX%d fifo: csr: %04x\n",
140 				ep->epnum, csr))
141 			return;
142 	}
143 }
144 
145 static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
146 {
147 	void __iomem	*epio = ep->regs;
148 	u16		csr;
149 	int		retries = 5;
150 
151 	/* scrub any data left in the fifo */
152 	do {
153 		csr = musb_readw(epio, MUSB_TXCSR);
154 		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
155 			break;
156 		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
157 		csr = musb_readw(epio, MUSB_TXCSR);
158 		udelay(10);
159 	} while (--retries);
160 
161 	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
162 			ep->epnum, csr);
163 
164 	/* and reset for the next transfer */
165 	musb_writew(epio, MUSB_TXCSR, 0);
166 }
167 
168 /*
169  * Start transmit. Caller is responsible for locking shared resources.
170  * musb must be locked.
171  */
172 static inline void musb_h_tx_start(struct musb_hw_ep *ep)
173 {
174 	u16	txcsr;
175 
176 	/* NOTE: no locks here; caller should lock and select EP */
177 	if (ep->epnum) {
178 		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
179 		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
180 		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
181 	} else {
182 		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
183 		musb_writew(ep->regs, MUSB_CSR0, txcsr);
184 	}
185 
186 }
187 
188 static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
189 {
190 	u16	txcsr;
191 
192 	/* NOTE: no locks here; caller should lock and select EP */
193 	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
194 	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
195 	if (is_cppi_enabled(ep->musb))
196 		txcsr |= MUSB_TXCSR_DMAMODE;
197 	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
198 }
199 
200 static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
201 {
202 	if (is_in != 0 || ep->is_shared_fifo)
203 		ep->in_qh  = qh;
204 	if (is_in == 0 || ep->is_shared_fifo)
205 		ep->out_qh = qh;
206 }
207 
208 static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
209 {
210 	return is_in ? ep->in_qh : ep->out_qh;
211 }
212 
213 /*
214  * Start the URB at the front of an endpoint's queue
215  * end must be claimed from the caller.
216  *
217  * Context: controller locked, irqs blocked
218  */
219 static void
220 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
221 {
222 	u16			frame;
223 	u32			len;
224 	void __iomem		*mbase =  musb->mregs;
225 	struct urb		*urb = next_urb(qh);
226 	void			*buf = urb->transfer_buffer;
227 	u32			offset = 0;
228 	struct musb_hw_ep	*hw_ep = qh->hw_ep;
229 	int			epnum = hw_ep->epnum;
230 
231 	/* initialize software qh state */
232 	qh->offset = 0;
233 	qh->segsize = 0;
234 
235 	/* gather right source of data */
236 	switch (qh->type) {
237 	case USB_ENDPOINT_XFER_CONTROL:
238 		/* control transfers always start with SETUP */
239 		is_in = 0;
240 		musb->ep0_stage = MUSB_EP0_START;
241 		buf = urb->setup_packet;
242 		len = 8;
243 		break;
244 	case USB_ENDPOINT_XFER_ISOC:
245 		qh->iso_idx = 0;
246 		qh->frame = 0;
247 		offset = urb->iso_frame_desc[0].offset;
248 		len = urb->iso_frame_desc[0].length;
249 		break;
250 	default:		/* bulk, interrupt */
251 		/* actual_length may be nonzero on retry paths */
252 		buf = urb->transfer_buffer + urb->actual_length;
253 		len = urb->transfer_buffer_length - urb->actual_length;
254 	}
255 
256 	trace_musb_urb_start(musb, urb);
257 
258 	/* Configure endpoint */
259 	musb_ep_set_qh(hw_ep, is_in, qh);
260 	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
261 
262 	/* transmit may have more work: start it when it is time */
263 	if (is_in)
264 		return;
265 
266 	/* determine if the time is right for a periodic transfer */
267 	switch (qh->type) {
268 	case USB_ENDPOINT_XFER_ISOC:
269 	case USB_ENDPOINT_XFER_INT:
270 		musb_dbg(musb, "check whether there's still time for periodic Tx");
271 		frame = musb_readw(mbase, MUSB_FRAME);
272 		/* FIXME this doesn't implement that scheduling policy ...
273 		 * or handle framecounter wrapping
274 		 */
275 		if (1) {	/* Always assume URB_ISO_ASAP */
276 			/* REVISIT the SOF irq handler shouldn't duplicate
277 			 * this code; and we don't init urb->start_frame...
278 			 */
279 			qh->frame = 0;
280 			goto start;
281 		} else {
282 			qh->frame = urb->start_frame;
283 			/* enable SOF interrupt so we can count down */
284 			musb_dbg(musb, "SOF for %d", epnum);
285 #if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
286 			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
287 #endif
288 		}
289 		break;
290 	default:
291 start:
292 		musb_dbg(musb, "Start TX%d %s", epnum,
293 			hw_ep->tx_channel ? "dma" : "pio");
294 
295 		if (!hw_ep->tx_channel)
296 			musb_h_tx_start(hw_ep);
297 		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
298 			musb_h_tx_dma_start(hw_ep);
299 	}
300 }
301 
302 /* Context: caller owns controller lock, IRQs are blocked */
303 static void musb_giveback(struct musb *musb, struct urb *urb, int status)
304 __releases(musb->lock)
305 __acquires(musb->lock)
306 {
307 	trace_musb_urb_gb(musb, urb);
308 
309 	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
310 	spin_unlock(&musb->lock);
311 	usb_hcd_giveback_urb(musb->hcd, urb, status);
312 	spin_lock(&musb->lock);
313 }
314 
315 /* For bulk/interrupt endpoints only */
316 static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
317 				    struct urb *urb)
318 {
319 	void __iomem		*epio = qh->hw_ep->regs;
320 	u16			csr;
321 
322 	/*
323 	 * FIXME: the current Mentor DMA code seems to have
324 	 * problems getting toggle correct.
325 	 */
326 
327 	if (is_in)
328 		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
329 	else
330 		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
331 
332 	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
333 }
334 
335 /*
336  * Advance this hardware endpoint's queue, completing the specified URB and
337  * advancing to either the next URB queued to that qh, or else invalidating
338  * that qh and advancing to the next qh scheduled after the current one.
339  *
340  * Context: caller owns controller lock, IRQs are blocked
341  */
342 static void musb_advance_schedule(struct musb *musb, struct urb *urb,
343 				  struct musb_hw_ep *hw_ep, int is_in)
344 {
345 	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
346 	struct musb_hw_ep	*ep = qh->hw_ep;
347 	int			ready = qh->is_ready;
348 	int			status;
349 
350 	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
351 
352 	/* save toggle eagerly, for paranoia */
353 	switch (qh->type) {
354 	case USB_ENDPOINT_XFER_BULK:
355 	case USB_ENDPOINT_XFER_INT:
356 		musb_save_toggle(qh, is_in, urb);
357 		break;
358 	case USB_ENDPOINT_XFER_ISOC:
359 		if (status == 0 && urb->error_count)
360 			status = -EXDEV;
361 		break;
362 	}
363 
364 	qh->is_ready = 0;
365 	musb_giveback(musb, urb, status);
366 	qh->is_ready = ready;
367 
368 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
369 	 * invalidate qh as soon as list_empty(&hep->urb_list)
370 	 */
371 	if (list_empty(&qh->hep->urb_list)) {
372 		struct list_head	*head;
373 		struct dma_controller	*dma = musb->dma_controller;
374 
375 		if (is_in) {
376 			ep->rx_reinit = 1;
377 			if (ep->rx_channel) {
378 				dma->channel_release(ep->rx_channel);
379 				ep->rx_channel = NULL;
380 			}
381 		} else {
382 			ep->tx_reinit = 1;
383 			if (ep->tx_channel) {
384 				dma->channel_release(ep->tx_channel);
385 				ep->tx_channel = NULL;
386 			}
387 		}
388 
389 		/* Clobber old pointers to this qh */
390 		musb_ep_set_qh(ep, is_in, NULL);
391 		qh->hep->hcpriv = NULL;
392 
393 		switch (qh->type) {
394 
395 		case USB_ENDPOINT_XFER_CONTROL:
396 		case USB_ENDPOINT_XFER_BULK:
397 			/* fifo policy for these lists, except that NAKing
398 			 * should rotate a qh to the end (for fairness).
399 			 */
400 			if (qh->mux == 1) {
401 				head = qh->ring.prev;
402 				list_del(&qh->ring);
403 				kfree(qh);
404 				qh = first_qh(head);
405 				break;
406 			}
407 
408 		case USB_ENDPOINT_XFER_ISOC:
409 		case USB_ENDPOINT_XFER_INT:
410 			/* this is where periodic bandwidth should be
411 			 * de-allocated if it's tracked and allocated;
412 			 * and where we'd update the schedule tree...
413 			 */
414 			kfree(qh);
415 			qh = NULL;
416 			break;
417 		}
418 	}
419 
420 	/*
421 	 * The pipe must be broken if current urb->status is set, so don't
422 	 * start next urb.
423 	 * TODO: to minimize the risk of regression, only check urb->status
424 	 * for RX, until we have a test case to understand the behavior of TX.
425 	 */
426 	if ((!status || !is_in) && qh && qh->is_ready) {
427 		musb_dbg(musb, "... next ep%d %cX urb %p",
428 		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
429 		musb_start_urb(musb, is_in, qh);
430 	}
431 }
432 
433 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
434 {
435 	/* we don't want fifo to fill itself again;
436 	 * ignore dma (various models),
437 	 * leave toggle alone (may not have been saved yet)
438 	 */
439 	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
440 	csr &= ~(MUSB_RXCSR_H_REQPKT
441 		| MUSB_RXCSR_H_AUTOREQ
442 		| MUSB_RXCSR_AUTOCLEAR);
443 
444 	/* write 2x to allow double buffering */
445 	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
446 	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
447 
448 	/* flush writebuffer */
449 	return musb_readw(hw_ep->regs, MUSB_RXCSR);
450 }
451 
452 /*
453  * PIO RX for a packet (or part of it).
454  */
455 static bool
456 musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
457 {
458 	u16			rx_count;
459 	u8			*buf;
460 	u16			csr;
461 	bool			done = false;
462 	u32			length;
463 	int			do_flush = 0;
464 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
465 	void __iomem		*epio = hw_ep->regs;
466 	struct musb_qh		*qh = hw_ep->in_qh;
467 	int			pipe = urb->pipe;
468 	void			*buffer = urb->transfer_buffer;
469 
470 	/* musb_ep_select(mbase, epnum); */
471 	rx_count = musb_readw(epio, MUSB_RXCOUNT);
472 	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
473 			urb->transfer_buffer, qh->offset,
474 			urb->transfer_buffer_length);
475 
476 	/* unload FIFO */
477 	if (usb_pipeisoc(pipe)) {
478 		int					status = 0;
479 		struct usb_iso_packet_descriptor	*d;
480 
481 		if (iso_err) {
482 			status = -EILSEQ;
483 			urb->error_count++;
484 		}
485 
486 		d = urb->iso_frame_desc + qh->iso_idx;
487 		buf = buffer + d->offset;
488 		length = d->length;
489 		if (rx_count > length) {
490 			if (status == 0) {
491 				status = -EOVERFLOW;
492 				urb->error_count++;
493 			}
494 			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
495 			do_flush = 1;
496 		} else
497 			length = rx_count;
498 		urb->actual_length += length;
499 		d->actual_length = length;
500 
501 		d->status = status;
502 
503 		/* see if we are done */
504 		done = (++qh->iso_idx >= urb->number_of_packets);
505 	} else {
506 		/* non-isoch */
507 		buf = buffer + qh->offset;
508 		length = urb->transfer_buffer_length - qh->offset;
509 		if (rx_count > length) {
510 			if (urb->status == -EINPROGRESS)
511 				urb->status = -EOVERFLOW;
512 			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
513 			do_flush = 1;
514 		} else
515 			length = rx_count;
516 		urb->actual_length += length;
517 		qh->offset += length;
518 
519 		/* see if we are done */
520 		done = (urb->actual_length == urb->transfer_buffer_length)
521 			|| (rx_count < qh->maxpacket)
522 			|| (urb->status != -EINPROGRESS);
523 		if (done
524 				&& (urb->status == -EINPROGRESS)
525 				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
526 				&& (urb->actual_length
527 					< urb->transfer_buffer_length))
528 			urb->status = -EREMOTEIO;
529 	}
530 
531 	musb_read_fifo(hw_ep, length, buf);
532 
533 	csr = musb_readw(epio, MUSB_RXCSR);
534 	csr |= MUSB_RXCSR_H_WZC_BITS;
535 	if (unlikely(do_flush))
536 		musb_h_flush_rxfifo(hw_ep, csr);
537 	else {
538 		/* REVISIT this assumes AUTOCLEAR is never set */
539 		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
540 		if (!done)
541 			csr |= MUSB_RXCSR_H_REQPKT;
542 		musb_writew(epio, MUSB_RXCSR, csr);
543 	}
544 
545 	return done;
546 }
547 
548 /* we don't always need to reinit a given side of an endpoint...
549  * when we do, use tx/rx reinit routine and then construct a new CSR
550  * to address data toggle, NYET, and DMA or PIO.
551  *
552  * it's possible that driver bugs (especially for DMA) or aborting a
553  * transfer might have left the endpoint busier than it should be.
554  * the busy/not-empty tests are basically paranoia.
555  */
556 static void
557 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
558 {
559 	struct musb_hw_ep *ep = musb->endpoints + epnum;
560 	u16	csr;
561 
562 	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
563 	 * That always uses tx_reinit since ep0 repurposes TX register
564 	 * offsets; the initial SETUP packet is also a kind of OUT.
565 	 */
566 
567 	/* if programmed for Tx, put it in RX mode */
568 	if (ep->is_shared_fifo) {
569 		csr = musb_readw(ep->regs, MUSB_TXCSR);
570 		if (csr & MUSB_TXCSR_MODE) {
571 			musb_h_tx_flush_fifo(ep);
572 			csr = musb_readw(ep->regs, MUSB_TXCSR);
573 			musb_writew(ep->regs, MUSB_TXCSR,
574 				    csr | MUSB_TXCSR_FRCDATATOG);
575 		}
576 
577 		/*
578 		 * Clear the MODE bit (and everything else) to enable Rx.
579 		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
580 		 */
581 		if (csr & MUSB_TXCSR_DMAMODE)
582 			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
583 		musb_writew(ep->regs, MUSB_TXCSR, 0);
584 
585 	/* scrub all previous state, clearing toggle */
586 	}
587 	csr = musb_readw(ep->regs, MUSB_RXCSR);
588 	if (csr & MUSB_RXCSR_RXPKTRDY)
589 		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
590 			musb_readw(ep->regs, MUSB_RXCOUNT));
591 
592 	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
593 
594 	/* target addr and (for multipoint) hub addr/port */
595 	if (musb->is_multipoint) {
596 		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
597 		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
598 		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
599 	} else
600 		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
601 
602 	/* protocol/endpoint, interval/NAKlimit, i/o size */
603 	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
604 	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
605 	/* NOTE: bulk combining rewrites high bits of maxpacket */
606 	/* Set RXMAXP with the FIFO size of the endpoint
607 	 * to disable double buffer mode.
608 	 */
609 	if (musb->double_buffer_not_ok)
610 		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
611 	else
612 		musb_writew(ep->regs, MUSB_RXMAXP,
613 				qh->maxpacket | ((qh->hb_mult - 1) << 11));
614 
615 	ep->rx_reinit = 0;
616 }
617 
618 static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
619 		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
620 		struct urb *urb, u32 offset,
621 		u32 *length, u8 *mode)
622 {
623 	struct dma_channel	*channel = hw_ep->tx_channel;
624 	void __iomem		*epio = hw_ep->regs;
625 	u16			pkt_size = qh->maxpacket;
626 	u16			csr;
627 
628 	if (*length > channel->max_len)
629 		*length = channel->max_len;
630 
631 	csr = musb_readw(epio, MUSB_TXCSR);
632 	if (*length > pkt_size) {
633 		*mode = 1;
634 		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
635 		/* autoset shouldn't be set in high bandwidth */
636 		/*
637 		 * Enable Autoset according to table
638 		 * below
639 		 * bulk_split hb_mult	Autoset_Enable
640 		 *	0	1	Yes(Normal)
641 		 *	0	>1	No(High BW ISO)
642 		 *	1	1	Yes(HS bulk)
643 		 *	1	>1	Yes(FS bulk)
644 		 */
645 		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
646 					can_bulk_split(hw_ep->musb, qh->type)))
647 			csr |= MUSB_TXCSR_AUTOSET;
648 	} else {
649 		*mode = 0;
650 		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
651 		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
652 	}
653 	channel->desired_mode = *mode;
654 	musb_writew(epio, MUSB_TXCSR, csr);
655 }
656 
657 static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
658 					   struct musb_hw_ep *hw_ep,
659 					   struct musb_qh *qh,
660 					   struct urb *urb,
661 					   u32 offset,
662 					   u32 *length,
663 					   u8 *mode)
664 {
665 	struct dma_channel *channel = hw_ep->tx_channel;
666 
667 	channel->actual_len = 0;
668 
669 	/*
670 	 * TX uses "RNDIS" mode automatically but needs help
671 	 * to identify the zero-length-final-packet case.
672 	 */
673 	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
674 }
675 
676 static bool musb_tx_dma_program(struct dma_controller *dma,
677 		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
678 		struct urb *urb, u32 offset, u32 length)
679 {
680 	struct dma_channel	*channel = hw_ep->tx_channel;
681 	u16			pkt_size = qh->maxpacket;
682 	u8			mode;
683 
684 	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
685 		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
686 					    &length, &mode);
687 	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
688 		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
689 					       &length, &mode);
690 	else
691 		return false;
692 
693 	qh->segsize = length;
694 
695 	/*
696 	 * Ensure the data reaches to main memory before starting
697 	 * DMA transfer
698 	 */
699 	wmb();
700 
701 	if (!dma->channel_program(channel, pkt_size, mode,
702 			urb->transfer_dma + offset, length)) {
703 		void __iomem *epio = hw_ep->regs;
704 		u16 csr;
705 
706 		dma->channel_release(channel);
707 		hw_ep->tx_channel = NULL;
708 
709 		csr = musb_readw(epio, MUSB_TXCSR);
710 		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
711 		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
712 		return false;
713 	}
714 	return true;
715 }
716 
717 /*
718  * Program an HDRC endpoint as per the given URB
719  * Context: irqs blocked, controller lock held
720  */
721 static void musb_ep_program(struct musb *musb, u8 epnum,
722 			struct urb *urb, int is_out,
723 			u8 *buf, u32 offset, u32 len)
724 {
725 	struct dma_controller	*dma_controller;
726 	struct dma_channel	*dma_channel;
727 	u8			dma_ok;
728 	void __iomem		*mbase = musb->mregs;
729 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
730 	void __iomem		*epio = hw_ep->regs;
731 	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
732 	u16			packet_sz = qh->maxpacket;
733 	u8			use_dma = 1;
734 	u16			csr;
735 
736 	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
737 				"h_addr%02x h_port%02x bytes %d",
738 			is_out ? "-->" : "<--",
739 			epnum, urb, urb->dev->speed,
740 			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
741 			qh->h_addr_reg, qh->h_port_reg,
742 			len);
743 
744 	musb_ep_select(mbase, epnum);
745 
746 	if (is_out && !len) {
747 		use_dma = 0;
748 		csr = musb_readw(epio, MUSB_TXCSR);
749 		csr &= ~MUSB_TXCSR_DMAENAB;
750 		musb_writew(epio, MUSB_TXCSR, csr);
751 		hw_ep->tx_channel = NULL;
752 	}
753 
754 	/* candidate for DMA? */
755 	dma_controller = musb->dma_controller;
756 	if (use_dma && is_dma_capable() && epnum && dma_controller) {
757 		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
758 		if (!dma_channel) {
759 			dma_channel = dma_controller->channel_alloc(
760 					dma_controller, hw_ep, is_out);
761 			if (is_out)
762 				hw_ep->tx_channel = dma_channel;
763 			else
764 				hw_ep->rx_channel = dma_channel;
765 		}
766 	} else
767 		dma_channel = NULL;
768 
769 	/* make sure we clear DMAEnab, autoSet bits from previous run */
770 
771 	/* OUT/transmit/EP0 or IN/receive? */
772 	if (is_out) {
773 		u16	csr;
774 		u16	int_txe;
775 		u16	load_count;
776 
777 		csr = musb_readw(epio, MUSB_TXCSR);
778 
779 		/* disable interrupt in case we flush */
780 		int_txe = musb->intrtxe;
781 		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
782 
783 		/* general endpoint setup */
784 		if (epnum) {
785 			/* flush all old state, set default */
786 			/*
787 			 * We could be flushing valid
788 			 * packets in double buffering
789 			 * case
790 			 */
791 			if (!hw_ep->tx_double_buffered)
792 				musb_h_tx_flush_fifo(hw_ep);
793 
794 			/*
795 			 * We must not clear the DMAMODE bit before or in
796 			 * the same cycle with the DMAENAB bit, so we clear
797 			 * the latter first...
798 			 */
799 			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
800 					| MUSB_TXCSR_AUTOSET
801 					| MUSB_TXCSR_DMAENAB
802 					| MUSB_TXCSR_FRCDATATOG
803 					| MUSB_TXCSR_H_RXSTALL
804 					| MUSB_TXCSR_H_ERROR
805 					| MUSB_TXCSR_TXPKTRDY
806 					);
807 			csr |= MUSB_TXCSR_MODE;
808 
809 			if (!hw_ep->tx_double_buffered) {
810 				if (usb_gettoggle(urb->dev, qh->epnum, 1))
811 					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
812 						| MUSB_TXCSR_H_DATATOGGLE;
813 				else
814 					csr |= MUSB_TXCSR_CLRDATATOG;
815 			}
816 
817 			musb_writew(epio, MUSB_TXCSR, csr);
818 			/* REVISIT may need to clear FLUSHFIFO ... */
819 			csr &= ~MUSB_TXCSR_DMAMODE;
820 			musb_writew(epio, MUSB_TXCSR, csr);
821 			csr = musb_readw(epio, MUSB_TXCSR);
822 		} else {
823 			/* endpoint 0: just flush */
824 			musb_h_ep0_flush_fifo(hw_ep);
825 		}
826 
827 		/* target addr and (for multipoint) hub addr/port */
828 		if (musb->is_multipoint) {
829 			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
830 			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
831 			musb_write_txhubport(musb, epnum, qh->h_port_reg);
832 /* FIXME if !epnum, do the same for RX ... */
833 		} else
834 			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
835 
836 		/* protocol/endpoint/interval/NAKlimit */
837 		if (epnum) {
838 			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
839 			if (musb->double_buffer_not_ok) {
840 				musb_writew(epio, MUSB_TXMAXP,
841 						hw_ep->max_packet_sz_tx);
842 			} else if (can_bulk_split(musb, qh->type)) {
843 				qh->hb_mult = hw_ep->max_packet_sz_tx
844 						/ packet_sz;
845 				musb_writew(epio, MUSB_TXMAXP, packet_sz
846 					| ((qh->hb_mult) - 1) << 11);
847 			} else {
848 				musb_writew(epio, MUSB_TXMAXP,
849 						qh->maxpacket |
850 						((qh->hb_mult - 1) << 11));
851 			}
852 			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
853 		} else {
854 			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
855 			if (musb->is_multipoint)
856 				musb_writeb(epio, MUSB_TYPE0,
857 						qh->type_reg);
858 		}
859 
860 		if (can_bulk_split(musb, qh->type))
861 			load_count = min((u32) hw_ep->max_packet_sz_tx,
862 						len);
863 		else
864 			load_count = min((u32) packet_sz, len);
865 
866 		if (dma_channel && musb_tx_dma_program(dma_controller,
867 					hw_ep, qh, urb, offset, len))
868 			load_count = 0;
869 
870 		if (load_count) {
871 			/* PIO to load FIFO */
872 			qh->segsize = load_count;
873 			if (!buf) {
874 				sg_miter_start(&qh->sg_miter, urb->sg, 1,
875 						SG_MITER_ATOMIC
876 						| SG_MITER_FROM_SG);
877 				if (!sg_miter_next(&qh->sg_miter)) {
878 					dev_err(musb->controller,
879 							"error: sg"
880 							"list empty\n");
881 					sg_miter_stop(&qh->sg_miter);
882 					goto finish;
883 				}
884 				buf = qh->sg_miter.addr + urb->sg->offset +
885 					urb->actual_length;
886 				load_count = min_t(u32, load_count,
887 						qh->sg_miter.length);
888 				musb_write_fifo(hw_ep, load_count, buf);
889 				qh->sg_miter.consumed = load_count;
890 				sg_miter_stop(&qh->sg_miter);
891 			} else
892 				musb_write_fifo(hw_ep, load_count, buf);
893 		}
894 finish:
895 		/* re-enable interrupt */
896 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
897 
898 	/* IN/receive */
899 	} else {
900 		u16	csr;
901 
902 		if (hw_ep->rx_reinit) {
903 			musb_rx_reinit(musb, qh, epnum);
904 
905 			/* init new state: toggle and NYET, maybe DMA later */
906 			if (usb_gettoggle(urb->dev, qh->epnum, 0))
907 				csr = MUSB_RXCSR_H_WR_DATATOGGLE
908 					| MUSB_RXCSR_H_DATATOGGLE;
909 			else
910 				csr = 0;
911 			if (qh->type == USB_ENDPOINT_XFER_INT)
912 				csr |= MUSB_RXCSR_DISNYET;
913 
914 		} else {
915 			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
916 
917 			if (csr & (MUSB_RXCSR_RXPKTRDY
918 					| MUSB_RXCSR_DMAENAB
919 					| MUSB_RXCSR_H_REQPKT))
920 				ERR("broken !rx_reinit, ep%d csr %04x\n",
921 						hw_ep->epnum, csr);
922 
923 			/* scrub any stale state, leaving toggle alone */
924 			csr &= MUSB_RXCSR_DISNYET;
925 		}
926 
927 		/* kick things off */
928 
929 		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
930 			/* Candidate for DMA */
931 			dma_channel->actual_len = 0L;
932 			qh->segsize = len;
933 
934 			/* AUTOREQ is in a DMA register */
935 			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
936 			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
937 
938 			/*
939 			 * Unless caller treats short RX transfers as
940 			 * errors, we dare not queue multiple transfers.
941 			 */
942 			dma_ok = dma_controller->channel_program(dma_channel,
943 					packet_sz, !(urb->transfer_flags &
944 						     URB_SHORT_NOT_OK),
945 					urb->transfer_dma + offset,
946 					qh->segsize);
947 			if (!dma_ok) {
948 				dma_controller->channel_release(dma_channel);
949 				hw_ep->rx_channel = dma_channel = NULL;
950 			} else
951 				csr |= MUSB_RXCSR_DMAENAB;
952 		}
953 
954 		csr |= MUSB_RXCSR_H_REQPKT;
955 		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
956 		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
957 		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
958 	}
959 }
960 
961 /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
962  * the end; avoids starvation for other endpoints.
963  */
964 static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
965 	int is_in)
966 {
967 	struct dma_channel	*dma;
968 	struct urb		*urb;
969 	void __iomem		*mbase = musb->mregs;
970 	void __iomem		*epio = ep->regs;
971 	struct musb_qh		*cur_qh, *next_qh;
972 	u16			rx_csr, tx_csr;
973 
974 	musb_ep_select(mbase, ep->epnum);
975 	if (is_in) {
976 		dma = is_dma_capable() ? ep->rx_channel : NULL;
977 
978 		/*
979 		 * Need to stop the transaction by clearing REQPKT first
980 		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
981 		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
982 		 */
983 		rx_csr = musb_readw(epio, MUSB_RXCSR);
984 		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
985 		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
986 		musb_writew(epio, MUSB_RXCSR, rx_csr);
987 		rx_csr &= ~MUSB_RXCSR_DATAERROR;
988 		musb_writew(epio, MUSB_RXCSR, rx_csr);
989 
990 		cur_qh = first_qh(&musb->in_bulk);
991 	} else {
992 		dma = is_dma_capable() ? ep->tx_channel : NULL;
993 
994 		/* clear nak timeout bit */
995 		tx_csr = musb_readw(epio, MUSB_TXCSR);
996 		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
997 		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
998 		musb_writew(epio, MUSB_TXCSR, tx_csr);
999 
1000 		cur_qh = first_qh(&musb->out_bulk);
1001 	}
1002 	if (cur_qh) {
1003 		urb = next_urb(cur_qh);
1004 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1005 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1006 			musb->dma_controller->channel_abort(dma);
1007 			urb->actual_length += dma->actual_len;
1008 			dma->actual_len = 0L;
1009 		}
1010 		musb_save_toggle(cur_qh, is_in, urb);
1011 
1012 		if (is_in) {
1013 			/* move cur_qh to end of queue */
1014 			list_move_tail(&cur_qh->ring, &musb->in_bulk);
1015 
1016 			/* get the next qh from musb->in_bulk */
1017 			next_qh = first_qh(&musb->in_bulk);
1018 
1019 			/* set rx_reinit and schedule the next qh */
1020 			ep->rx_reinit = 1;
1021 		} else {
1022 			/* move cur_qh to end of queue */
1023 			list_move_tail(&cur_qh->ring, &musb->out_bulk);
1024 
1025 			/* get the next qh from musb->out_bulk */
1026 			next_qh = first_qh(&musb->out_bulk);
1027 
1028 			/* set tx_reinit and schedule the next qh */
1029 			ep->tx_reinit = 1;
1030 		}
1031 		musb_start_urb(musb, is_in, next_qh);
1032 	}
1033 }
1034 
1035 /*
1036  * Service the default endpoint (ep0) as host.
1037  * Return true until it's time to start the status stage.
1038  */
1039 static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
1040 {
1041 	bool			 more = false;
1042 	u8			*fifo_dest = NULL;
1043 	u16			fifo_count = 0;
1044 	struct musb_hw_ep	*hw_ep = musb->control_ep;
1045 	struct musb_qh		*qh = hw_ep->in_qh;
1046 	struct usb_ctrlrequest	*request;
1047 
1048 	switch (musb->ep0_stage) {
1049 	case MUSB_EP0_IN:
1050 		fifo_dest = urb->transfer_buffer + urb->actual_length;
1051 		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
1052 				   urb->actual_length);
1053 		if (fifo_count < len)
1054 			urb->status = -EOVERFLOW;
1055 
1056 		musb_read_fifo(hw_ep, fifo_count, fifo_dest);
1057 
1058 		urb->actual_length += fifo_count;
1059 		if (len < qh->maxpacket) {
1060 			/* always terminate on short read; it's
1061 			 * rarely reported as an error.
1062 			 */
1063 		} else if (urb->actual_length <
1064 				urb->transfer_buffer_length)
1065 			more = true;
1066 		break;
1067 	case MUSB_EP0_START:
1068 		request = (struct usb_ctrlrequest *) urb->setup_packet;
1069 
1070 		if (!request->wLength) {
1071 			musb_dbg(musb, "start no-DATA");
1072 			break;
1073 		} else if (request->bRequestType & USB_DIR_IN) {
1074 			musb_dbg(musb, "start IN-DATA");
1075 			musb->ep0_stage = MUSB_EP0_IN;
1076 			more = true;
1077 			break;
1078 		} else {
1079 			musb_dbg(musb, "start OUT-DATA");
1080 			musb->ep0_stage = MUSB_EP0_OUT;
1081 			more = true;
1082 		}
1083 		/* FALLTHROUGH */
1084 	case MUSB_EP0_OUT:
1085 		fifo_count = min_t(size_t, qh->maxpacket,
1086 				   urb->transfer_buffer_length -
1087 				   urb->actual_length);
1088 		if (fifo_count) {
1089 			fifo_dest = (u8 *) (urb->transfer_buffer
1090 					+ urb->actual_length);
1091 			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
1092 					fifo_count,
1093 					(fifo_count == 1) ? "" : "s",
1094 					fifo_dest);
1095 			musb_write_fifo(hw_ep, fifo_count, fifo_dest);
1096 
1097 			urb->actual_length += fifo_count;
1098 			more = true;
1099 		}
1100 		break;
1101 	default:
1102 		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
1103 		break;
1104 	}
1105 
1106 	return more;
1107 }
1108 
1109 /*
1110  * Handle default endpoint interrupt as host. Only called in IRQ time
1111  * from musb_interrupt().
1112  *
1113  * called with controller irqlocked
1114  */
1115 irqreturn_t musb_h_ep0_irq(struct musb *musb)
1116 {
1117 	struct urb		*urb;
1118 	u16			csr, len;
1119 	int			status = 0;
1120 	void __iomem		*mbase = musb->mregs;
1121 	struct musb_hw_ep	*hw_ep = musb->control_ep;
1122 	void __iomem		*epio = hw_ep->regs;
1123 	struct musb_qh		*qh = hw_ep->in_qh;
1124 	bool			complete = false;
1125 	irqreturn_t		retval = IRQ_NONE;
1126 
1127 	/* ep0 only has one queue, "in" */
1128 	urb = next_urb(qh);
1129 
1130 	musb_ep_select(mbase, 0);
1131 	csr = musb_readw(epio, MUSB_CSR0);
1132 	len = (csr & MUSB_CSR0_RXPKTRDY)
1133 			? musb_readb(epio, MUSB_COUNT0)
1134 			: 0;
1135 
1136 	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
1137 		csr, qh, len, urb, musb->ep0_stage);
1138 
1139 	/* if we just did status stage, we are done */
1140 	if (MUSB_EP0_STATUS == musb->ep0_stage) {
1141 		retval = IRQ_HANDLED;
1142 		complete = true;
1143 	}
1144 
1145 	/* prepare status */
1146 	if (csr & MUSB_CSR0_H_RXSTALL) {
1147 		musb_dbg(musb, "STALLING ENDPOINT");
1148 		status = -EPIPE;
1149 
1150 	} else if (csr & MUSB_CSR0_H_ERROR) {
1151 		musb_dbg(musb, "no response, csr0 %04x", csr);
1152 		status = -EPROTO;
1153 
1154 	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1155 		musb_dbg(musb, "control NAK timeout");
1156 
1157 		/* NOTE:  this code path would be a good place to PAUSE a
1158 		 * control transfer, if another one is queued, so that
1159 		 * ep0 is more likely to stay busy.  That's already done
1160 		 * for bulk RX transfers.
1161 		 *
1162 		 * if (qh->ring.next != &musb->control), then
1163 		 * we have a candidate... NAKing is *NOT* an error
1164 		 */
1165 		musb_writew(epio, MUSB_CSR0, 0);
1166 		retval = IRQ_HANDLED;
1167 	}
1168 
1169 	if (status) {
1170 		musb_dbg(musb, "aborting");
1171 		retval = IRQ_HANDLED;
1172 		if (urb)
1173 			urb->status = status;
1174 		complete = true;
1175 
1176 		/* use the proper sequence to abort the transfer */
1177 		if (csr & MUSB_CSR0_H_REQPKT) {
1178 			csr &= ~MUSB_CSR0_H_REQPKT;
1179 			musb_writew(epio, MUSB_CSR0, csr);
1180 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1181 			musb_writew(epio, MUSB_CSR0, csr);
1182 		} else {
1183 			musb_h_ep0_flush_fifo(hw_ep);
1184 		}
1185 
1186 		musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1187 
1188 		/* clear it */
1189 		musb_writew(epio, MUSB_CSR0, 0);
1190 	}
1191 
1192 	if (unlikely(!urb)) {
1193 		/* stop endpoint since we have no place for its data, this
1194 		 * SHOULD NEVER HAPPEN! */
1195 		ERR("no URB for end 0\n");
1196 
1197 		musb_h_ep0_flush_fifo(hw_ep);
1198 		goto done;
1199 	}
1200 
1201 	if (!complete) {
1202 		/* call common logic and prepare response */
1203 		if (musb_h_ep0_continue(musb, len, urb)) {
1204 			/* more packets required */
1205 			csr = (MUSB_EP0_IN == musb->ep0_stage)
1206 				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1207 		} else {
1208 			/* data transfer complete; perform status phase */
1209 			if (usb_pipeout(urb->pipe)
1210 					|| !urb->transfer_buffer_length)
1211 				csr = MUSB_CSR0_H_STATUSPKT
1212 					| MUSB_CSR0_H_REQPKT;
1213 			else
1214 				csr = MUSB_CSR0_H_STATUSPKT
1215 					| MUSB_CSR0_TXPKTRDY;
1216 
1217 			/* disable ping token in status phase */
1218 			csr |= MUSB_CSR0_H_DIS_PING;
1219 
1220 			/* flag status stage */
1221 			musb->ep0_stage = MUSB_EP0_STATUS;
1222 
1223 			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
1224 
1225 		}
1226 		musb_writew(epio, MUSB_CSR0, csr);
1227 		retval = IRQ_HANDLED;
1228 	} else
1229 		musb->ep0_stage = MUSB_EP0_IDLE;
1230 
1231 	/* call completion handler if done */
1232 	if (complete)
1233 		musb_advance_schedule(musb, urb, hw_ep, 1);
1234 done:
1235 	return retval;
1236 }
1237 
1238 
1239 #ifdef CONFIG_USB_INVENTRA_DMA
1240 
1241 /* Host side TX (OUT) using Mentor DMA works as follows:
1242 	submit_urb ->
1243 		- if queue was empty, Program Endpoint
1244 		- ... which starts DMA to fifo in mode 1 or 0
1245 
1246 	DMA Isr (transfer complete) -> TxAvail()
1247 		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
1248 					only in musb_cleanup_urb)
1249 		- TxPktRdy has to be set in mode 0 or for
1250 			short packets in mode 1.
1251 */
1252 
1253 #endif
1254 
1255 /* Service a Tx-Available or dma completion irq for the endpoint */
1256 void musb_host_tx(struct musb *musb, u8 epnum)
1257 {
1258 	int			pipe;
1259 	bool			done = false;
1260 	u16			tx_csr;
1261 	size_t			length = 0;
1262 	size_t			offset = 0;
1263 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1264 	void __iomem		*epio = hw_ep->regs;
1265 	struct musb_qh		*qh = hw_ep->out_qh;
1266 	struct urb		*urb = next_urb(qh);
1267 	u32			status = 0;
1268 	void __iomem		*mbase = musb->mregs;
1269 	struct dma_channel	*dma;
1270 	bool			transfer_pending = false;
1271 
1272 	musb_ep_select(mbase, epnum);
1273 	tx_csr = musb_readw(epio, MUSB_TXCSR);
1274 
1275 	/* with CPPI, DMA sometimes triggers "extra" irqs */
1276 	if (!urb) {
1277 		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1278 		return;
1279 	}
1280 
1281 	pipe = urb->pipe;
1282 	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1283 	trace_musb_urb_tx(musb, urb);
1284 	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
1285 			dma ? ", dma" : "");
1286 
1287 	/* check for errors */
1288 	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1289 		/* dma was disabled, fifo flushed */
1290 		musb_dbg(musb, "TX end %d stall", epnum);
1291 
1292 		/* stall; record URB status */
1293 		status = -EPIPE;
1294 
1295 	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1296 		/* (NON-ISO) dma was disabled, fifo flushed */
1297 		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
1298 
1299 		status = -ETIMEDOUT;
1300 
1301 	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1302 		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1303 				&& !list_is_singular(&musb->out_bulk)) {
1304 			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
1305 			musb_bulk_nak_timeout(musb, hw_ep, 0);
1306 		} else {
1307 			musb_dbg(musb, "TX ep%d device not responding", epnum);
1308 			/* NOTE:  this code path would be a good place to PAUSE a
1309 			 * transfer, if there's some other (nonperiodic) tx urb
1310 			 * that could use this fifo.  (dma complicates it...)
1311 			 * That's already done for bulk RX transfers.
1312 			 *
1313 			 * if (bulk && qh->ring.next != &musb->out_bulk), then
1314 			 * we have a candidate... NAKing is *NOT* an error
1315 			 */
1316 			musb_ep_select(mbase, epnum);
1317 			musb_writew(epio, MUSB_TXCSR,
1318 					MUSB_TXCSR_H_WZC_BITS
1319 					| MUSB_TXCSR_TXPKTRDY);
1320 		}
1321 			return;
1322 	}
1323 
1324 done:
1325 	if (status) {
1326 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1327 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1328 			musb->dma_controller->channel_abort(dma);
1329 		}
1330 
1331 		/* do the proper sequence to abort the transfer in the
1332 		 * usb core; the dma engine should already be stopped.
1333 		 */
1334 		musb_h_tx_flush_fifo(hw_ep);
1335 		tx_csr &= ~(MUSB_TXCSR_AUTOSET
1336 				| MUSB_TXCSR_DMAENAB
1337 				| MUSB_TXCSR_H_ERROR
1338 				| MUSB_TXCSR_H_RXSTALL
1339 				| MUSB_TXCSR_H_NAKTIMEOUT
1340 				);
1341 
1342 		musb_ep_select(mbase, epnum);
1343 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1344 		/* REVISIT may need to clear FLUSHFIFO ... */
1345 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1346 		musb_writeb(epio, MUSB_TXINTERVAL, 0);
1347 
1348 		done = true;
1349 	}
1350 
1351 	/* second cppi case */
1352 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1353 		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1354 		return;
1355 	}
1356 
1357 	if (is_dma_capable() && dma && !status) {
1358 		/*
1359 		 * DMA has completed.  But if we're using DMA mode 1 (multi
1360 		 * packet DMA), we need a terminal TXPKTRDY interrupt before
1361 		 * we can consider this transfer completed, lest we trash
1362 		 * its last packet when writing the next URB's data.  So we
1363 		 * switch back to mode 0 to get that interrupt; we'll come
1364 		 * back here once it happens.
1365 		 */
1366 		if (tx_csr & MUSB_TXCSR_DMAMODE) {
1367 			/*
1368 			 * We shouldn't clear DMAMODE with DMAENAB set; so
1369 			 * clear them in a safe order.  That should be OK
1370 			 * once TXPKTRDY has been set (and I've never seen
1371 			 * it being 0 at this moment -- DMA interrupt latency
1372 			 * is significant) but if it hasn't been then we have
1373 			 * no choice but to stop being polite and ignore the
1374 			 * programmer's guide... :-)
1375 			 *
1376 			 * Note that we must write TXCSR with TXPKTRDY cleared
1377 			 * in order not to re-trigger the packet send (this bit
1378 			 * can't be cleared by CPU), and there's another caveat:
1379 			 * TXPKTRDY may be set shortly and then cleared in the
1380 			 * double-buffered FIFO mode, so we do an extra TXCSR
1381 			 * read for debouncing...
1382 			 */
1383 			tx_csr &= musb_readw(epio, MUSB_TXCSR);
1384 			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1385 				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1386 					    MUSB_TXCSR_TXPKTRDY);
1387 				musb_writew(epio, MUSB_TXCSR,
1388 					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
1389 			}
1390 			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1391 				    MUSB_TXCSR_TXPKTRDY);
1392 			musb_writew(epio, MUSB_TXCSR,
1393 				    tx_csr | MUSB_TXCSR_H_WZC_BITS);
1394 
1395 			/*
1396 			 * There is no guarantee that we'll get an interrupt
1397 			 * after clearing DMAMODE as we might have done this
1398 			 * too late (after TXPKTRDY was cleared by controller).
1399 			 * Re-read TXCSR as we have spoiled its previous value.
1400 			 */
1401 			tx_csr = musb_readw(epio, MUSB_TXCSR);
1402 		}
1403 
1404 		/*
1405 		 * We may get here from a DMA completion or TXPKTRDY interrupt.
1406 		 * In any case, we must check the FIFO status here and bail out
1407 		 * only if the FIFO still has data -- that should prevent the
1408 		 * "missed" TXPKTRDY interrupts and deal with double-buffered
1409 		 * FIFO mode too...
1410 		 */
1411 		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1412 			musb_dbg(musb,
1413 				"DMA complete but FIFO not empty, CSR %04x",
1414 				tx_csr);
1415 			return;
1416 		}
1417 	}
1418 
1419 	if (!status || dma || usb_pipeisoc(pipe)) {
1420 		if (dma)
1421 			length = dma->actual_len;
1422 		else
1423 			length = qh->segsize;
1424 		qh->offset += length;
1425 
1426 		if (usb_pipeisoc(pipe)) {
1427 			struct usb_iso_packet_descriptor	*d;
1428 
1429 			d = urb->iso_frame_desc + qh->iso_idx;
1430 			d->actual_length = length;
1431 			d->status = status;
1432 			if (++qh->iso_idx >= urb->number_of_packets) {
1433 				done = true;
1434 			} else {
1435 				d++;
1436 				offset = d->offset;
1437 				length = d->length;
1438 			}
1439 		} else if (dma && urb->transfer_buffer_length == qh->offset) {
1440 			done = true;
1441 		} else {
1442 			/* see if we need to send more data, or ZLP */
1443 			if (qh->segsize < qh->maxpacket)
1444 				done = true;
1445 			else if (qh->offset == urb->transfer_buffer_length
1446 					&& !(urb->transfer_flags
1447 						& URB_ZERO_PACKET))
1448 				done = true;
1449 			if (!done) {
1450 				offset = qh->offset;
1451 				length = urb->transfer_buffer_length - offset;
1452 				transfer_pending = true;
1453 			}
1454 		}
1455 	}
1456 
1457 	/* urb->status != -EINPROGRESS means request has been faulted,
1458 	 * so we must abort this transfer after cleanup
1459 	 */
1460 	if (urb->status != -EINPROGRESS) {
1461 		done = true;
1462 		if (status == 0)
1463 			status = urb->status;
1464 	}
1465 
1466 	if (done) {
1467 		/* set status */
1468 		urb->status = status;
1469 		urb->actual_length = qh->offset;
1470 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1471 		return;
1472 	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1473 		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1474 				offset, length)) {
1475 			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
1476 				musb_h_tx_dma_start(hw_ep);
1477 			return;
1478 		}
1479 	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
1480 		musb_dbg(musb, "not complete, but DMA enabled?");
1481 		return;
1482 	}
1483 
1484 	/*
1485 	 * PIO: start next packet in this URB.
1486 	 *
1487 	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1488 	 * (and presumably, FIFO is not half-full) we should write *two*
1489 	 * packets before updating TXCSR; other docs disagree...
1490 	 */
1491 	if (length > qh->maxpacket)
1492 		length = qh->maxpacket;
1493 	/* Unmap the buffer so that CPU can use it */
1494 	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1495 
1496 	/*
1497 	 * We need to map sg if the transfer_buffer is
1498 	 * NULL.
1499 	 */
1500 	if (!urb->transfer_buffer)
1501 		qh->use_sg = true;
1502 
1503 	if (qh->use_sg) {
1504 		/* sg_miter_start is already done in musb_ep_program */
1505 		if (!sg_miter_next(&qh->sg_miter)) {
1506 			dev_err(musb->controller, "error: sg list empty\n");
1507 			sg_miter_stop(&qh->sg_miter);
1508 			status = -EINVAL;
1509 			goto done;
1510 		}
1511 		urb->transfer_buffer = qh->sg_miter.addr;
1512 		length = min_t(u32, length, qh->sg_miter.length);
1513 		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
1514 		qh->sg_miter.consumed = length;
1515 		sg_miter_stop(&qh->sg_miter);
1516 	} else {
1517 		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1518 	}
1519 
1520 	qh->segsize = length;
1521 
1522 	if (qh->use_sg) {
1523 		if (offset + length >= urb->transfer_buffer_length)
1524 			qh->use_sg = false;
1525 	}
1526 
1527 	musb_ep_select(mbase, epnum);
1528 	musb_writew(epio, MUSB_TXCSR,
1529 			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1530 }
1531 
1532 #ifdef CONFIG_USB_TI_CPPI41_DMA
1533 /* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
1534 static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1535 				  struct musb_hw_ep *hw_ep,
1536 				  struct musb_qh *qh,
1537 				  struct urb *urb,
1538 				  size_t len)
1539 {
1540 	struct dma_channel *channel = hw_ep->rx_channel;
1541 	void __iomem *epio = hw_ep->regs;
1542 	dma_addr_t *buf;
1543 	u32 length, res;
1544 	u16 val;
1545 
1546 	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1547 		(u32)urb->transfer_dma;
1548 
1549 	length = urb->iso_frame_desc[qh->iso_idx].length;
1550 
1551 	val = musb_readw(epio, MUSB_RXCSR);
1552 	val |= MUSB_RXCSR_DMAENAB;
1553 	musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1554 
1555 	res = dma->channel_program(channel, qh->maxpacket, 0,
1556 				   (u32)buf, length);
1557 
1558 	return res;
1559 }
1560 #else
1561 static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1562 					 struct musb_hw_ep *hw_ep,
1563 					 struct musb_qh *qh,
1564 					 struct urb *urb,
1565 					 size_t len)
1566 {
1567 	return false;
1568 }
1569 #endif
1570 
1571 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1572 	defined(CONFIG_USB_TI_CPPI41_DMA)
1573 /* Host side RX (IN) using Mentor DMA works as follows:
1574 	submit_urb ->
1575 		- if queue was empty, ProgramEndpoint
1576 		- first IN token is sent out (by setting ReqPkt)
1577 	LinuxIsr -> RxReady()
1578 	/\	=> first packet is received
1579 	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
1580 	|		-> DMA Isr (transfer complete) -> RxReady()
1581 	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1582 	|		    - if urb not complete, send next IN token (ReqPkt)
1583 	|			   |		else complete urb.
1584 	|			   |
1585 	---------------------------
1586  *
1587  * Nuances of mode 1:
1588  *	For short packets, no ack (+RxPktRdy) is sent automatically
1589  *	(even if AutoClear is ON)
1590  *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1591  *	automatically => major problem, as collecting the next packet becomes
1592  *	difficult. Hence mode 1 is not used.
1593  *
1594  * REVISIT
1595  *	All we care about at this driver level is that
1596  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1597  *       (b) termination conditions are: short RX, or buffer full;
1598  *       (c) fault modes include
1599  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1600  *             (and that endpoint's dma queue stops immediately)
1601  *           - overflow (full, PLUS more bytes in the terminal packet)
1602  *
1603  *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1604  *	thus be a great candidate for using mode 1 ... for all but the
1605  *	last packet of one URB's transfer.
1606  */
1607 static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1608 				       struct musb_hw_ep *hw_ep,
1609 				       struct musb_qh *qh,
1610 				       struct urb *urb,
1611 				       size_t len)
1612 {
1613 	struct dma_channel *channel = hw_ep->rx_channel;
1614 	void __iomem *epio = hw_ep->regs;
1615 	u16 val;
1616 	int pipe;
1617 	bool done;
1618 
1619 	pipe = urb->pipe;
1620 
1621 	if (usb_pipeisoc(pipe)) {
1622 		struct usb_iso_packet_descriptor *d;
1623 
1624 		d = urb->iso_frame_desc + qh->iso_idx;
1625 		d->actual_length = len;
1626 
1627 		/* even if there was an error, we did the dma
1628 		 * for iso_frame_desc->length
1629 		 */
1630 		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1631 			d->status = 0;
1632 
1633 		if (++qh->iso_idx >= urb->number_of_packets) {
1634 			done = true;
1635 		} else {
1636 			/* REVISIT: Why ignore return value here? */
1637 			if (musb_dma_cppi41(hw_ep->musb))
1638 				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1639 							      urb, len);
1640 			done = false;
1641 		}
1642 
1643 	} else  {
1644 		/* done if urb buffer is full or short packet is recd */
1645 		done = (urb->actual_length + len >=
1646 			urb->transfer_buffer_length
1647 			|| channel->actual_len < qh->maxpacket
1648 			|| channel->rx_packet_done);
1649 	}
1650 
1651 	/* send IN token for next packet, without AUTOREQ */
1652 	if (!done) {
1653 		val = musb_readw(epio, MUSB_RXCSR);
1654 		val |= MUSB_RXCSR_H_REQPKT;
1655 		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1656 	}
1657 
1658 	return done;
1659 }
1660 
1661 /* Disadvantage of using mode 1:
1662  *	It's basically usable only for mass storage class; essentially all
1663  *	other protocols also terminate transfers on short packets.
1664  *
1665  * Details:
1666  *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1667  *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
1668  *	to use the extra IN token to grab the last packet using mode 0, then
1669  *	the problem is that you cannot be sure when the device will send the
1670  *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
1671  *	such that it gets lost when RxCSR is re-set at the end of the mode 1
1672  *	transfer, while sometimes it is recd just a little late so that if you
1673  *	try to configure for mode 0 soon after the mode 1 transfer is
1674  *	completed, you will find rxcount 0. Okay, so you might think why not
1675  *	wait for an interrupt when the pkt is recd. Well, you won't get any!
1676  */
1677 static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1678 					  struct musb_hw_ep *hw_ep,
1679 					  struct musb_qh *qh,
1680 					  struct urb *urb,
1681 					  size_t len,
1682 					  u8 iso_err)
1683 {
1684 	struct musb *musb = hw_ep->musb;
1685 	void __iomem *epio = hw_ep->regs;
1686 	struct dma_channel *channel = hw_ep->rx_channel;
1687 	u16 rx_count, val;
1688 	int length, pipe, done;
1689 	dma_addr_t buf;
1690 
1691 	rx_count = musb_readw(epio, MUSB_RXCOUNT);
1692 	pipe = urb->pipe;
1693 
1694 	if (usb_pipeisoc(pipe)) {
1695 		int d_status = 0;
1696 		struct usb_iso_packet_descriptor *d;
1697 
1698 		d = urb->iso_frame_desc + qh->iso_idx;
1699 
1700 		if (iso_err) {
1701 			d_status = -EILSEQ;
1702 			urb->error_count++;
1703 		}
1704 		if (rx_count > d->length) {
1705 			if (d_status == 0) {
1706 				d_status = -EOVERFLOW;
1707 				urb->error_count++;
1708 			}
1709 			musb_dbg(musb, "** OVERFLOW %d into %d",
1710 				rx_count, d->length);
1711 
1712 			length = d->length;
1713 		} else
1714 			length = rx_count;
1715 		d->status = d_status;
1716 		buf = urb->transfer_dma + d->offset;
1717 	} else {
1718 		length = rx_count;
1719 		buf = urb->transfer_dma + urb->actual_length;
1720 	}
1721 
1722 	channel->desired_mode = 0;
1723 #ifdef USE_MODE1
1724 	/* because of the issue below, mode 1 will
1725 	 * only rarely behave with correct semantics.
1726 	 */
1727 	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
1728 	    && (urb->transfer_buffer_length - urb->actual_length)
1729 	    > qh->maxpacket)
1730 		channel->desired_mode = 1;
1731 	if (rx_count < hw_ep->max_packet_sz_rx) {
1732 		length = rx_count;
1733 		channel->desired_mode = 0;
1734 	} else {
1735 		length = urb->transfer_buffer_length;
1736 	}
1737 #endif
1738 
1739 	/* See comments above on disadvantages of using mode 1 */
1740 	val = musb_readw(epio, MUSB_RXCSR);
1741 	val &= ~MUSB_RXCSR_H_REQPKT;
1742 
1743 	if (channel->desired_mode == 0)
1744 		val &= ~MUSB_RXCSR_H_AUTOREQ;
1745 	else
1746 		val |= MUSB_RXCSR_H_AUTOREQ;
1747 	val |= MUSB_RXCSR_DMAENAB;
1748 
1749 	/* autoclear shouldn't be set in high bandwidth */
1750 	if (qh->hb_mult == 1)
1751 		val |= MUSB_RXCSR_AUTOCLEAR;
1752 
1753 	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1754 
1755 	/* REVISIT if when actual_length != 0,
1756 	 * transfer_buffer_length needs to be
1757 	 * adjusted first...
1758 	 */
1759 	done = dma->channel_program(channel, qh->maxpacket,
1760 				   channel->desired_mode,
1761 				   buf, length);
1762 
1763 	if (!done) {
1764 		dma->channel_release(channel);
1765 		hw_ep->rx_channel = NULL;
1766 		channel = NULL;
1767 		val = musb_readw(epio, MUSB_RXCSR);
1768 		val &= ~(MUSB_RXCSR_DMAENAB
1769 			 | MUSB_RXCSR_H_AUTOREQ
1770 			 | MUSB_RXCSR_AUTOCLEAR);
1771 		musb_writew(epio, MUSB_RXCSR, val);
1772 	}
1773 
1774 	return done;
1775 }
1776 #else
1777 static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1778 					      struct musb_hw_ep *hw_ep,
1779 					      struct musb_qh *qh,
1780 					      struct urb *urb,
1781 					      size_t len)
1782 {
1783 	return false;
1784 }
1785 
1786 static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1787 						 struct musb_hw_ep *hw_ep,
1788 						 struct musb_qh *qh,
1789 						 struct urb *urb,
1790 						 size_t len,
1791 						 u8 iso_err)
1792 {
1793 	return false;
1794 }
1795 #endif
1796 
1797 /*
1798  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1799  * and high-bandwidth IN transfer cases.
1800  */
1801 void musb_host_rx(struct musb *musb, u8 epnum)
1802 {
1803 	struct urb		*urb;
1804 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1805 	struct dma_controller	*c = musb->dma_controller;
1806 	void __iomem		*epio = hw_ep->regs;
1807 	struct musb_qh		*qh = hw_ep->in_qh;
1808 	size_t			xfer_len;
1809 	void __iomem		*mbase = musb->mregs;
1810 	int			pipe;
1811 	u16			rx_csr, val;
1812 	bool			iso_err = false;
1813 	bool			done = false;
1814 	u32			status;
1815 	struct dma_channel	*dma;
1816 	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1817 
1818 	musb_ep_select(mbase, epnum);
1819 
1820 	urb = next_urb(qh);
1821 	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1822 	status = 0;
1823 	xfer_len = 0;
1824 
1825 	rx_csr = musb_readw(epio, MUSB_RXCSR);
1826 	val = rx_csr;
1827 
1828 	if (unlikely(!urb)) {
1829 		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1830 		 * usbtest #11 (unlinks) triggers it regularly, sometimes
1831 		 * with fifo full.  (Only with DMA??)
1832 		 */
1833 		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
1834 			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
1835 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1836 		return;
1837 	}
1838 
1839 	pipe = urb->pipe;
1840 
1841 	trace_musb_urb_rx(musb, urb);
1842 
1843 	/* check for errors, concurrent stall & unlink is not really
1844 	 * handled yet! */
1845 	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1846 		musb_dbg(musb, "RX end %d STALL", epnum);
1847 
1848 		/* stall; record URB status */
1849 		status = -EPIPE;
1850 
1851 	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1852 		musb_dbg(musb, "end %d RX proto error", epnum);
1853 
1854 		status = -EPROTO;
1855 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1856 
1857 		rx_csr &= ~MUSB_RXCSR_H_ERROR;
1858 		musb_writew(epio, MUSB_RXCSR, rx_csr);
1859 
1860 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1861 
1862 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1863 			musb_dbg(musb, "RX end %d NAK timeout", epnum);
1864 
1865 			/* NOTE: NAKing is *NOT* an error, so we want to
1866 			 * continue.  Except ... if there's a request for
1867 			 * another QH, use that instead of starving it.
1868 			 *
1869 			 * Devices like Ethernet and serial adapters keep
1870 			 * reads posted at all times, which will starve
1871 			 * other devices without this logic.
1872 			 */
1873 			if (usb_pipebulk(urb->pipe)
1874 					&& qh->mux == 1
1875 					&& !list_is_singular(&musb->in_bulk)) {
1876 				musb_bulk_nak_timeout(musb, hw_ep, 1);
1877 				return;
1878 			}
1879 			musb_ep_select(mbase, epnum);
1880 			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1881 			rx_csr &= ~MUSB_RXCSR_DATAERROR;
1882 			musb_writew(epio, MUSB_RXCSR, rx_csr);
1883 
1884 			goto finish;
1885 		} else {
1886 			musb_dbg(musb, "RX end %d ISO data error", epnum);
1887 			/* packet error reported later */
1888 			iso_err = true;
1889 		}
1890 	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1891 		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
1892 				epnum);
1893 		status = -EPROTO;
1894 	}
1895 
1896 	/* faults abort the transfer */
1897 	if (status) {
1898 		/* clean up dma and collect transfer count */
1899 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1900 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1901 			musb->dma_controller->channel_abort(dma);
1902 			xfer_len = dma->actual_len;
1903 		}
1904 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1905 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1906 		done = true;
1907 		goto finish;
1908 	}
1909 
1910 	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1911 		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1912 		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1913 		goto finish;
1914 	}
1915 
1916 	/* thorough shutdown for now ... given more precise fault handling
1917 	 * and better queueing support, we might keep a DMA pipeline going
1918 	 * while processing this irq for earlier completions.
1919 	 */
1920 
1921 	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
1922 	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1923 	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {
1924 		/* REVISIT this happened for a while on some short reads...
1925 		 * the cleanup still needs investigation... looks bad...
1926 		 * and also duplicates dma cleanup code above ... plus,
1927 		 * shouldn't this be the "half full" double buffer case?
1928 		 */
1929 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1930 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1931 			musb->dma_controller->channel_abort(dma);
1932 			xfer_len = dma->actual_len;
1933 			done = true;
1934 		}
1935 
1936 		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
1937 				xfer_len, dma ? ", dma" : "");
1938 		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1939 
1940 		musb_ep_select(mbase, epnum);
1941 		musb_writew(epio, MUSB_RXCSR,
1942 				MUSB_RXCSR_H_WZC_BITS | rx_csr);
1943 	}
1944 
1945 	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1946 		xfer_len = dma->actual_len;
1947 
1948 		val &= ~(MUSB_RXCSR_DMAENAB
1949 			| MUSB_RXCSR_H_AUTOREQ
1950 			| MUSB_RXCSR_AUTOCLEAR
1951 			| MUSB_RXCSR_RXPKTRDY);
1952 		musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1953 
1954 		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1955 		    musb_dma_cppi41(musb)) {
1956 			    done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1957 			    musb_dbg(hw_ep->musb,
1958 				    "ep %d dma %s, rxcsr %04x, rxcount %d",
1959 				    epnum, done ? "off" : "reset",
1960 				    musb_readw(epio, MUSB_RXCSR),
1961 				    musb_readw(epio, MUSB_RXCOUNT));
1962 		} else {
1963 			done = true;
1964 		}
1965 
1966 	} else if (urb->status == -EINPROGRESS) {
1967 		/* if no errors, be sure a packet is ready for unloading */
1968 		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1969 			status = -EPROTO;
1970 			ERR("Rx interrupt with no errors or packet!\n");
1971 
1972 			/* FIXME this is another "SHOULD NEVER HAPPEN" */
1973 
1974 /* SCRUB (RX) */
1975 			/* do the proper sequence to abort the transfer */
1976 			musb_ep_select(mbase, epnum);
1977 			val &= ~MUSB_RXCSR_H_REQPKT;
1978 			musb_writew(epio, MUSB_RXCSR, val);
1979 			goto finish;
1980 		}
1981 
1982 		/* we are expecting IN packets */
1983 		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1984 		    musb_dma_cppi41(musb)) && dma) {
1985 			musb_dbg(hw_ep->musb,
1986 				"RX%d count %d, buffer 0x%llx len %d/%d",
1987 				epnum, musb_readw(epio, MUSB_RXCOUNT),
1988 				(unsigned long long) urb->transfer_dma
1989 				+ urb->actual_length,
1990 				qh->offset,
1991 				urb->transfer_buffer_length);
1992 
1993 			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1994 							   xfer_len, iso_err))
1995 				goto finish;
1996 			else
1997 				dev_err(musb->controller, "error: rx_dma failed\n");
1998 		}
1999 
2000 		if (!dma) {
2001 			unsigned int received_len;
2002 
2003 			/* Unmap the buffer so that CPU can use it */
2004 			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
2005 
2006 			/*
2007 			 * We need to map sg if the transfer_buffer is
2008 			 * NULL.
2009 			 */
2010 			if (!urb->transfer_buffer) {
2011 				qh->use_sg = true;
2012 				sg_miter_start(&qh->sg_miter, urb->sg, 1,
2013 						sg_flags);
2014 			}
2015 
2016 			if (qh->use_sg) {
2017 				if (!sg_miter_next(&qh->sg_miter)) {
2018 					dev_err(musb->controller, "error: sg list empty\n");
2019 					sg_miter_stop(&qh->sg_miter);
2020 					status = -EINVAL;
2021 					done = true;
2022 					goto finish;
2023 				}
2024 				urb->transfer_buffer = qh->sg_miter.addr;
2025 				received_len = urb->actual_length;
2026 				qh->offset = 0x0;
2027 				done = musb_host_packet_rx(musb, urb, epnum,
2028 						iso_err);
2029 				/* Calculate the number of bytes received */
2030 				received_len = urb->actual_length -
2031 					received_len;
2032 				qh->sg_miter.consumed = received_len;
2033 				sg_miter_stop(&qh->sg_miter);
2034 			} else {
2035 				done = musb_host_packet_rx(musb, urb,
2036 						epnum, iso_err);
2037 			}
2038 			musb_dbg(musb, "read %spacket", done ? "last " : "");
2039 		}
2040 	}
2041 
2042 finish:
2043 	urb->actual_length += xfer_len;
2044 	qh->offset += xfer_len;
2045 	if (done) {
2046 		if (qh->use_sg)
2047 			qh->use_sg = false;
2048 
2049 		if (urb->status == -EINPROGRESS)
2050 			urb->status = status;
2051 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
2052 	}
2053 }
2054 
2055 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
2056  * the software schedule associates multiple such nodes with a given
2057  * host side hardware endpoint + direction; scheduling may activate
2058  * that hardware endpoint.
2059  */
2060 static int musb_schedule(
2061 	struct musb		*musb,
2062 	struct musb_qh		*qh,
2063 	int			is_in)
2064 {
2065 	int			idle = 0;
2066 	int			best_diff;
2067 	int			best_end, epnum;
2068 	struct musb_hw_ep	*hw_ep = NULL;
2069 	struct list_head	*head = NULL;
2070 	u8			toggle;
2071 	u8			txtype;
2072 	struct urb		*urb = next_urb(qh);
2073 
2074 	/* use fixed hardware for control and bulk */
2075 	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2076 		head = &musb->control;
2077 		hw_ep = musb->control_ep;
2078 		goto success;
2079 	}
2080 
2081 	/* else, periodic transfers get muxed to other endpoints */
2082 
2083 	/*
2084 	 * We know this qh hasn't been scheduled, so all we need to do
2085 	 * is choose which hardware endpoint to put it on ...
2086 	 *
2087 	 * REVISIT what we really want here is a regular schedule tree
2088 	 * like e.g. OHCI uses.
2089 	 */
2090 	best_diff = 4096;
2091 	best_end = -1;
2092 
2093 	for (epnum = 1, hw_ep = musb->endpoints + 1;
2094 			epnum < musb->nr_endpoints;
2095 			epnum++, hw_ep++) {
2096 		int	diff;
2097 
2098 		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2099 			continue;
2100 
2101 		if (hw_ep == musb->bulk_ep)
2102 			continue;
2103 
2104 		if (is_in)
2105 			diff = hw_ep->max_packet_sz_rx;
2106 		else
2107 			diff = hw_ep->max_packet_sz_tx;
2108 		diff -= (qh->maxpacket * qh->hb_mult);
2109 
2110 		if (diff >= 0 && best_diff > diff) {
2111 
2112 			/*
2113 			 * Mentor controller has a bug in that if we schedule
2114 			 * a BULK Tx transfer on an endpoint that had earlier
2115 			 * handled ISOC then the BULK transfer has to start on
2116 			 * a zero toggle.  If the BULK transfer starts on a 1
2117 			 * toggle then this transfer will fail as the mentor
2118 			 * controller starts the Bulk transfer on a 0 toggle
2119 			 * irrespective of the programming of the toggle bits
2120 			 * in the TXCSR register.  Check for this condition
2121 			 * while allocating the EP for a Tx Bulk transfer.  If
2122 			 * so skip this EP.
2123 			 */
2124 			hw_ep = musb->endpoints + epnum;
2125 			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2126 			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2127 					>> 4) & 0x3;
2128 			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2129 				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2130 				continue;
2131 
2132 			best_diff = diff;
2133 			best_end = epnum;
2134 		}
2135 	}
2136 	/* use bulk reserved ep1 if no other ep is free */
2137 	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2138 		hw_ep = musb->bulk_ep;
2139 		if (is_in)
2140 			head = &musb->in_bulk;
2141 		else
2142 			head = &musb->out_bulk;
2143 
2144 		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
2145 		 * multiplexed. This scheme does not work in high speed to full
2146 		 * speed scenario as NAK interrupts are not coming from a
2147 		 * full speed device connected to a high speed device.
2148 		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
2149 		 * 4 (8 frame or 8ms) for FS device.
2150 		 */
2151 		if (qh->dev)
2152 			qh->intv_reg =
2153 				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2154 		goto success;
2155 	} else if (best_end < 0) {
2156 		return -ENOSPC;
2157 	}
2158 
2159 	idle = 1;
2160 	qh->mux = 0;
2161 	hw_ep = musb->endpoints + best_end;
2162 	musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2163 success:
2164 	if (head) {
2165 		idle = list_empty(head);
2166 		list_add_tail(&qh->ring, head);
2167 		qh->mux = 1;
2168 	}
2169 	qh->hw_ep = hw_ep;
2170 	qh->hep->hcpriv = qh;
2171 	if (idle)
2172 		musb_start_urb(musb, is_in, qh);
2173 	return 0;
2174 }
2175 
2176 static int musb_urb_enqueue(
2177 	struct usb_hcd			*hcd,
2178 	struct urb			*urb,
2179 	gfp_t				mem_flags)
2180 {
2181 	unsigned long			flags;
2182 	struct musb			*musb = hcd_to_musb(hcd);
2183 	struct usb_host_endpoint	*hep = urb->ep;
2184 	struct musb_qh			*qh;
2185 	struct usb_endpoint_descriptor	*epd = &hep->desc;
2186 	int				ret;
2187 	unsigned			type_reg;
2188 	unsigned			interval;
2189 
2190 	/* host role must be active */
2191 	if (!is_host_active(musb) || !musb->is_active)
2192 		return -ENODEV;
2193 
2194 	trace_musb_urb_enq(musb, urb);
2195 
2196 	spin_lock_irqsave(&musb->lock, flags);
2197 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
2198 	qh = ret ? NULL : hep->hcpriv;
2199 	if (qh)
2200 		urb->hcpriv = qh;
2201 	spin_unlock_irqrestore(&musb->lock, flags);
2202 
2203 	/* DMA mapping was already done, if needed, and this urb is on
2204 	 * hep->urb_list now ... so we're done, unless hep wasn't yet
2205 	 * scheduled onto a live qh.
2206 	 *
2207 	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2208 	 * disabled, testing for empty qh->ring and avoiding qh setup costs
2209 	 * except for the first urb queued after a config change.
2210 	 */
2211 	if (qh || ret)
2212 		return ret;
2213 
2214 	/* Allocate and initialize qh, minimizing the work done each time
2215 	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
2216 	 *
2217 	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2218 	 * for bugs in other kernel code to break this driver...
2219 	 */
2220 	qh = kzalloc(sizeof *qh, mem_flags);
2221 	if (!qh) {
2222 		spin_lock_irqsave(&musb->lock, flags);
2223 		usb_hcd_unlink_urb_from_ep(hcd, urb);
2224 		spin_unlock_irqrestore(&musb->lock, flags);
2225 		return -ENOMEM;
2226 	}
2227 
2228 	qh->hep = hep;
2229 	qh->dev = urb->dev;
2230 	INIT_LIST_HEAD(&qh->ring);
2231 	qh->is_ready = 1;
2232 
2233 	qh->maxpacket = usb_endpoint_maxp(epd);
2234 	qh->type = usb_endpoint_type(epd);
2235 
2236 	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2237 	 * Some musb cores don't support high bandwidth ISO transfers; and
2238 	 * we don't (yet!) support high bandwidth interrupt transfers.
2239 	 */
2240 	qh->hb_mult = usb_endpoint_maxp_mult(epd);
2241 	if (qh->hb_mult > 1) {
2242 		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2243 
2244 		if (ok)
2245 			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2246 				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2247 		if (!ok) {
2248 			ret = -EMSGSIZE;
2249 			goto done;
2250 		}
2251 		qh->maxpacket &= 0x7ff;
2252 	}
2253 
2254 	qh->epnum = usb_endpoint_num(epd);
2255 
2256 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2257 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2258 
2259 	/* precompute rxtype/txtype/type0 register */
2260 	type_reg = (qh->type << 4) | qh->epnum;
2261 	switch (urb->dev->speed) {
2262 	case USB_SPEED_LOW:
2263 		type_reg |= 0xc0;
2264 		break;
2265 	case USB_SPEED_FULL:
2266 		type_reg |= 0x80;
2267 		break;
2268 	default:
2269 		type_reg |= 0x40;
2270 	}
2271 	qh->type_reg = type_reg;
2272 
2273 	/* Precompute RXINTERVAL/TXINTERVAL register */
2274 	switch (qh->type) {
2275 	case USB_ENDPOINT_XFER_INT:
2276 		/*
2277 		 * Full/low speeds use the  linear encoding,
2278 		 * high speed uses the logarithmic encoding.
2279 		 */
2280 		if (urb->dev->speed <= USB_SPEED_FULL) {
2281 			interval = max_t(u8, epd->bInterval, 1);
2282 			break;
2283 		}
2284 		/* FALLTHROUGH */
2285 	case USB_ENDPOINT_XFER_ISOC:
2286 		/* ISO always uses logarithmic encoding */
2287 		interval = min_t(u8, epd->bInterval, 16);
2288 		break;
2289 	default:
2290 		/* REVISIT we actually want to use NAK limits, hinting to the
2291 		 * transfer scheduling logic to try some other qh, e.g. try
2292 		 * for 2 msec first:
2293 		 *
2294 		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2295 		 *
2296 		 * The downside of disabling this is that transfer scheduling
2297 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
2298 		 * peripheral could make that hurt.  That's perfectly normal
2299 		 * for reads from network or serial adapters ... so we have
2300 		 * partial NAKlimit support for bulk RX.
2301 		 *
2302 		 * The upside of disabling it is simpler transfer scheduling.
2303 		 */
2304 		interval = 0;
2305 	}
2306 	qh->intv_reg = interval;
2307 
2308 	/* precompute addressing for external hub/tt ports */
2309 	if (musb->is_multipoint) {
2310 		struct usb_device	*parent = urb->dev->parent;
2311 
2312 		if (parent != hcd->self.root_hub) {
2313 			qh->h_addr_reg = (u8) parent->devnum;
2314 
2315 			/* set up tt info if needed */
2316 			if (urb->dev->tt) {
2317 				qh->h_port_reg = (u8) urb->dev->ttport;
2318 				if (urb->dev->tt->hub)
2319 					qh->h_addr_reg =
2320 						(u8) urb->dev->tt->hub->devnum;
2321 				if (urb->dev->tt->multi)
2322 					qh->h_addr_reg |= 0x80;
2323 			}
2324 		}
2325 	}
2326 
2327 	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2328 	 * until we get real dma queues (with an entry for each urb/buffer),
2329 	 * we only have work to do in the former case.
2330 	 */
2331 	spin_lock_irqsave(&musb->lock, flags);
2332 	if (hep->hcpriv || !next_urb(qh)) {
2333 		/* some concurrent activity submitted another urb to hep...
2334 		 * odd, rare, error prone, but legal.
2335 		 */
2336 		kfree(qh);
2337 		qh = NULL;
2338 		ret = 0;
2339 	} else
2340 		ret = musb_schedule(musb, qh,
2341 				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2342 
2343 	if (ret == 0) {
2344 		urb->hcpriv = qh;
2345 		/* FIXME set urb->start_frame for iso/intr, it's tested in
2346 		 * musb_start_urb(), but otherwise only konicawc cares ...
2347 		 */
2348 	}
2349 	spin_unlock_irqrestore(&musb->lock, flags);
2350 
2351 done:
2352 	if (ret != 0) {
2353 		spin_lock_irqsave(&musb->lock, flags);
2354 		usb_hcd_unlink_urb_from_ep(hcd, urb);
2355 		spin_unlock_irqrestore(&musb->lock, flags);
2356 		kfree(qh);
2357 	}
2358 	return ret;
2359 }
2360 
2361 
2362 /*
2363  * abort a transfer that's at the head of a hardware queue.
2364  * called with controller locked, irqs blocked
2365  * that hardware queue advances to the next transfer, unless prevented
2366  */
2367 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2368 {
2369 	struct musb_hw_ep	*ep = qh->hw_ep;
2370 	struct musb		*musb = ep->musb;
2371 	void __iomem		*epio = ep->regs;
2372 	unsigned		hw_end = ep->epnum;
2373 	void __iomem		*regs = ep->musb->mregs;
2374 	int			is_in = usb_pipein(urb->pipe);
2375 	int			status = 0;
2376 	u16			csr;
2377 	struct dma_channel	*dma = NULL;
2378 
2379 	musb_ep_select(regs, hw_end);
2380 
2381 	if (is_dma_capable()) {
2382 		dma = is_in ? ep->rx_channel : ep->tx_channel;
2383 		if (dma) {
2384 			status = ep->musb->dma_controller->channel_abort(dma);
2385 			musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
2386 				is_in ? 'R' : 'T', ep->epnum,
2387 				urb, status);
2388 			urb->actual_length += dma->actual_len;
2389 		}
2390 	}
2391 
2392 	/* turn off DMA requests, discard state, stop polling ... */
2393 	if (ep->epnum && is_in) {
2394 		/* giveback saves bulk toggle */
2395 		csr = musb_h_flush_rxfifo(ep, 0);
2396 
2397 		/* clear the endpoint's irq status here to avoid bogus irqs */
2398 		if (is_dma_capable() && dma)
2399 			musb_platform_clear_ep_rxintr(musb, ep->epnum);
2400 	} else if (ep->epnum) {
2401 		musb_h_tx_flush_fifo(ep);
2402 		csr = musb_readw(epio, MUSB_TXCSR);
2403 		csr &= ~(MUSB_TXCSR_AUTOSET
2404 			| MUSB_TXCSR_DMAENAB
2405 			| MUSB_TXCSR_H_RXSTALL
2406 			| MUSB_TXCSR_H_NAKTIMEOUT
2407 			| MUSB_TXCSR_H_ERROR
2408 			| MUSB_TXCSR_TXPKTRDY);
2409 		musb_writew(epio, MUSB_TXCSR, csr);
2410 		/* REVISIT may need to clear FLUSHFIFO ... */
2411 		musb_writew(epio, MUSB_TXCSR, csr);
2412 		/* flush cpu writebuffer */
2413 		csr = musb_readw(epio, MUSB_TXCSR);
2414 	} else  {
2415 		musb_h_ep0_flush_fifo(ep);
2416 	}
2417 	if (status == 0)
2418 		musb_advance_schedule(ep->musb, urb, ep, is_in);
2419 	return status;
2420 }
2421 
2422 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2423 {
2424 	struct musb		*musb = hcd_to_musb(hcd);
2425 	struct musb_qh		*qh;
2426 	unsigned long		flags;
2427 	int			is_in  = usb_pipein(urb->pipe);
2428 	int			ret;
2429 
2430 	trace_musb_urb_deq(musb, urb);
2431 
2432 	spin_lock_irqsave(&musb->lock, flags);
2433 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2434 	if (ret)
2435 		goto done;
2436 
2437 	qh = urb->hcpriv;
2438 	if (!qh)
2439 		goto done;
2440 
2441 	/*
2442 	 * Any URB not actively programmed into endpoint hardware can be
2443 	 * immediately given back; that's any URB not at the head of an
2444 	 * endpoint queue, unless someday we get real DMA queues.  And even
2445 	 * if it's at the head, it might not be known to the hardware...
2446 	 *
2447 	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
2448 	 * has already been updated.  This is a synchronous abort; it'd be
2449 	 * OK to hold off until after some IRQ, though.
2450 	 *
2451 	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2452 	 */
2453 	if (!qh->is_ready
2454 			|| urb->urb_list.prev != &qh->hep->urb_list
2455 			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2456 		int	ready = qh->is_ready;
2457 
2458 		qh->is_ready = 0;
2459 		musb_giveback(musb, urb, 0);
2460 		qh->is_ready = ready;
2461 
2462 		/* If nothing else (usually musb_giveback) is using it
2463 		 * and its URB list has emptied, recycle this qh.
2464 		 */
2465 		if (ready && list_empty(&qh->hep->urb_list)) {
2466 			qh->hep->hcpriv = NULL;
2467 			list_del(&qh->ring);
2468 			kfree(qh);
2469 		}
2470 	} else
2471 		ret = musb_cleanup_urb(urb, qh);
2472 done:
2473 	spin_unlock_irqrestore(&musb->lock, flags);
2474 	return ret;
2475 }
2476 
2477 /* disable an endpoint */
2478 static void
2479 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2480 {
2481 	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2482 	unsigned long		flags;
2483 	struct musb		*musb = hcd_to_musb(hcd);
2484 	struct musb_qh		*qh;
2485 	struct urb		*urb;
2486 
2487 	spin_lock_irqsave(&musb->lock, flags);
2488 
2489 	qh = hep->hcpriv;
2490 	if (qh == NULL)
2491 		goto exit;
2492 
2493 	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2494 
2495 	/* Kick the first URB off the hardware, if needed */
2496 	qh->is_ready = 0;
2497 	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2498 		urb = next_urb(qh);
2499 
2500 		/* make software (then hardware) stop ASAP */
2501 		if (!urb->unlinked)
2502 			urb->status = -ESHUTDOWN;
2503 
2504 		/* cleanup */
2505 		musb_cleanup_urb(urb, qh);
2506 
2507 		/* Then nuke all the others ... and advance the
2508 		 * queue on hw_ep (e.g. bulk ring) when we're done.
2509 		 */
2510 		while (!list_empty(&hep->urb_list)) {
2511 			urb = next_urb(qh);
2512 			urb->status = -ESHUTDOWN;
2513 			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2514 		}
2515 	} else {
2516 		/* Just empty the queue; the hardware is busy with
2517 		 * other transfers, and since !qh->is_ready nothing
2518 		 * will activate any of these as it advances.
2519 		 */
2520 		while (!list_empty(&hep->urb_list))
2521 			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2522 
2523 		hep->hcpriv = NULL;
2524 		list_del(&qh->ring);
2525 		kfree(qh);
2526 	}
2527 exit:
2528 	spin_unlock_irqrestore(&musb->lock, flags);
2529 }
2530 
2531 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2532 {
2533 	struct musb	*musb = hcd_to_musb(hcd);
2534 
2535 	return musb_readw(musb->mregs, MUSB_FRAME);
2536 }
2537 
2538 static int musb_h_start(struct usb_hcd *hcd)
2539 {
2540 	struct musb	*musb = hcd_to_musb(hcd);
2541 
2542 	/* NOTE: musb_start() is called when the hub driver turns
2543 	 * on port power, or when (OTG) peripheral starts.
2544 	 */
2545 	hcd->state = HC_STATE_RUNNING;
2546 	musb->port1_status = 0;
2547 	return 0;
2548 }
2549 
2550 static void musb_h_stop(struct usb_hcd *hcd)
2551 {
2552 	musb_stop(hcd_to_musb(hcd));
2553 	hcd->state = HC_STATE_HALT;
2554 }
2555 
2556 static int musb_bus_suspend(struct usb_hcd *hcd)
2557 {
2558 	struct musb	*musb = hcd_to_musb(hcd);
2559 	u8		devctl;
2560 
2561 	musb_port_suspend(musb, true);
2562 
2563 	if (!is_host_active(musb))
2564 		return 0;
2565 
2566 	switch (musb->xceiv->otg->state) {
2567 	case OTG_STATE_A_SUSPEND:
2568 		return 0;
2569 	case OTG_STATE_A_WAIT_VRISE:
2570 		/* ID could be grounded even if there's no device
2571 		 * on the other end of the cable.  NOTE that the
2572 		 * A_WAIT_VRISE timers are messy with MUSB...
2573 		 */
2574 		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2575 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2576 			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2577 		break;
2578 	default:
2579 		break;
2580 	}
2581 
2582 	if (musb->is_active) {
2583 		WARNING("trying to suspend as %s while active\n",
2584 				usb_otg_state_string(musb->xceiv->otg->state));
2585 		return -EBUSY;
2586 	} else
2587 		return 0;
2588 }
2589 
2590 static int musb_bus_resume(struct usb_hcd *hcd)
2591 {
2592 	struct musb *musb = hcd_to_musb(hcd);
2593 
2594 	if (musb->config &&
2595 	    musb->config->host_port_deassert_reset_at_resume)
2596 		musb_port_reset(musb, false);
2597 
2598 	return 0;
2599 }
2600 
2601 #ifndef CONFIG_MUSB_PIO_ONLY
2602 
2603 #define MUSB_USB_DMA_ALIGN 4
2604 
2605 struct musb_temp_buffer {
2606 	void *kmalloc_ptr;
2607 	void *old_xfer_buffer;
2608 	u8 data[0];
2609 };
2610 
2611 static void musb_free_temp_buffer(struct urb *urb)
2612 {
2613 	enum dma_data_direction dir;
2614 	struct musb_temp_buffer *temp;
2615 	size_t length;
2616 
2617 	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2618 		return;
2619 
2620 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2621 
2622 	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2623 			    data);
2624 
2625 	if (dir == DMA_FROM_DEVICE) {
2626 		if (usb_pipeisoc(urb->pipe))
2627 			length = urb->transfer_buffer_length;
2628 		else
2629 			length = urb->actual_length;
2630 
2631 		memcpy(temp->old_xfer_buffer, temp->data, length);
2632 	}
2633 	urb->transfer_buffer = temp->old_xfer_buffer;
2634 	kfree(temp->kmalloc_ptr);
2635 
2636 	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2637 }
2638 
2639 static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2640 {
2641 	enum dma_data_direction dir;
2642 	struct musb_temp_buffer *temp;
2643 	void *kmalloc_ptr;
2644 	size_t kmalloc_size;
2645 
2646 	if (urb->num_sgs || urb->sg ||
2647 	    urb->transfer_buffer_length == 0 ||
2648 	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2649 		return 0;
2650 
2651 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2652 
2653 	/* Allocate a buffer with enough padding for alignment */
2654 	kmalloc_size = urb->transfer_buffer_length +
2655 		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2656 
2657 	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2658 	if (!kmalloc_ptr)
2659 		return -ENOMEM;
2660 
2661 	/* Position our struct temp_buffer such that data is aligned */
2662 	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2663 
2664 
2665 	temp->kmalloc_ptr = kmalloc_ptr;
2666 	temp->old_xfer_buffer = urb->transfer_buffer;
2667 	if (dir == DMA_TO_DEVICE)
2668 		memcpy(temp->data, urb->transfer_buffer,
2669 		       urb->transfer_buffer_length);
2670 	urb->transfer_buffer = temp->data;
2671 
2672 	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2673 
2674 	return 0;
2675 }
2676 
2677 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2678 				      gfp_t mem_flags)
2679 {
2680 	struct musb	*musb = hcd_to_musb(hcd);
2681 	int ret;
2682 
2683 	/*
2684 	 * The DMA engine in RTL1.8 and above cannot handle
2685 	 * DMA addresses that are not aligned to a 4 byte boundary.
2686 	 * For such engine implemented (un)map_urb_for_dma hooks.
2687 	 * Do not use these hooks for RTL<1.8
2688 	 */
2689 	if (musb->hwvers < MUSB_HWVERS_1800)
2690 		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2691 
2692 	ret = musb_alloc_temp_buffer(urb, mem_flags);
2693 	if (ret)
2694 		return ret;
2695 
2696 	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2697 	if (ret)
2698 		musb_free_temp_buffer(urb);
2699 
2700 	return ret;
2701 }
2702 
2703 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2704 {
2705 	struct musb	*musb = hcd_to_musb(hcd);
2706 
2707 	usb_hcd_unmap_urb_for_dma(hcd, urb);
2708 
2709 	/* Do not use this hook for RTL<1.8 (see description above) */
2710 	if (musb->hwvers < MUSB_HWVERS_1800)
2711 		return;
2712 
2713 	musb_free_temp_buffer(urb);
2714 }
2715 #endif /* !CONFIG_MUSB_PIO_ONLY */
2716 
2717 static const struct hc_driver musb_hc_driver = {
2718 	.description		= "musb-hcd",
2719 	.product_desc		= "MUSB HDRC host driver",
2720 	.hcd_priv_size		= sizeof(struct musb *),
2721 	.flags			= HCD_USB2 | HCD_MEMORY,
2722 
2723 	/* not using irq handler or reset hooks from usbcore, since
2724 	 * those must be shared with peripheral code for OTG configs
2725 	 */
2726 
2727 	.start			= musb_h_start,
2728 	.stop			= musb_h_stop,
2729 
2730 	.get_frame_number	= musb_h_get_frame_number,
2731 
2732 	.urb_enqueue		= musb_urb_enqueue,
2733 	.urb_dequeue		= musb_urb_dequeue,
2734 	.endpoint_disable	= musb_h_disable,
2735 
2736 #ifndef CONFIG_MUSB_PIO_ONLY
2737 	.map_urb_for_dma	= musb_map_urb_for_dma,
2738 	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
2739 #endif
2740 
2741 	.hub_status_data	= musb_hub_status_data,
2742 	.hub_control		= musb_hub_control,
2743 	.bus_suspend		= musb_bus_suspend,
2744 	.bus_resume		= musb_bus_resume,
2745 	/* .start_port_reset	= NULL, */
2746 	/* .hub_irq_enable	= NULL, */
2747 };
2748 
2749 int musb_host_alloc(struct musb *musb)
2750 {
2751 	struct device	*dev = musb->controller;
2752 
2753 	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2754 	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2755 	if (!musb->hcd)
2756 		return -EINVAL;
2757 
2758 	*musb->hcd->hcd_priv = (unsigned long) musb;
2759 	musb->hcd->self.uses_pio_for_control = 1;
2760 	musb->hcd->uses_new_polling = 1;
2761 	musb->hcd->has_tt = 1;
2762 
2763 	return 0;
2764 }
2765 
2766 void musb_host_cleanup(struct musb *musb)
2767 {
2768 	if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2769 		return;
2770 	usb_remove_hcd(musb->hcd);
2771 }
2772 
2773 void musb_host_free(struct musb *musb)
2774 {
2775 	usb_put_hcd(musb->hcd);
2776 }
2777 
2778 int musb_host_setup(struct musb *musb, int power_budget)
2779 {
2780 	int ret;
2781 	struct usb_hcd *hcd = musb->hcd;
2782 
2783 	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
2784 		MUSB_HST_MODE(musb);
2785 		musb->xceiv->otg->default_a = 1;
2786 		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2787 	}
2788 	otg_set_host(musb->xceiv->otg, &hcd->self);
2789 	hcd->self.otg_port = 1;
2790 	musb->xceiv->otg->host = &hcd->self;
2791 	hcd->power_budget = 2 * (power_budget ? : 250);
2792 
2793 	ret = usb_add_hcd(hcd, 0, 0);
2794 	if (ret < 0)
2795 		return ret;
2796 
2797 	device_wakeup_enable(hcd->self.controller);
2798 	return 0;
2799 }
2800 
2801 void musb_host_resume_root_hub(struct musb *musb)
2802 {
2803 	usb_hcd_resume_root_hub(musb->hcd);
2804 }
2805 
2806 void musb_host_poke_root_hub(struct musb *musb)
2807 {
2808 	MUSB_HST_MODE(musb);
2809 	if (musb->hcd->status_urb)
2810 		usb_hcd_poll_rh_status(musb->hcd);
2811 	else
2812 		usb_hcd_resume_root_hub(musb->hcd);
2813 }
2814