xref: /openbmc/linux/drivers/usb/musb/musb_core.c (revision 6dfcd296)
1 /*
2  * MUSB OTG driver core code
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
25  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34 
35 /*
36  * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
37  *
38  * This consists of a Host Controller Driver (HCD) and a peripheral
39  * controller driver implementing the "Gadget" API; OTG support is
40  * in the works.  These are normal Linux-USB controller drivers which
41  * use IRQs and have no dedicated thread.
42  *
43  * This version of the driver has only been used with products from
44  * Texas Instruments.  Those products integrate the Inventra logic
45  * with other DMA, IRQ, and bus modules, as well as other logic that
46  * needs to be reflected in this driver.
47  *
48  *
49  * NOTE:  the original Mentor code here was pretty much a collection
50  * of mechanisms that don't seem to have been fully integrated/working
51  * for *any* Linux kernel version.  This version targets current kernels.
52  * Key open issues include:
53  *
54  *  - Lack of host-side transaction scheduling, for all transfer types.
55  *    The hardware doesn't do it; instead, software must.
56  *
57  *    This is not an issue for OTG devices that don't support external
58  *    hubs, but for more "normal" USB hosts it's a user issue that the
59  *    "multipoint" support doesn't scale in the expected ways.  That
60  *    includes DaVinci EVM in a common non-OTG mode.
61  *
62  *      * Control and bulk use dedicated endpoints, and there's as
63  *        yet no mechanism to either (a) reclaim the hardware when
64  *        peripherals are NAKing, which gets complicated with bulk
65  *        endpoints, or (b) use more than a single bulk endpoint in
66  *        each direction.
67  *
68  *        RESULT:  one device may be perceived as blocking another one.
69  *
70  *      * Interrupt and isochronous will dynamically allocate endpoint
71  *        hardware, but (a) there's no record keeping for bandwidth;
72  *        (b) in the common case that few endpoints are available, there
73  *        is no mechanism to reuse endpoints to talk to multiple devices.
74  *
75  *        RESULT:  At one extreme, bandwidth can be overcommitted in
76  *        some hardware configurations, and no faults will be reported.
77  *        At the other extreme, the bandwidth capabilities which do
78  *        exist tend to be severely undercommitted.  You can't yet hook
79  *        up both a keyboard and a mouse to an external USB hub.
80  */
81 
82 /*
83  * This gets many kinds of configuration information:
84  *	- Kconfig for everything user-configurable
85  *	- platform_device for addressing, irq, and platform_data
86  *	- platform_data is mostly for board-specific information
87  *	  (plus, recently, SoC or family details)
88  *
89  * Most of the conditional compilation will (someday) vanish.
90  */
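/*
 * For illustration only: a minimal sketch of the board-specific side
 * mentioned above, i.e. a hypothetical board file handing this driver
 * its platform_data.  Field values are examples, not requirements.
 *
 *	static struct musb_hdrc_config example_config = {
 *		.multipoint	= 1,
 *		.num_eps	= 16,
 *		.ram_bits	= 12,
 *	};
 *
 *	static struct musb_hdrc_platform_data example_pdata = {
 *		.mode		= MUSB_OTG,
 *		.config		= &example_config,
 *		.power		= 250,	// in 2 mA units, i.e. 500 mA
 *	};
 *
 * The board/glue code attaches example_pdata to the "musb-hdrc"
 * platform_device; this file reads it back via dev_get_platdata().
 */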
91 
92 #include <linux/module.h>
93 #include <linux/kernel.h>
94 #include <linux/sched.h>
95 #include <linux/slab.h>
96 #include <linux/list.h>
97 #include <linux/kobject.h>
98 #include <linux/prefetch.h>
99 #include <linux/platform_device.h>
100 #include <linux/io.h>
101 #include <linux/dma-mapping.h>
102 #include <linux/usb.h>
103 
104 #include "musb_core.h"
105 #include "musb_trace.h"
106 
107 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
108 
109 
110 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
111 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
112 
113 #define MUSB_VERSION "6.0"
114 
115 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
116 
117 #define MUSB_DRIVER_NAME "musb-hdrc"
118 const char musb_driver_name[] = MUSB_DRIVER_NAME;
119 
120 MODULE_DESCRIPTION(DRIVER_INFO);
121 MODULE_AUTHOR(DRIVER_AUTHOR);
122 MODULE_LICENSE("GPL");
123 MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
124 
125 
126 /*-------------------------------------------------------------------------*/
127 
128 static inline struct musb *dev_to_musb(struct device *dev)
129 {
130 	return dev_get_drvdata(dev);
131 }
132 
133 /*-------------------------------------------------------------------------*/
134 
135 #ifndef CONFIG_BLACKFIN
136 static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
137 {
138 	void __iomem *addr = phy->io_priv;
139 	int	i = 0;
140 	u8	r;
141 	u8	power;
142 	int	ret;
143 
144 	pm_runtime_get_sync(phy->io_dev);
145 
146 	/* Make sure the transceiver is not in low power mode */
147 	power = musb_readb(addr, MUSB_POWER);
148 	power &= ~MUSB_POWER_SUSPENDM;
149 	musb_writeb(addr, MUSB_POWER, power);
150 
151 	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
152 	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
153 	 */
154 
155 	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
156 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
157 			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
158 
159 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
160 				& MUSB_ULPI_REG_CMPLT)) {
161 		i++;
162 		if (i == 10000) {
163 			ret = -ETIMEDOUT;
164 			goto out;
165 		}
166 
167 	}
168 	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
169 	r &= ~MUSB_ULPI_REG_CMPLT;
170 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
171 
172 	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
173 
174 out:
175 	pm_runtime_put(phy->io_dev);
176 
177 	return ret;
178 }
179 
180 static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
181 {
182 	void __iomem *addr = phy->io_priv;
183 	int	i = 0;
184 	u8	r = 0;
185 	u8	power;
186 	int	ret = 0;
187 
188 	pm_runtime_get_sync(phy->io_dev);
189 
190 	/* Make sure the transceiver is not in low power mode */
191 	power = musb_readb(addr, MUSB_POWER);
192 	power &= ~MUSB_POWER_SUSPENDM;
193 	musb_writeb(addr, MUSB_POWER, power);
194 
195 	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
196 	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
197 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
198 
199 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
200 				& MUSB_ULPI_REG_CMPLT)) {
201 		i++;
202 		if (i == 10000) {
203 			ret = -ETIMEDOUT;
204 			goto out;
205 		}
206 	}
207 
208 	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
209 	r &= ~MUSB_ULPI_REG_CMPLT;
210 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
211 
212 out:
213 	pm_runtime_put(phy->io_dev);
214 
215 	return ret;
216 }
217 #else
218 #define musb_ulpi_read		NULL
219 #define musb_ulpi_write		NULL
220 #endif
221 
222 static struct usb_phy_io_ops musb_ulpi_access = {
223 	.read = musb_ulpi_read,
224 	.write = musb_ulpi_write,
225 };
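/*
 * These ops back the generic usb_phy_io_read()/usb_phy_io_write()
 * helpers, so PHY users can access ULPI registers without knowing
 * MUSB's register interface.  Rough sketch (the ULPI register offset
 * is just an example):
 *
 *	int ret = usb_phy_io_read(musb->xceiv, 0x04);
 *	if (ret < 0)
 *		;	// e.g. -ETIMEDOUT if the ULPI access never completed
 */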
226 
227 /*-------------------------------------------------------------------------*/
228 
229 static u32 musb_default_fifo_offset(u8 epnum)
230 {
231 	return 0x20 + (epnum * 4);
232 }
233 
234 /* "flat" mapping: each endpoint has its own i/o address */
235 static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
236 {
237 }
238 
239 static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
240 {
241 	return 0x100 + (0x10 * epnum) + offset;
242 }
243 
244 /* "indexed" mapping: INDEX register controls register bank select */
245 static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
246 {
247 	musb_writeb(mbase, MUSB_INDEX, epnum);
248 }
249 
250 static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
251 {
252 	return 0x10 + offset;
253 }
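/*
 * Worked example of the two register mapping schemes, for endpoint 2
 * and a register at offset 0x2 inside the per-endpoint block:
 *
 *	flat:    address = mbase + 0x100 + (0x10 * 2) + 0x2 = mbase + 0x122
 *	indexed: musb_writeb(mbase, MUSB_INDEX, 2), then
 *	         address = mbase + 0x10 + 0x2 = mbase + 0x12
 *
 * The musb_ep_select()/musb->io.ep_offset hooks hide this difference
 * from the rest of the driver.
 */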
254 
255 static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
256 {
257 	return 0x80 + (0x08 * epnum) + offset;
258 }
259 
260 static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
261 {
262 	u8 data =  __raw_readb(addr + offset);
263 
264 	trace_musb_readb(__builtin_return_address(0), addr, offset, data);
265 	return data;
266 }
267 
268 static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
269 {
270 	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
271 	__raw_writeb(data, addr + offset);
272 }
273 
274 static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
275 {
276 	u16 data = __raw_readw(addr + offset);
277 
278 	trace_musb_readw(__builtin_return_address(0), addr, offset, data);
279 	return data;
280 }
281 
282 static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
283 {
284 	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
285 	__raw_writew(data, addr + offset);
286 }
287 
288 static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
289 {
290 	u32 data = __raw_readl(addr + offset);
291 
292 	trace_musb_readl(__builtin_return_address(0), addr, offset, data);
293 	return data;
294 }
295 
296 static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
297 {
298 	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
299 	__raw_writel(data, addr + offset);
300 }
301 
302 /*
303  * Load an endpoint's FIFO
304  */
305 static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
306 				    const u8 *src)
307 {
308 	struct musb *musb = hw_ep->musb;
309 	void __iomem *fifo = hw_ep->fifo;
310 
311 	if (unlikely(len == 0))
312 		return;
313 
314 	prefetch((u8 *)src);
315 
316 	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
317 			'T', hw_ep->epnum, fifo, len, src);
318 
319 	/* we can't assume unaligned reads work */
320 	if (likely((0x01 & (unsigned long) src) == 0)) {
321 		u16	index = 0;
322 
323 		/* best case is 32bit-aligned source address */
324 		if ((0x02 & (unsigned long) src) == 0) {
325 			if (len >= 4) {
326 				iowrite32_rep(fifo, src + index, len >> 2);
327 				index += len & ~0x03;
328 			}
329 			if (len & 0x02) {
330 				__raw_writew(*(u16 *)&src[index], fifo);
331 				index += 2;
332 			}
333 		} else {
334 			if (len >= 2) {
335 				iowrite16_rep(fifo, src + index, len >> 1);
336 				index += len & ~0x01;
337 			}
338 		}
339 		if (len & 0x01)
340 			__raw_writeb(src[index], fifo);
341 	} else  {
342 		/* byte aligned */
343 		iowrite8_rep(fifo, src, len);
344 	}
345 }
346 
347 /*
348  * Unload an endpoint's FIFO
349  */
350 static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
351 {
352 	struct musb *musb = hw_ep->musb;
353 	void __iomem *fifo = hw_ep->fifo;
354 
355 	if (unlikely(len == 0))
356 		return;
357 
358 	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
359 			'R', hw_ep->epnum, fifo, len, dst);
360 
361 	/* we can't assume unaligned writes work */
362 	if (likely((0x01 & (unsigned long) dst) == 0)) {
363 		u16	index = 0;
364 
365 		/* best case is 32bit-aligned destination address */
366 		if ((0x02 & (unsigned long) dst) == 0) {
367 			if (len >= 4) {
368 				ioread32_rep(fifo, dst, len >> 2);
369 				index = len & ~0x03;
370 			}
371 			if (len & 0x02) {
372 				*(u16 *)&dst[index] = __raw_readw(fifo);
373 				index += 2;
374 			}
375 		} else {
376 			if (len >= 2) {
377 				ioread16_rep(fifo, dst, len >> 1);
378 				index = len & ~0x01;
379 			}
380 		}
381 		if (len & 0x01)
382 			dst[index] = __raw_readb(fifo);
383 	} else  {
384 		/* byte aligned */
385 		ioread8_rep(fifo, dst, len);
386 	}
387 }
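/*
 * The alignment dispatch above, worked through for a 7 byte transfer:
 *
 *	buffer 4-byte aligned:    one 32-bit rep access (4 bytes), then
 *	                          one 16-bit access, then one 8-bit access
 *	buffer 2-byte aligned:    three 16-bit rep accesses (6 bytes),
 *	                          then one 8-bit access
 *	buffer at an odd address: seven 8-bit rep accesses
 *
 * The split exists only because we can't assume unaligned CPU accesses
 * to the source/destination buffer are safe; the FIFO register itself
 * is accessed at whatever width we pick.
 */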
388 
389 /*
390  * Old style IO functions
391  */
392 u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
393 EXPORT_SYMBOL_GPL(musb_readb);
394 
395 void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
396 EXPORT_SYMBOL_GPL(musb_writeb);
397 
398 u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
399 EXPORT_SYMBOL_GPL(musb_readw);
400 
401 void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
402 EXPORT_SYMBOL_GPL(musb_writew);
403 
404 u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
405 EXPORT_SYMBOL_GPL(musb_readl);
406 
407 void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
408 EXPORT_SYMBOL_GPL(musb_writel);
409 
410 #ifndef CONFIG_MUSB_PIO_ONLY
411 struct dma_controller *
412 (*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
413 EXPORT_SYMBOL(musb_dma_controller_create);
414 
415 void (*musb_dma_controller_destroy)(struct dma_controller *c);
416 EXPORT_SYMBOL(musb_dma_controller_destroy);
417 #endif
418 
419 /*
420  * New style IO functions
421  */
422 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
423 {
424 	return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
425 }
426 
427 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
428 {
429 	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
430 }
431 
432 /*-------------------------------------------------------------------------*/
433 
434 /* for high speed test mode; see USB 2.0 spec 7.1.20 */
435 static const u8 musb_test_packet[53] = {
436 	/* implicit SYNC then DATA0 to start */
437 
438 	/* JKJKJKJK x9 */
439 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
440 	/* JJKKJJKK x8 */
441 	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
442 	/* JJJJKKKK x8 */
443 	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
444 	/* JJJJJJJKKKKKKK x8 */
445 	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
446 	/* JJJJJJJK x8 */
447 	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
448 	/* JKKKKKKK x10, JK */
449 	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
450 
451 	/* implicit CRC16 then EOP to end */
452 };
453 
454 void musb_load_testpacket(struct musb *musb)
455 {
456 	void __iomem	*regs = musb->endpoints[0].regs;
457 
458 	musb_ep_select(musb->mregs, 0);
459 	musb_write_fifo(musb->control_ep,
460 			sizeof(musb_test_packet), musb_test_packet);
461 	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
462 }
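/*
 * Sketch of how this pairs with actually entering test mode (the root
 * hub's SetPortFeature(PORT_TEST) handling in musb_virthub.c does the
 * equivalent):
 *
 *	musb_load_testpacket(musb);
 *	musb_writeb(musb->mregs, MUSB_TESTMODE, MUSB_TEST_PACKET);
 *
 * The packet above then repeats on the bus until the controller leaves
 * test mode, per USB 2.0 section 7.1.20.
 */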
463 
464 /*-------------------------------------------------------------------------*/
465 
466 /*
467  * Handles OTG hnp timeouts, such as b_ase0_brst
468  */
469 static void musb_otg_timer_func(unsigned long data)
470 {
471 	struct musb	*musb = (struct musb *)data;
472 	unsigned long	flags;
473 
474 	spin_lock_irqsave(&musb->lock, flags);
475 	switch (musb->xceiv->otg->state) {
476 	case OTG_STATE_B_WAIT_ACON:
477 		musb_dbg(musb,
478 			"HNP: b_wait_acon timeout; back to b_peripheral");
479 		musb_g_disconnect(musb);
480 		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
481 		musb->is_active = 0;
482 		break;
483 	case OTG_STATE_A_SUSPEND:
484 	case OTG_STATE_A_WAIT_BCON:
485 		musb_dbg(musb, "HNP: %s timeout",
486 			usb_otg_state_string(musb->xceiv->otg->state));
487 		musb_platform_set_vbus(musb, 0);
488 		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
489 		break;
490 	default:
491 		musb_dbg(musb, "HNP: Unhandled mode %s",
492 			usb_otg_state_string(musb->xceiv->otg->state));
493 	}
494 	spin_unlock_irqrestore(&musb->lock, flags);
495 }
496 
497 /*
498  * Stops the HNP transition. Caller must take care of locking.
499  */
500 void musb_hnp_stop(struct musb *musb)
501 {
502 	struct usb_hcd	*hcd = musb->hcd;
503 	void __iomem	*mbase = musb->mregs;
504 	u8	reg;
505 
506 	musb_dbg(musb, "HNP: stop from %s",
507 			usb_otg_state_string(musb->xceiv->otg->state));
508 
509 	switch (musb->xceiv->otg->state) {
510 	case OTG_STATE_A_PERIPHERAL:
511 		musb_g_disconnect(musb);
512 		musb_dbg(musb, "HNP: back to %s",
513 			usb_otg_state_string(musb->xceiv->otg->state));
514 		break;
515 	case OTG_STATE_B_HOST:
516 		musb_dbg(musb, "HNP: Disabling HR");
517 		if (hcd)
518 			hcd->self.is_b_host = 0;
519 		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
520 		MUSB_DEV_MODE(musb);
521 		reg = musb_readb(mbase, MUSB_POWER);
522 		reg |= MUSB_POWER_SUSPENDM;
523 		musb_writeb(mbase, MUSB_POWER, reg);
524 		/* REVISIT: Start SESSION_REQUEST here? */
525 		break;
526 	default:
527 		musb_dbg(musb, "HNP: Stopping in unknown state %s",
528 			usb_otg_state_string(musb->xceiv->otg->state));
529 	}
530 
531 	/*
532 	 * When returning to A state after HNP, avoid hub_port_rebounce(),
533 	 * which causes occasional OPT A "Did not receive reset after connect"
534 	 * errors.
535 	 */
536 	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
537 }
538 
539 static void musb_recover_from_babble(struct musb *musb);
540 
541 /*
542  * Interrupt Service Routine to record USB "global" interrupts.
543  * Since these do not happen often and signify things of
544  * paramount importance, it seems OK to check them individually;
545  * the order of the tests is specified in the manual.
546  *
547  * @param musb instance pointer
548  * @param int_usb register contents
549  * @param devctl DEVCTL register contents
551  */
552 
553 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
554 				u8 devctl)
555 {
556 	irqreturn_t handled = IRQ_NONE;
557 
558 	musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
559 
560 	/* in host mode, the peripheral may issue remote wakeup.
561 	 * in peripheral mode, the host may resume the link.
562 	 * spurious RESUME irqs happen too, paired with SUSPEND.
563 	 */
564 	if (int_usb & MUSB_INTR_RESUME) {
565 		handled = IRQ_HANDLED;
566 		musb_dbg(musb, "RESUME (%s)",
567 				usb_otg_state_string(musb->xceiv->otg->state));
568 
569 		if (devctl & MUSB_DEVCTL_HM) {
570 			switch (musb->xceiv->otg->state) {
571 			case OTG_STATE_A_SUSPEND:
572 				/* remote wakeup?  later, GetPortStatus
573 				 * will stop RESUME signaling
574 				 */
575 
576 				musb->port1_status |=
577 						(USB_PORT_STAT_C_SUSPEND << 16)
578 						| MUSB_PORT_STAT_RESUME;
579 				musb->rh_timer = jiffies
580 					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
581 				musb->need_finish_resume = 1;
582 
583 				musb->xceiv->otg->state = OTG_STATE_A_HOST;
584 				musb->is_active = 1;
585 				musb_host_resume_root_hub(musb);
586 				break;
587 			case OTG_STATE_B_WAIT_ACON:
588 				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
589 				musb->is_active = 1;
590 				MUSB_DEV_MODE(musb);
591 				break;
592 			default:
593 				WARNING("bogus %s RESUME (%s)\n",
594 					"host",
595 					usb_otg_state_string(musb->xceiv->otg->state));
596 			}
597 		} else {
598 			switch (musb->xceiv->otg->state) {
599 			case OTG_STATE_A_SUSPEND:
600 				/* possibly DISCONNECT is upcoming */
601 				musb->xceiv->otg->state = OTG_STATE_A_HOST;
602 				musb_host_resume_root_hub(musb);
603 				break;
604 			case OTG_STATE_B_WAIT_ACON:
605 			case OTG_STATE_B_PERIPHERAL:
606 				/* disconnect while suspended?  we may
607 				 * not get a disconnect irq...
608 				 */
609 				if ((devctl & MUSB_DEVCTL_VBUS)
610 						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
611 						) {
612 					musb->int_usb |= MUSB_INTR_DISCONNECT;
613 					musb->int_usb &= ~MUSB_INTR_SUSPEND;
614 					break;
615 				}
616 				musb_g_resume(musb);
617 				break;
618 			case OTG_STATE_B_IDLE:
619 				musb->int_usb &= ~MUSB_INTR_SUSPEND;
620 				break;
621 			default:
622 				WARNING("bogus %s RESUME (%s)\n",
623 					"peripheral",
624 					usb_otg_state_string(musb->xceiv->otg->state));
625 			}
626 		}
627 	}
628 
629 	/* see manual for the order of the tests */
630 	if (int_usb & MUSB_INTR_SESSREQ) {
631 		void __iomem *mbase = musb->mregs;
632 
633 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
634 				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
635 			musb_dbg(musb, "SessReq while on B state");
636 			return IRQ_HANDLED;
637 		}
638 
639 		musb_dbg(musb, "SESSION_REQUEST (%s)",
640 			usb_otg_state_string(musb->xceiv->otg->state));
641 
642 		/* IRQ arrives from ID pin sense or (later, if VBUS power
643 		 * is removed) SRP.  responses are time critical:
644 		 *  - turn on VBUS (with silicon-specific mechanism)
645 		 *  - go through A_WAIT_VRISE
646 		 *  - ... to A_WAIT_BCON.
647 		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
648 		 */
649 		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
650 		musb->ep0_stage = MUSB_EP0_START;
651 		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
652 		MUSB_HST_MODE(musb);
653 		musb_platform_set_vbus(musb, 1);
654 
655 		handled = IRQ_HANDLED;
656 	}
657 
658 	if (int_usb & MUSB_INTR_VBUSERROR) {
659 		int	ignore = 0;
660 
661 		/* During connection as an A-Device, we may see short
662 		 * current spikes causing voltage drops, because of cable
663 		 * and peripheral capacitance combined with vbus draw.
664 		 * (So: less common with truly self-powered devices, where
665 		 * vbus doesn't act like a power supply.)
666 		 *
667 		 * Such spikes are short; usually less than ~500 usec, max
668 		 * of ~2 msec.  That is, they're not sustained overcurrent
669 		 * errors, though they're reported using VBUSERROR irqs.
670 		 *
671 		 * Workarounds:  (a) hardware: use self powered devices.
672 		 * (b) software:  ignore non-repeated VBUS errors.
673 		 *
674 		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
675 		 * make trouble here, keeping VBUS < 4.4V ?
676 		 */
677 		switch (musb->xceiv->otg->state) {
678 		case OTG_STATE_A_HOST:
679 			/* recovery is dicey once we've gotten past the
680 			 * initial stages of enumeration, but if VBUS
681 			 * stayed ok at the other end of the link, and
682 			 * another reset is due (at least for high speed,
683 			 * to redo the chirp etc), it might work OK...
684 			 */
685 		case OTG_STATE_A_WAIT_BCON:
686 		case OTG_STATE_A_WAIT_VRISE:
687 			if (musb->vbuserr_retry) {
688 				void __iomem *mbase = musb->mregs;
689 
690 				musb->vbuserr_retry--;
691 				ignore = 1;
692 				devctl |= MUSB_DEVCTL_SESSION;
693 				musb_writeb(mbase, MUSB_DEVCTL, devctl);
694 			} else {
695 				musb->port1_status |=
696 					  USB_PORT_STAT_OVERCURRENT
697 					| (USB_PORT_STAT_C_OVERCURRENT << 16);
698 			}
699 			break;
700 		default:
701 			break;
702 		}
703 
704 		dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
705 				"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
706 				usb_otg_state_string(musb->xceiv->otg->state),
707 				devctl,
708 				({ char *s;
709 				switch (devctl & MUSB_DEVCTL_VBUS) {
710 				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
711 					s = "<SessEnd"; break;
712 				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
713 					s = "<AValid"; break;
714 				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
715 					s = "<VBusValid"; break;
716 				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
717 				default:
718 					s = "VALID"; break;
719 				} s; }),
720 				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
721 				musb->port1_status);
722 
723 		/* go through A_WAIT_VFALL then start a new session */
724 		if (!ignore)
725 			musb_platform_set_vbus(musb, 0);
726 		handled = IRQ_HANDLED;
727 	}
728 
729 	if (int_usb & MUSB_INTR_SUSPEND) {
730 		musb_dbg(musb, "SUSPEND (%s) devctl %02x",
731 			usb_otg_state_string(musb->xceiv->otg->state), devctl);
732 		handled = IRQ_HANDLED;
733 
734 		switch (musb->xceiv->otg->state) {
735 		case OTG_STATE_A_PERIPHERAL:
736 			/* We also come here if the cable is removed, since
737 			 * this silicon doesn't report ID-no-longer-grounded.
738 			 *
739 			 * We depend on T(a_wait_bcon) to shut us down, and
740 			 * hope users don't do anything dicey during this
741 			 * undesired detour through A_WAIT_BCON.
742 			 */
743 			musb_hnp_stop(musb);
744 			musb_host_resume_root_hub(musb);
745 			musb_root_disconnect(musb);
746 			musb_platform_try_idle(musb, jiffies
747 					+ msecs_to_jiffies(musb->a_wait_bcon
748 						? : OTG_TIME_A_WAIT_BCON));
749 
750 			break;
751 		case OTG_STATE_B_IDLE:
752 			if (!musb->is_active)
753 				break;
754 		case OTG_STATE_B_PERIPHERAL:
755 			musb_g_suspend(musb);
756 			musb->is_active = musb->g.b_hnp_enable;
757 			if (musb->is_active) {
758 				musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
759 				musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
760 				mod_timer(&musb->otg_timer, jiffies
761 					+ msecs_to_jiffies(
762 							OTG_TIME_B_ASE0_BRST));
763 			}
764 			break;
765 		case OTG_STATE_A_WAIT_BCON:
766 			if (musb->a_wait_bcon != 0)
767 				musb_platform_try_idle(musb, jiffies
768 					+ msecs_to_jiffies(musb->a_wait_bcon));
769 			break;
770 		case OTG_STATE_A_HOST:
771 			musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
772 			musb->is_active = musb->hcd->self.b_hnp_enable;
773 			break;
774 		case OTG_STATE_B_HOST:
775 			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
776 			musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
777 			break;
778 		default:
779 			/* "should not happen" */
780 			musb->is_active = 0;
781 			break;
782 		}
783 	}
784 
785 	if (int_usb & MUSB_INTR_CONNECT) {
786 		struct usb_hcd *hcd = musb->hcd;
787 
788 		handled = IRQ_HANDLED;
789 		musb->is_active = 1;
790 
791 		musb->ep0_stage = MUSB_EP0_START;
792 
793 		musb->intrtxe = musb->epmask;
794 		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
795 		musb->intrrxe = musb->epmask & 0xfffe;
796 		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
797 		musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
798 		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
799 					|USB_PORT_STAT_HIGH_SPEED
800 					|USB_PORT_STAT_ENABLE
801 					);
802 		musb->port1_status |= USB_PORT_STAT_CONNECTION
803 					|(USB_PORT_STAT_C_CONNECTION << 16);
804 
805 		/* high vs full speed is just a guess until after reset */
806 		if (devctl & MUSB_DEVCTL_LSDEV)
807 			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
808 
809 		/* indicate new connection to OTG machine */
810 		switch (musb->xceiv->otg->state) {
811 		case OTG_STATE_B_PERIPHERAL:
812 			if (int_usb & MUSB_INTR_SUSPEND) {
813 				musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
814 				int_usb &= ~MUSB_INTR_SUSPEND;
815 				goto b_host;
816 			} else
817 				musb_dbg(musb, "CONNECT as b_peripheral???");
818 			break;
819 		case OTG_STATE_B_WAIT_ACON:
820 			musb_dbg(musb, "HNP: CONNECT, now b_host");
821 b_host:
822 			musb->xceiv->otg->state = OTG_STATE_B_HOST;
823 			if (musb->hcd)
824 				musb->hcd->self.is_b_host = 1;
825 			del_timer(&musb->otg_timer);
826 			break;
827 		default:
828 			if ((devctl & MUSB_DEVCTL_VBUS)
829 					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
830 				musb->xceiv->otg->state = OTG_STATE_A_HOST;
831 				if (hcd)
832 					hcd->self.is_b_host = 0;
833 			}
834 			break;
835 		}
836 
837 		musb_host_poke_root_hub(musb);
838 
839 		musb_dbg(musb, "CONNECT (%s) devctl %02x",
840 				usb_otg_state_string(musb->xceiv->otg->state), devctl);
841 	}
842 
843 	if (int_usb & MUSB_INTR_DISCONNECT) {
844 		musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
845 				usb_otg_state_string(musb->xceiv->otg->state),
846 				MUSB_MODE(musb), devctl);
847 		handled = IRQ_HANDLED;
848 
849 		switch (musb->xceiv->otg->state) {
850 		case OTG_STATE_A_HOST:
851 		case OTG_STATE_A_SUSPEND:
852 			musb_host_resume_root_hub(musb);
853 			musb_root_disconnect(musb);
854 			if (musb->a_wait_bcon != 0)
855 				musb_platform_try_idle(musb, jiffies
856 					+ msecs_to_jiffies(musb->a_wait_bcon));
857 			break;
858 		case OTG_STATE_B_HOST:
859 			/* REVISIT this behaves for "real disconnect"
860 			 * cases; make sure the other transitions
861 			 * from B_HOST act right too.  The B_HOST code
862 			 * in hnp_stop() is currently not used...
863 			 */
864 			musb_root_disconnect(musb);
865 			if (musb->hcd)
866 				musb->hcd->self.is_b_host = 0;
867 			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
868 			MUSB_DEV_MODE(musb);
869 			musb_g_disconnect(musb);
870 			break;
871 		case OTG_STATE_A_PERIPHERAL:
872 			musb_hnp_stop(musb);
873 			musb_root_disconnect(musb);
874 			/* FALLTHROUGH */
875 		case OTG_STATE_B_WAIT_ACON:
876 			/* FALLTHROUGH */
877 		case OTG_STATE_B_PERIPHERAL:
878 		case OTG_STATE_B_IDLE:
879 			musb_g_disconnect(musb);
880 			break;
881 		default:
882 			WARNING("unhandled DISCONNECT transition (%s)\n",
883 				usb_otg_state_string(musb->xceiv->otg->state));
884 			break;
885 		}
886 	}
887 
888 	/* mentor saves a bit: bus reset and babble share the same irq.
889 	 * only host sees babble; only peripheral sees bus reset.
890 	 */
891 	if (int_usb & MUSB_INTR_RESET) {
892 		handled = IRQ_HANDLED;
893 		if (devctl & MUSB_DEVCTL_HM) {
894 			/*
895 			 * When BABBLE happens, what we can do depends on which
896 			 * platform MUSB is running on, because some platforms
897 			 * implemented proprietary means for 'recovering' from
898 			 * Babble conditions. One such platform is AM335x. In
899 			 * most cases, however, the only thing we can do is
900 			 * drop the session.
901 			 */
902 			dev_err(musb->controller, "Babble\n");
903 
904 			if (is_host_active(musb))
905 				musb_recover_from_babble(musb);
906 		} else {
907 			musb_dbg(musb, "BUS RESET as %s",
908 				usb_otg_state_string(musb->xceiv->otg->state));
909 			switch (musb->xceiv->otg->state) {
910 			case OTG_STATE_A_SUSPEND:
911 				musb_g_reset(musb);
912 				/* FALLTHROUGH */
913 			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
914 				/* never use invalid T(a_wait_bcon) */
915 				musb_dbg(musb, "HNP: in %s, %d msec timeout",
916 					usb_otg_state_string(musb->xceiv->otg->state),
917 					TA_WAIT_BCON(musb));
918 				mod_timer(&musb->otg_timer, jiffies
919 					+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
920 				break;
921 			case OTG_STATE_A_PERIPHERAL:
922 				del_timer(&musb->otg_timer);
923 				musb_g_reset(musb);
924 				break;
925 			case OTG_STATE_B_WAIT_ACON:
926 				musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
927 					usb_otg_state_string(musb->xceiv->otg->state));
928 				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
929 				musb_g_reset(musb);
930 				break;
931 			case OTG_STATE_B_IDLE:
932 				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
933 				/* FALLTHROUGH */
934 			case OTG_STATE_B_PERIPHERAL:
935 				musb_g_reset(musb);
936 				break;
937 			default:
938 				musb_dbg(musb, "Unhandled BUS RESET as %s",
939 					usb_otg_state_string(musb->xceiv->otg->state));
940 			}
941 		}
942 	}
943 
944 #if 0
945 /* REVISIT ... this would be for multiplexing periodic endpoints, or
946  * supporting transfer phasing to prevent exceeding ISO bandwidth
947  * limits of a given frame or microframe.
948  *
949  * It's not needed for peripheral side, which dedicates endpoints;
950  * though it _might_ use SOF irqs for other purposes.
951  *
952  * And it's not currently needed for host side, which also dedicates
953  * endpoints, relies on TX/RX interval registers, and isn't claimed
954  * to support ISO transfers yet.
955  */
956 	if (int_usb & MUSB_INTR_SOF) {
957 		void __iomem *mbase = musb->mregs;
958 		struct musb_hw_ep	*ep;
959 		u8 epnum;
960 		u16 frame;
961 
962 		dev_dbg(musb->controller, "START_OF_FRAME\n");
963 		handled = IRQ_HANDLED;
964 
965 		/* start any periodic Tx transfers waiting for current frame */
966 		frame = musb_readw(mbase, MUSB_FRAME);
967 		ep = musb->endpoints;
968 		for (epnum = 1; (epnum < musb->nr_endpoints)
969 					&& (musb->epmask >= (1 << epnum));
970 				epnum++, ep++) {
971 			/*
972 			 * FIXME handle framecounter wraps (12 bits)
973 			 * eliminate duplicated StartUrb logic
974 			 */
975 			if (ep->dwWaitFrame >= frame) {
976 				ep->dwWaitFrame = 0;
977 				pr_debug("SOF --> periodic TX%s on %d\n",
978 					ep->tx_channel ? " DMA" : "",
979 					epnum);
980 				if (!ep->tx_channel)
981 					musb_h_tx_start(musb, epnum);
982 				else
983 					cppi_hostdma_start(musb, epnum);
984 			}
985 		}		/* end of for loop */
986 	}
987 #endif
988 
989 	schedule_work(&musb->irq_work);
990 
991 	return handled;
992 }
993 
994 /*-------------------------------------------------------------------------*/
995 
996 static void musb_disable_interrupts(struct musb *musb)
997 {
998 	void __iomem	*mbase = musb->mregs;
999 	u16	temp;
1000 
1001 	/* disable interrupts */
1002 	musb_writeb(mbase, MUSB_INTRUSBE, 0);
1003 	musb->intrtxe = 0;
1004 	musb_writew(mbase, MUSB_INTRTXE, 0);
1005 	musb->intrrxe = 0;
1006 	musb_writew(mbase, MUSB_INTRRXE, 0);
1007 
1008 	/*  flush pending interrupts */
1009 	temp = musb_readb(mbase, MUSB_INTRUSB);
1010 	temp = musb_readw(mbase, MUSB_INTRTX);
1011 	temp = musb_readw(mbase, MUSB_INTRRX);
1012 }
1013 
1014 static void musb_enable_interrupts(struct musb *musb)
1015 {
1016 	void __iomem    *regs = musb->mregs;
1017 
1018 	/*  Set INT enable registers, enable interrupts */
1019 	musb->intrtxe = musb->epmask;
1020 	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
1021 	musb->intrrxe = musb->epmask & 0xfffe;
1022 	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
1023 	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
1024 
1025 }
1026 
1027 static void musb_generic_disable(struct musb *musb)
1028 {
1029 	void __iomem	*mbase = musb->mregs;
1030 
1031 	musb_disable_interrupts(musb);
1032 
1033 	/* off */
1034 	musb_writeb(mbase, MUSB_DEVCTL, 0);
1035 }
1036 
1037 /*
1038  * Program the HDRC to start (enable interrupts, dma, etc.).
1039  */
1040 void musb_start(struct musb *musb)
1041 {
1042 	void __iomem    *regs = musb->mregs;
1043 	u8              devctl = musb_readb(regs, MUSB_DEVCTL);
1044 	u8		power;
1045 
1046 	musb_dbg(musb, "<== devctl %02x", devctl);
1047 
1048 	musb_enable_interrupts(musb);
1049 	musb_writeb(regs, MUSB_TESTMODE, 0);
1050 
1051 	power = MUSB_POWER_ISOUPDATE;
1052 	/*
1053 	 * treating UNKNOWN as unspecified maximum speed, in which case
1054 	 * we will default to high-speed.
1055 	 */
1056 	if (musb->config->maximum_speed == USB_SPEED_HIGH ||
1057 			musb->config->maximum_speed == USB_SPEED_UNKNOWN)
1058 		power |= MUSB_POWER_HSENAB;
1059 	musb_writeb(regs, MUSB_POWER, power);
1060 
1061 	musb->is_active = 0;
1062 	devctl = musb_readb(regs, MUSB_DEVCTL);
1063 	devctl &= ~MUSB_DEVCTL_SESSION;
1064 
1065 	/* session started after:
1066 	 * (a) ID-grounded irq, host mode;
1067 	 * (b) vbus present/connect IRQ, peripheral mode;
1068 	 * (c) peripheral initiates, using SRP
1069 	 */
1070 	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
1071 			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
1072 			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
1073 		musb->is_active = 1;
1074 	} else {
1075 		devctl |= MUSB_DEVCTL_SESSION;
1076 	}
1077 
1078 	musb_platform_enable(musb);
1079 	musb_writeb(regs, MUSB_DEVCTL, devctl);
1080 }
1081 
1082 /*
1083  * Make the HDRC stop (disable interrupts, etc.);
1084  * reversible by musb_start
1085  * called on gadget driver unregister
1086  * with controller locked, irqs blocked
1087  * acts as a NOP unless some role activated the hardware
1088  */
1089 void musb_stop(struct musb *musb)
1090 {
1091 	/* stop IRQs, timers, ... */
1092 	musb_platform_disable(musb);
1093 	musb_generic_disable(musb);
1094 	musb_dbg(musb, "HDRC disabled");
1095 
1096 	/* FIXME
1097 	 *  - mark host and/or peripheral drivers unusable/inactive
1098 	 *  - disable DMA (and enable it in HdrcStart)
1099 	 *  - make sure we can musb_start() after musb_stop(); in
1100 	 *    OTG mode, gadget driver module rmmod/modprobe cycles need that
1101 	 *  - ...
1102 	 */
1103 	musb_platform_try_idle(musb, 0);
1104 }
1105 
1106 /*-------------------------------------------------------------------------*/
1107 
1108 /*
1109  * The silicon either has hard-wired endpoint configurations, or else
1110  * "dynamic fifo" sizing.  The driver has support for both, though at this
1111  * writing only the dynamic sizing is very well tested.   Since we switched
1112  * away from compile-time hardware parameters, we can no longer rely on
1113  * dead code elimination to leave only the relevant one in the object file.
1114  *
1115  * We don't currently use dynamic fifo setup capability to do anything
1116  * more than selecting one of a bunch of predefined configurations.
1117  */
1118 static ushort fifo_mode;
1119 
1120 /* "modprobe ... fifo_mode=1" etc */
1121 module_param(fifo_mode, ushort, 0);
1122 MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1123 
1124 /*
1125  * tables defining fifo_mode values.  define more if you like.
1126  * for host side, make sure both halves of ep1 are set up.
1127  */
1128 
1129 /* mode 0 - fits in 2KB */
1130 static struct musb_fifo_cfg mode_0_cfg[] = {
1131 { .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
1132 { .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
1133 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
1134 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1135 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1136 };
1137 
1138 /* mode 1 - fits in 4KB */
1139 static struct musb_fifo_cfg mode_1_cfg[] = {
1140 { .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1141 { .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1142 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1143 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1144 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1145 };
1146 
1147 /* mode 2 - fits in 4KB */
1148 static struct musb_fifo_cfg mode_2_cfg[] = {
1149 { .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
1150 { .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
1151 { .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
1152 { .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
1153 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1154 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1155 };
1156 
1157 /* mode 3 - fits in 4KB */
1158 static struct musb_fifo_cfg mode_3_cfg[] = {
1159 { .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1160 { .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1161 { .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
1162 { .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
1163 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1164 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1165 };
1166 
1167 /* mode 4 - fits in 16KB */
1168 static struct musb_fifo_cfg mode_4_cfg[] = {
1169 { .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
1170 { .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
1171 { .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
1172 { .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
1173 { .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
1174 { .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
1175 { .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
1176 { .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
1177 { .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
1178 { .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
1179 { .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
1180 { .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
1181 { .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
1182 { .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
1183 { .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
1184 { .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
1185 { .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
1186 { .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
1187 { .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
1188 { .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
1189 { .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
1190 { .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
1191 { .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
1192 { .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
1193 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
1194 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1195 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1196 };
1197 
1198 /* mode 5 - fits in 8KB */
1199 static struct musb_fifo_cfg mode_5_cfg[] = {
1200 { .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
1201 { .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
1202 { .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
1203 { .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
1204 { .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
1205 { .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
1206 { .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
1207 { .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
1208 { .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
1209 { .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
1210 { .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
1211 { .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
1212 { .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
1213 { .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
1214 { .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
1215 { .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
1216 { .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
1217 { .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
1218 { .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
1219 { .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
1220 { .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
1221 { .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
1222 { .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
1223 { .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
1224 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1225 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1226 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1227 };
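/*
 * Adding another table is just more of the same; a hypothetical
 * "mode 6" sized for a 2KB FIFO RAM could look like this (it would
 * also need a matching case in ep_config_from_table() below):
 *
 *	static struct musb_fifo_cfg mode_6_cfg[] = {
 *	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
 *	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
 *	{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
 *	};
 *
 * Both halves of ep1 are configured, as the comment above the mode
 * tables requires for host side use.
 */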
1228 
1229 /*
1230  * configure a fifo; for non-shared endpoints, this may be called
1231  * once for a tx fifo and once for an rx fifo.
1232  *
1233  * returns negative errno or offset for next fifo.
1234  */
1235 static int
1236 fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
1237 		const struct musb_fifo_cfg *cfg, u16 offset)
1238 {
1239 	void __iomem	*mbase = musb->mregs;
1240 	int	size = 0;
1241 	u16	maxpacket = cfg->maxpacket;
1242 	u16	c_off = offset >> 3;
1243 	u8	c_size;
1244 
1245 	/* expect hw_ep has already been zero-initialized */
1246 
1247 	size = ffs(max(maxpacket, (u16) 8)) - 1;
1248 	maxpacket = 1 << size;
1249 
1250 	c_size = size - 3;
1251 	if (cfg->mode == BUF_DOUBLE) {
1252 		if ((offset + (maxpacket << 1)) >
1253 				(1 << (musb->config->ram_bits + 2)))
1254 			return -EMSGSIZE;
1255 		c_size |= MUSB_FIFOSZ_DPB;
1256 	} else {
1257 		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
1258 			return -EMSGSIZE;
1259 	}
1260 
1261 	/* configure the FIFO */
1262 	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
1263 
1264 	/* EP0 reserved endpoint for control, bidirectional;
1265 	 * EP1 reserved for bulk, two unidirectional halves.
1266 	 */
1267 	if (hw_ep->epnum == 1)
1268 		musb->bulk_ep = hw_ep;
1269 	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
1270 	switch (cfg->style) {
1271 	case FIFO_TX:
1272 		musb_write_txfifosz(mbase, c_size);
1273 		musb_write_txfifoadd(mbase, c_off);
1274 		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1275 		hw_ep->max_packet_sz_tx = maxpacket;
1276 		break;
1277 	case FIFO_RX:
1278 		musb_write_rxfifosz(mbase, c_size);
1279 		musb_write_rxfifoadd(mbase, c_off);
1280 		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1281 		hw_ep->max_packet_sz_rx = maxpacket;
1282 		break;
1283 	case FIFO_RXTX:
1284 		musb_write_txfifosz(mbase, c_size);
1285 		musb_write_txfifoadd(mbase, c_off);
1286 		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1287 		hw_ep->max_packet_sz_rx = maxpacket;
1288 
1289 		musb_write_rxfifosz(mbase, c_size);
1290 		musb_write_rxfifoadd(mbase, c_off);
1291 		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
1292 		hw_ep->max_packet_sz_tx = maxpacket;
1293 
1294 		hw_ep->is_shared_fifo = true;
1295 		break;
1296 	}
1297 
1298 	/* NOTE rx and tx endpoint irqs aren't managed separately,
1299 	 * which happens to be ok
1300 	 */
1301 	musb->epmask |= (1 << hw_ep->epnum);
1302 
1303 	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
1304 }
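/*
 * Worked example: a 512 byte double-buffered TX fifo placed at RAM
 * offset 64 (i.e. right after ep0's 64 byte fifo):
 *
 *	size	= ffs(512) - 1 = 9, so maxpacket stays 512
 *	c_size	= (9 - 3) | MUSB_FIFOSZ_DPB = 0x06 | 0x10
 *	c_off	= 64 >> 3 = 8		(FIFOADD is in 8 byte units)
 *	return	= 64 + (512 << 1) = 1088, the offset for the next fifo
 */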
1305 
1306 static struct musb_fifo_cfg ep0_cfg = {
1307 	.style = FIFO_RXTX, .maxpacket = 64,
1308 };
1309 
1310 static int ep_config_from_table(struct musb *musb)
1311 {
1312 	const struct musb_fifo_cfg	*cfg;
1313 	unsigned		i, n;
1314 	int			offset;
1315 	struct musb_hw_ep	*hw_ep = musb->endpoints;
1316 
1317 	if (musb->config->fifo_cfg) {
1318 		cfg = musb->config->fifo_cfg;
1319 		n = musb->config->fifo_cfg_size;
1320 		goto done;
1321 	}
1322 
1323 	switch (fifo_mode) {
1324 	default:
1325 		fifo_mode = 0;
1326 		/* FALLTHROUGH */
1327 	case 0:
1328 		cfg = mode_0_cfg;
1329 		n = ARRAY_SIZE(mode_0_cfg);
1330 		break;
1331 	case 1:
1332 		cfg = mode_1_cfg;
1333 		n = ARRAY_SIZE(mode_1_cfg);
1334 		break;
1335 	case 2:
1336 		cfg = mode_2_cfg;
1337 		n = ARRAY_SIZE(mode_2_cfg);
1338 		break;
1339 	case 3:
1340 		cfg = mode_3_cfg;
1341 		n = ARRAY_SIZE(mode_3_cfg);
1342 		break;
1343 	case 4:
1344 		cfg = mode_4_cfg;
1345 		n = ARRAY_SIZE(mode_4_cfg);
1346 		break;
1347 	case 5:
1348 		cfg = mode_5_cfg;
1349 		n = ARRAY_SIZE(mode_5_cfg);
1350 		break;
1351 	}
1352 
1353 	pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
1354 
1356 done:
1357 	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1358 	/* assert(offset > 0) */
1359 
1360 	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
1361 	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1362 	 */
1363 
1364 	for (i = 0; i < n; i++) {
1365 		u8	epn = cfg->hw_ep_num;
1366 
1367 		if (epn >= musb->config->num_eps) {
1368 			pr_debug("%s: invalid ep %d\n",
1369 					musb_driver_name, epn);
1370 			return -EINVAL;
1371 		}
1372 		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1373 		if (offset < 0) {
1374 			pr_debug("%s: mem overrun, ep %d\n",
1375 					musb_driver_name, epn);
1376 			return offset;
1377 		}
1378 		epn++;
1379 		musb->nr_endpoints = max(epn, musb->nr_endpoints);
1380 	}
1381 
1382 	pr_debug("%s: %d/%d max ep, %d/%d memory\n",
1383 			musb_driver_name,
1384 			n + 1, musb->config->num_eps * 2 - 1,
1385 			offset, (1 << (musb->config->ram_bits + 2)));
1386 
1387 	if (!musb->bulk_ep) {
1388 		pr_debug("%s: missing bulk\n", musb_driver_name);
1389 		return -EINVAL;
1390 	}
1391 
1392 	return 0;
1393 }
1394 
1395 
1396 /*
1397  * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1398  * @param musb the controller
1399  */
1400 static int ep_config_from_hw(struct musb *musb)
1401 {
1402 	u8 epnum = 0;
1403 	struct musb_hw_ep *hw_ep;
1404 	void __iomem *mbase = musb->mregs;
1405 	int ret = 0;
1406 
1407 	musb_dbg(musb, "<== static silicon ep config");
1408 
1409 	/* FIXME pick up ep0 maxpacket size */
1410 
1411 	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
1412 		musb_ep_select(mbase, epnum);
1413 		hw_ep = musb->endpoints + epnum;
1414 
1415 		ret = musb_read_fifosize(musb, hw_ep, epnum);
1416 		if (ret < 0)
1417 			break;
1418 
1419 		/* FIXME set up hw_ep->{rx,tx}_double_buffered */
1420 
1421 		/* pick an RX/TX endpoint for bulk */
1422 		if (hw_ep->max_packet_sz_tx < 512
1423 				|| hw_ep->max_packet_sz_rx < 512)
1424 			continue;
1425 
1426 		/* REVISIT:  this algorithm is lazy, we should at least
1427 		 * try to pick a double buffered endpoint.
1428 		 */
1429 		if (musb->bulk_ep)
1430 			continue;
1431 		musb->bulk_ep = hw_ep;
1432 	}
1433 
1434 	if (!musb->bulk_ep) {
1435 		pr_debug("%s: missing bulk\n", musb_driver_name);
1436 		return -EINVAL;
1437 	}
1438 
1439 	return 0;
1440 }
1441 
1442 enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1443 
1444 /* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1445  * configure endpoints, or take their config from silicon
1446  */
1447 static int musb_core_init(u16 musb_type, struct musb *musb)
1448 {
1449 	u8 reg;
1450 	char *type;
1451 	char aInfo[90];
1452 	void __iomem	*mbase = musb->mregs;
1453 	int		status = 0;
1454 	int		i;
1455 
1456 	/* log core options (read using indexed model) */
1457 	reg = musb_read_configdata(mbase);
1458 
1459 	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1460 	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1461 		strcat(aInfo, ", dyn FIFOs");
1462 		musb->dyn_fifo = true;
1463 	}
1464 	if (reg & MUSB_CONFIGDATA_MPRXE) {
1465 		strcat(aInfo, ", bulk combine");
1466 		musb->bulk_combine = true;
1467 	}
1468 	if (reg & MUSB_CONFIGDATA_MPTXE) {
1469 		strcat(aInfo, ", bulk split");
1470 		musb->bulk_split = true;
1471 	}
1472 	if (reg & MUSB_CONFIGDATA_HBRXE) {
1473 		strcat(aInfo, ", HB-ISO Rx");
1474 		musb->hb_iso_rx = true;
1475 	}
1476 	if (reg & MUSB_CONFIGDATA_HBTXE) {
1477 		strcat(aInfo, ", HB-ISO Tx");
1478 		musb->hb_iso_tx = true;
1479 	}
1480 	if (reg & MUSB_CONFIGDATA_SOFTCONE)
1481 		strcat(aInfo, ", SoftConn");
1482 
1483 	pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
1484 
1485 	if (MUSB_CONTROLLER_MHDRC == musb_type) {
1486 		musb->is_multipoint = 1;
1487 		type = "M";
1488 	} else {
1489 		musb->is_multipoint = 0;
1490 		type = "";
1491 #ifndef	CONFIG_USB_OTG_BLACKLIST_HUB
1492 		pr_err("%s: kernel must blacklist external hubs\n",
1493 		       musb_driver_name);
1494 #endif
1495 	}
1496 
1497 	/* log release info */
1498 	musb->hwvers = musb_read_hwvers(mbase);
1499 	pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
1500 		 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
1501 		 MUSB_HWVERS_MINOR(musb->hwvers),
1502 		 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
1503 
1504 	/* configure ep0 */
1505 	musb_configure_ep0(musb);
1506 
1507 	/* discover endpoint configuration */
1508 	musb->nr_endpoints = 1;
1509 	musb->epmask = 1;
1510 
1511 	if (musb->dyn_fifo)
1512 		status = ep_config_from_table(musb);
1513 	else
1514 		status = ep_config_from_hw(musb);
1515 
1516 	if (status < 0)
1517 		return status;
1518 
1519 	/* finish init, and print endpoint config */
1520 	for (i = 0; i < musb->nr_endpoints; i++) {
1521 		struct musb_hw_ep	*hw_ep = musb->endpoints + i;
1522 
1523 		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
1524 #if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
1525 		if (musb->io.quirks & MUSB_IN_TUSB) {
1526 			hw_ep->fifo_async = musb->async + 0x400 +
1527 				musb->io.fifo_offset(i);
1528 			hw_ep->fifo_sync = musb->sync + 0x400 +
1529 				musb->io.fifo_offset(i);
1530 			hw_ep->fifo_sync_va =
1531 				musb->sync_va + 0x400 + musb->io.fifo_offset(i);
1532 
1533 			if (i == 0)
1534 				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
1535 			else
1536 				hw_ep->conf = mbase + 0x400 +
1537 					(((i - 1) & 0xf) << 2);
1538 		}
1539 #endif
1540 
1541 		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
1542 		hw_ep->rx_reinit = 1;
1543 		hw_ep->tx_reinit = 1;
1544 
1545 		if (hw_ep->max_packet_sz_tx) {
1546 			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1547 				musb_driver_name, i,
1548 				hw_ep->is_shared_fifo ? "shared" : "tx",
1549 				hw_ep->tx_double_buffered
1550 					? "doublebuffer, " : "",
1551 				hw_ep->max_packet_sz_tx);
1552 		}
1553 		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1554 			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1555 				musb_driver_name, i,
1556 				"rx",
1557 				hw_ep->rx_double_buffered
1558 					? "doublebuffer, " : "",
1559 				hw_ep->max_packet_sz_rx);
1560 		}
1561 		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
1562 			musb_dbg(musb, "hw_ep %d not configured", i);
1563 	}
1564 
1565 	return 0;
1566 }
1567 
1568 /*-------------------------------------------------------------------------*/
1569 
1570 /*
1571  * handle all the irqs defined by the HDRC core. for now we expect:  other
1572  * irq sources (phy, dma, etc) will be handled first, musb->int_* values
1573  * will be assigned, and the irq will already have been acked.
1574  *
1575  * called in irq context with spinlock held, irqs blocked
1576  */
1577 irqreturn_t musb_interrupt(struct musb *musb)
1578 {
1579 	irqreturn_t	retval = IRQ_NONE;
1580 	unsigned long	status;
1581 	unsigned long	epnum;
1582 	u8		devctl;
1583 
1584 	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
1585 		return IRQ_NONE;
1586 
1587 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1588 
1589 	trace_musb_isr(musb);
1590 
1591 	/**
1592 	 * According to Mentor Graphics' documentation, flowchart on page 98,
1593 	 * IRQ should be handled as follows:
1594 	 *
1595 	 * . Resume IRQ
1596 	 * . Session Request IRQ
1597 	 * . VBUS Error IRQ
1598 	 * . Suspend IRQ
1599 	 * . Connect IRQ
1600 	 * . Disconnect IRQ
1601 	 * . Reset/Babble IRQ
1602 	 * . SOF IRQ (we're not using this one)
1603 	 * . Endpoint 0 IRQ
1604 	 * . TX Endpoints
1605 	 * . RX Endpoints
1606 	 *
1607 	 * We will be following that flowchart in order to avoid any problems
1608 	 * that might arise with the internal finite state machine.
1609 	 */
1610 
1611 	if (musb->int_usb)
1612 		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
1613 
1614 	if (musb->int_tx & 1) {
1615 		if (is_host_active(musb))
1616 			retval |= musb_h_ep0_irq(musb);
1617 		else
1618 			retval |= musb_g_ep0_irq(musb);
1619 
1620 		/* we have just handled endpoint 0 IRQ, clear it */
1621 		musb->int_tx &= ~BIT(0);
1622 	}
1623 
1624 	status = musb->int_tx;
1625 
1626 	for_each_set_bit(epnum, &status, 16) {
1627 		retval = IRQ_HANDLED;
1628 		if (is_host_active(musb))
1629 			musb_host_tx(musb, epnum);
1630 		else
1631 			musb_g_tx(musb, epnum);
1632 	}
1633 
1634 	status = musb->int_rx;
1635 
1636 	for_each_set_bit(epnum, &status, 16) {
1637 		retval = IRQ_HANDLED;
1638 		if (is_host_active(musb))
1639 			musb_host_rx(musb, epnum);
1640 		else
1641 			musb_g_rx(musb, epnum);
1642 	}
1643 
1644 	return retval;
1645 }
1646 EXPORT_SYMBOL_GPL(musb_interrupt);
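/*
 * A glue layer's top level IRQ handler is expected to latch the three
 * interrupt status registers into musb->int_* and then call the
 * function above; roughly (a sketch, with musb->lock held around it):
 *
 *	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
 *	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
 *	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
 *	if (musb->int_usb || musb->int_tx || musb->int_rx)
 *		retval = musb_interrupt(musb);
 */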
1647 
1648 #ifndef CONFIG_MUSB_PIO_ONLY
1649 static bool use_dma = 1;
1650 
1651 /* "modprobe ... use_dma=0" etc */
1652 module_param(use_dma, bool, 0644);
1653 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1654 
1655 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
1656 {
1657 	/* called with controller lock already held */
1658 
1659 	if (!epnum) {
1660 		if (!is_cppi_enabled(musb)) {
1661 			/* endpoint 0 */
1662 			if (is_host_active(musb))
1663 				musb_h_ep0_irq(musb);
1664 			else
1665 				musb_g_ep0_irq(musb);
1666 		}
1667 	} else {
1668 		/* endpoints 1..15 */
1669 		if (transmit) {
1670 			if (is_host_active(musb))
1671 				musb_host_tx(musb, epnum);
1672 			else
1673 				musb_g_tx(musb, epnum);
1674 		} else {
1675 			/* receive */
1676 			if (is_host_active(musb))
1677 				musb_host_rx(musb, epnum);
1678 			else
1679 				musb_g_rx(musb, epnum);
1680 		}
1681 	}
1682 }
1683 EXPORT_SYMBOL_GPL(musb_dma_completion);
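/*
 * DMA glue (CPPI, Inventra, ux500, ...) calls this from its own
 * interrupt handler once a channel finishes, e.g. (sketch):
 *
 *	musb_dma_completion(musb, 3, 1);	// hw_ep 3, TX direction
 *
 * which then runs the same per-endpoint completion path the PIO IRQ
 * handler above would have taken.
 */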
1684 
1685 #else
1686 #define use_dma			0
1687 #endif
1688 
1689 static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1690 
1691 /*
1692  * musb_mailbox - optional phy notifier function
1693  * @status: phy state change
1694  *
1695  * Optionally gets called from the USB PHY. Note that the USB PHY must be
1696  * disabled at the point the phy_callback is registered or unregistered.
1697  */
1698 int musb_mailbox(enum musb_vbus_id_status status)
1699 {
1700 	if (musb_phy_callback)
1701 		return musb_phy_callback(status);
1702 
1703 	return -ENODEV;
1704 }
1705 EXPORT_SYMBOL_GPL(musb_mailbox);
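/*
 * A PHY or extcon driver reports VBUS/ID changes with something like
 * (sketch):
 *
 *	err = musb_mailbox(MUSB_VBUS_VALID);	// or MUSB_ID_GROUND, ...
 *
 * The call is forwarded to whatever callback the glue layer
 * registered; -ENODEV means no glue has registered one (yet).
 */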
1706 
1707 /*-------------------------------------------------------------------------*/
1708 
1709 static ssize_t
1710 musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1711 {
1712 	struct musb *musb = dev_to_musb(dev);
1713 	unsigned long flags;
1714 	int ret = -EINVAL;
1715 
1716 	spin_lock_irqsave(&musb->lock, flags);
1717 	ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
1718 	spin_unlock_irqrestore(&musb->lock, flags);
1719 
1720 	return ret;
1721 }
1722 
1723 static ssize_t
1724 musb_mode_store(struct device *dev, struct device_attribute *attr,
1725 		const char *buf, size_t n)
1726 {
1727 	struct musb	*musb = dev_to_musb(dev);
1728 	unsigned long	flags;
1729 	int		status;
1730 
1731 	spin_lock_irqsave(&musb->lock, flags);
1732 	if (sysfs_streq(buf, "host"))
1733 		status = musb_platform_set_mode(musb, MUSB_HOST);
1734 	else if (sysfs_streq(buf, "peripheral"))
1735 		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1736 	else if (sysfs_streq(buf, "otg"))
1737 		status = musb_platform_set_mode(musb, MUSB_OTG);
1738 	else
1739 		status = -EINVAL;
1740 	spin_unlock_irqrestore(&musb->lock, flags);
1741 
1742 	return (status == 0) ? n : status;
1743 }
1744 static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
1745 
1746 static ssize_t
1747 musb_vbus_store(struct device *dev, struct device_attribute *attr,
1748 		const char *buf, size_t n)
1749 {
1750 	struct musb	*musb = dev_to_musb(dev);
1751 	unsigned long	flags;
1752 	unsigned long	val;
1753 
1754 	if (sscanf(buf, "%lu", &val) < 1) {
1755 		dev_err(dev, "Invalid VBUS timeout ms value\n");
1756 		return -EINVAL;
1757 	}
1758 
1759 	spin_lock_irqsave(&musb->lock, flags);
1760 	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
1762 	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
1763 		musb->is_active = 0;
1764 	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1765 	spin_unlock_irqrestore(&musb->lock, flags);
1766 
1767 	return n;
1768 }
1769 
1770 static ssize_t
1771 musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1772 {
1773 	struct musb	*musb = dev_to_musb(dev);
1774 	unsigned long	flags;
1775 	unsigned long	val;
1776 	int		vbus;
1777 	u8		devctl;
1778 
1779 	spin_lock_irqsave(&musb->lock, flags);
1780 	val = musb->a_wait_bcon;
1781 	vbus = musb_platform_get_vbus_status(musb);
1782 	if (vbus < 0) {
1783 		/* Use default MUSB method by means of DEVCTL register */
1784 		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1785 		if ((devctl & MUSB_DEVCTL_VBUS)
1786 				== (3 << MUSB_DEVCTL_VBUS_SHIFT))
1787 			vbus = 1;
1788 		else
1789 			vbus = 0;
1790 	}
1791 	spin_unlock_irqrestore(&musb->lock, flags);
1792 
1793 	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1794 			vbus ? "on" : "off", val);
1795 }
1796 static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
1797 
/* Gadget drivers can't know when a host is connected, so they can't tell
 * when to start SRP; users can.  This attribute lets userspace trigger SRP.
 */
1801 static ssize_t
1802 musb_srp_store(struct device *dev, struct device_attribute *attr,
1803 		const char *buf, size_t n)
1804 {
1805 	struct musb	*musb = dev_to_musb(dev);
1806 	unsigned short	srp;
1807 
1808 	if (sscanf(buf, "%hu", &srp) != 1
1809 			|| (srp != 1)) {
1810 		dev_err(dev, "SRP: Value must be 1\n");
1811 		return -EINVAL;
1812 	}
1813 
	musb_g_wakeup(musb);
1816 
1817 	return n;
1818 }
1819 static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1820 
1821 static struct attribute *musb_attributes[] = {
1822 	&dev_attr_mode.attr,
1823 	&dev_attr_vbus.attr,
1824 	&dev_attr_srp.attr,
1825 	NULL
1826 };
1827 
1828 static const struct attribute_group musb_attr_group = {
1829 	.attrs = musb_attributes,
1830 };
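/*
 * Usage sketch for the attributes above (the device path is illustrative
 * and depends on how the glue layer names its child device):
 *
 *	cat /sys/bus/platform/devices/musb-hdrc.0/mode	       # current OTG state
 *	echo host > /sys/bus/platform/devices/musb-hdrc.0/mode
 *	echo 5000 > /sys/bus/platform/devices/musb-hdrc.0/vbus # a_wait_bcon in ms
 *	echo 1 > /sys/bus/platform/devices/musb-hdrc.0/srp     # trigger SRP
 *
 * Writes to "mode" other than "host", "peripheral" or "otg" fail with
 * -EINVAL, as does writing anything but "1" to "srp".
 */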
1831 
1832 #define MUSB_QUIRK_B_INVALID_VBUS_91	(MUSB_DEVCTL_BDEVICE | \
1833 					 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
1834 					 MUSB_DEVCTL_SESSION)
1835 #define MUSB_QUIRK_A_DISCONNECT_19	((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
1836 					 MUSB_DEVCTL_SESSION)
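/*
 * For reference, the _91 and _19 suffixes are the raw DEVCTL values the
 * patterns above decode to (with MUSB_DEVCTL_BDEVICE = 0x80,
 * MUSB_DEVCTL_VBUS_SHIFT = 3 and MUSB_DEVCTL_SESSION = 0x01):
 *
 *	0x80 | (2 << 3) | 0x01 = 0x91
 *	       (3 << 3) | 0x01 = 0x19
 */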
1837 
1838 /*
1839  * Check the musb devctl session bit to determine if we want to
1840  * allow PM runtime for the device. In general, we want to keep things
1841  * active when the session bit is set except after host disconnect.
1842  *
1843  * Only called from musb_irq_work. If this ever needs to get called
1844  * elsewhere, proper locking must be implemented for musb->session.
1845  */
1846 static void musb_pm_runtime_check_session(struct musb *musb)
1847 {
1848 	u8 devctl, s;
1849 	int error;
1850 
1851 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1852 
1853 	/* Handle session status quirks first */
1854 	s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
1855 		MUSB_DEVCTL_HR;
1856 	switch (devctl & ~s) {
1857 	case MUSB_QUIRK_B_INVALID_VBUS_91:
1858 		if (!musb->session && !musb->quirk_invalid_vbus) {
1859 			musb->quirk_invalid_vbus = true;
1860 			musb_dbg(musb,
1861 				 "First invalid vbus, assume no session");
1862 			return;
1863 		}
1864 		break;
1865 	case MUSB_QUIRK_A_DISCONNECT_19:
1866 		if (!musb->session)
1867 			break;
1868 		musb_dbg(musb, "Allow PM on possible host mode disconnect");
1869 		pm_runtime_mark_last_busy(musb->controller);
1870 		pm_runtime_put_autosuspend(musb->controller);
1871 		musb->session = false;
1872 		return;
1873 	default:
1874 		break;
1875 	}
1876 
1877 	/* No need to do anything if session has not changed */
1878 	s = devctl & MUSB_DEVCTL_SESSION;
1879 	if (s == musb->session)
1880 		return;
1881 
1882 	/* Block PM or allow PM? */
1883 	if (s) {
1884 		musb_dbg(musb, "Block PM on active session: %02x", devctl);
1885 		error = pm_runtime_get_sync(musb->controller);
1886 		if (error < 0)
1887 			dev_err(musb->controller, "Could not enable: %i\n",
1888 				error);
1889 	} else {
1890 		musb_dbg(musb, "Allow PM with no session: %02x", devctl);
1891 		musb->quirk_invalid_vbus = false;
1892 		pm_runtime_mark_last_busy(musb->controller);
1893 		pm_runtime_put_autosuspend(musb->controller);
1894 	}
1895 
1896 	musb->session = s;
1897 }
1898 
/* Checks PM runtime session state and provides driver mode change events */
1900 static void musb_irq_work(struct work_struct *data)
1901 {
1902 	struct musb *musb = container_of(data, struct musb, irq_work);
1903 
1904 	musb_pm_runtime_check_session(musb);
1905 
1906 	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
1907 		musb->xceiv_old_state = musb->xceiv->otg->state;
1908 		sysfs_notify(&musb->controller->kobj, NULL, "mode");
1909 	}
1910 }
1911 
1912 static void musb_recover_from_babble(struct musb *musb)
1913 {
1914 	int ret;
1915 	u8 devctl;
1916 
1917 	musb_disable_interrupts(musb);
1918 
	/*
	 * Wait at least 320 cycles of the 60 MHz clock, i.e. about 5.3 us;
	 * give it some slack and wait 10 us.
	 */
1923 	udelay(10);
1924 
1925 	ret  = musb_platform_recover(musb);
1926 	if (ret) {
1927 		musb_enable_interrupts(musb);
1928 		return;
1929 	}
1930 
1931 	/* drop session bit */
1932 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1933 	devctl &= ~MUSB_DEVCTL_SESSION;
1934 	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
1935 
1936 	/* tell usbcore about it */
1937 	musb_root_disconnect(musb);
1938 
1939 	/*
1940 	 * When a babble condition occurs, the musb controller
1941 	 * removes the session bit and the endpoint config is lost.
1942 	 */
1943 	if (musb->dyn_fifo)
1944 		ret = ep_config_from_table(musb);
1945 	else
1946 		ret = ep_config_from_hw(musb);
1947 
1948 	/* restart session */
1949 	if (ret == 0)
1950 		musb_start(musb);
1951 }
1952 
1953 /* --------------------------------------------------------------------------
1954  * Init support
1955  */
1956 
1957 static struct musb *allocate_instance(struct device *dev,
1958 		const struct musb_hdrc_config *config, void __iomem *mbase)
1959 {
1960 	struct musb		*musb;
1961 	struct musb_hw_ep	*ep;
1962 	int			epnum;
1963 	int			ret;
1964 
1965 	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
1966 	if (!musb)
1967 		return NULL;
1968 
1969 	INIT_LIST_HEAD(&musb->control);
1970 	INIT_LIST_HEAD(&musb->in_bulk);
1971 	INIT_LIST_HEAD(&musb->out_bulk);
1972 
1973 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1974 	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
1975 	musb->mregs = mbase;
1976 	musb->ctrl_base = mbase;
1977 	musb->nIrq = -ENODEV;
1978 	musb->config = config;
1979 	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
1980 	for (epnum = 0, ep = musb->endpoints;
1981 			epnum < musb->config->num_eps;
1982 			epnum++, ep++) {
1983 		ep->musb = musb;
1984 		ep->epnum = epnum;
1985 	}
1986 
1987 	musb->controller = dev;
1988 
1989 	ret = musb_host_alloc(musb);
1990 	if (ret < 0)
1991 		goto err_free;
1992 
1993 	dev_set_drvdata(dev, musb);
1994 
1995 	return musb;
1996 
1997 err_free:
1998 	return NULL;
1999 }
2000 
2001 static void musb_free(struct musb *musb)
2002 {
	/* This has multiple entry modes: it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything has been de-activated.
	 */
2007 
2008 #ifdef CONFIG_SYSFS
2009 	sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
2010 #endif
2011 
2012 	if (musb->nIrq >= 0) {
2013 		if (musb->irq_wake)
2014 			disable_irq_wake(musb->nIrq);
2015 		free_irq(musb->nIrq, musb);
2016 	}
2017 
2018 	musb_host_free(musb);
2019 }
2020 
2021 static void musb_deassert_reset(struct work_struct *work)
2022 {
2023 	struct musb *musb;
2024 	unsigned long flags;
2025 
2026 	musb = container_of(work, struct musb, deassert_reset_work.work);
2027 
2028 	spin_lock_irqsave(&musb->lock, flags);
2029 
2030 	if (musb->port1_status & USB_PORT_STAT_RESET)
2031 		musb_port_reset(musb, false);
2032 
2033 	spin_unlock_irqrestore(&musb->lock, flags);
2034 }
2035 
2036 /*
2037  * Perform generic per-controller initialization.
2038  *
2039  * @dev: the controller (already clocked, etc)
2040  * @nIrq: IRQ number
2041  * @ctrl: virtual address of controller registers,
2042  *	not yet corrected for platform-specific offsets
2043  */
2044 static int
2045 musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2046 {
2047 	int			status;
2048 	struct musb		*musb;
2049 	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
2050 
2051 	/* The driver might handle more features than the board; OK.
2052 	 * Fail when the board needs a feature that's not enabled.
2053 	 */
2054 	if (!plat) {
2055 		dev_err(dev, "no platform_data?\n");
2056 		status = -ENODEV;
2057 		goto fail0;
2058 	}
2059 
2060 	/* allocate */
2061 	musb = allocate_instance(dev, plat->config, ctrl);
2062 	if (!musb) {
2063 		status = -ENOMEM;
2064 		goto fail0;
2065 	}
2066 
2067 	spin_lock_init(&musb->lock);
2068 	musb->board_set_power = plat->set_power;
2069 	musb->min_power = plat->min_power;
2070 	musb->ops = plat->platform_ops;
2071 	musb->port_mode = plat->mode;
2072 
2073 	/*
2074 	 * Initialize the default IO functions. At least omap2430 needs
2075 	 * these early. We initialize the platform specific IO functions
2076 	 * later on.
2077 	 */
2078 	musb_readb = musb_default_readb;
2079 	musb_writeb = musb_default_writeb;
2080 	musb_readw = musb_default_readw;
2081 	musb_writew = musb_default_writew;
2082 	musb_readl = musb_default_readl;
2083 	musb_writel = musb_default_writel;
2084 
2085 	/* The musb_platform_init() call:
2086 	 *   - adjusts musb->mregs
2087 	 *   - sets the musb->isr
2088 	 *   - may initialize an integrated transceiver
	 *   - initializes musb->xceiv, usually by usb_get_phy()
2090 	 *   - stops powering VBUS
2091 	 *
2092 	 * There are various transceiver configurations.  Blackfin,
2093 	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
2094 	 * external/discrete ones in various flavors (twl4030 family,
2095 	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
2096 	 */
2097 	status = musb_platform_init(musb);
2098 	if (status < 0)
2099 		goto fail1;
2100 
2101 	if (!musb->isr) {
2102 		status = -ENODEV;
2103 		goto fail2;
2104 	}
2105 
2106 	if (musb->ops->quirks)
2107 		musb->io.quirks = musb->ops->quirks;
2108 
2109 	/* Most devices use indexed offset or flat offset */
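	/*
	 * For reference: the indexed mapping shares one 0x10-byte register
	 * window and musb_indexed_ep_select() writes the endpoint number to
	 * MUSB_INDEX before each access, whereas the flat mapping gives each
	 * endpoint its own bank at 0x100 + 0x10 * epnum and its ep_select()
	 * is a no-op (see the default helpers earlier in this file).
	 */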
2110 	if (musb->io.quirks & MUSB_INDEXED_EP) {
2111 		musb->io.ep_offset = musb_indexed_ep_offset;
2112 		musb->io.ep_select = musb_indexed_ep_select;
2113 	} else {
2114 		musb->io.ep_offset = musb_flat_ep_offset;
2115 		musb->io.ep_select = musb_flat_ep_select;
2116 	}
	/* And override them with platform specific ops if specified;
	 * at least tusb6010 has its own offsets.
	 */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;
2128 
2129 	if (musb->ops->fifo_mode)
2130 		fifo_mode = musb->ops->fifo_mode;
2131 	else
2132 		fifo_mode = 4;
2133 
2134 	if (musb->ops->fifo_offset)
2135 		musb->io.fifo_offset = musb->ops->fifo_offset;
2136 	else
2137 		musb->io.fifo_offset = musb_default_fifo_offset;
2138 
2139 	if (musb->ops->busctl_offset)
2140 		musb->io.busctl_offset = musb->ops->busctl_offset;
2141 	else
2142 		musb->io.busctl_offset = musb_default_busctl_offset;
2143 
2144 	if (musb->ops->readb)
2145 		musb_readb = musb->ops->readb;
2146 	if (musb->ops->writeb)
2147 		musb_writeb = musb->ops->writeb;
2148 	if (musb->ops->readw)
2149 		musb_readw = musb->ops->readw;
2150 	if (musb->ops->writew)
2151 		musb_writew = musb->ops->writew;
2152 	if (musb->ops->readl)
2153 		musb_readl = musb->ops->readl;
2154 	if (musb->ops->writel)
2155 		musb_writel = musb->ops->writel;
2156 
2157 #ifndef CONFIG_MUSB_PIO_ONLY
2158 	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
2159 		dev_err(dev, "DMA controller not set\n");
2160 		status = -ENODEV;
2161 		goto fail2;
2162 	}
2163 	musb_dma_controller_create = musb->ops->dma_init;
2164 	musb_dma_controller_destroy = musb->ops->dma_exit;
2165 #endif
2166 
2167 	if (musb->ops->read_fifo)
2168 		musb->io.read_fifo = musb->ops->read_fifo;
2169 	else
2170 		musb->io.read_fifo = musb_default_read_fifo;
2171 
2172 	if (musb->ops->write_fifo)
2173 		musb->io.write_fifo = musb->ops->write_fifo;
2174 	else
2175 		musb->io.write_fifo = musb_default_write_fifo;
2176 
2177 	if (!musb->xceiv->io_ops) {
2178 		musb->xceiv->io_dev = musb->controller;
2179 		musb->xceiv->io_priv = musb->mregs;
2180 		musb->xceiv->io_ops = &musb_ulpi_access;
2181 	}
2182 
2183 	if (musb->ops->phy_callback)
2184 		musb_phy_callback = musb->ops->phy_callback;
2185 
2186 	/*
2187 	 * We need musb_read/write functions initialized for PM.
2188 	 * Note that at least 2430 glue needs autosuspend delay
2189 	 * somewhere above 300 ms for the hardware to idle properly
2190 	 * after disconnecting the cable in host mode. Let's use
2191 	 * 500 ms for some margin.
2192 	 */
2193 	pm_runtime_use_autosuspend(musb->controller);
2194 	pm_runtime_set_autosuspend_delay(musb->controller, 500);
2195 	pm_runtime_enable(musb->controller);
2196 	pm_runtime_get_sync(musb->controller);
2197 
2198 	status = usb_phy_init(musb->xceiv);
2199 	if (status < 0)
2200 		goto err_usb_phy_init;
2201 
2202 	if (use_dma && dev->dma_mask) {
2203 		musb->dma_controller =
2204 			musb_dma_controller_create(musb, musb->mregs);
2205 		if (IS_ERR(musb->dma_controller)) {
2206 			status = PTR_ERR(musb->dma_controller);
2207 			goto fail2_5;
2208 		}
2209 	}
2210 
2211 	/* be sure interrupts are disabled before connecting ISR */
2212 	musb_platform_disable(musb);
2213 	musb_generic_disable(musb);
2214 
2215 	/* Init IRQ workqueue before request_irq */
2216 	INIT_WORK(&musb->irq_work, musb_irq_work);
2217 	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
2218 	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
2219 
2220 	/* setup musb parts of the core (especially endpoints) */
2221 	status = musb_core_init(plat->config->multipoint
2222 			? MUSB_CONTROLLER_MHDRC
2223 			: MUSB_CONTROLLER_HDRC, musb);
2224 	if (status < 0)
2225 		goto fail3;
2226 
2227 	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
2228 
2229 	/* attach to the IRQ */
2230 	if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
2231 		dev_err(dev, "request_irq %d failed!\n", nIrq);
2232 		status = -ENODEV;
2233 		goto fail3;
2234 	}
2235 	musb->nIrq = nIrq;
2236 	/* FIXME this handles wakeup irqs wrong */
2237 	if (enable_irq_wake(nIrq) == 0) {
2238 		musb->irq_wake = 1;
2239 		device_init_wakeup(dev, 1);
2240 	} else {
2241 		musb->irq_wake = 0;
2242 	}
2243 
2244 	/* program PHY to use external vBus if required */
2245 	if (plat->extvbus) {
2246 		u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
2247 		busctl |= MUSB_ULPI_USE_EXTVBUS;
2248 		musb_write_ulpi_buscontrol(musb->mregs, busctl);
2249 	}
2250 
2251 	if (musb->xceiv->otg->default_a) {
2252 		MUSB_HST_MODE(musb);
2253 		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2254 	} else {
2255 		MUSB_DEV_MODE(musb);
2256 		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2257 	}
2258 
2259 	switch (musb->port_mode) {
2260 	case MUSB_PORT_MODE_HOST:
2261 		status = musb_host_setup(musb, plat->power);
2262 		if (status < 0)
2263 			goto fail3;
2264 		status = musb_platform_set_mode(musb, MUSB_HOST);
2265 		break;
2266 	case MUSB_PORT_MODE_GADGET:
2267 		status = musb_gadget_setup(musb);
2268 		if (status < 0)
2269 			goto fail3;
2270 		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
2271 		break;
2272 	case MUSB_PORT_MODE_DUAL_ROLE:
2273 		status = musb_host_setup(musb, plat->power);
2274 		if (status < 0)
2275 			goto fail3;
2276 		status = musb_gadget_setup(musb);
2277 		if (status) {
2278 			musb_host_cleanup(musb);
2279 			goto fail3;
2280 		}
2281 		status = musb_platform_set_mode(musb, MUSB_OTG);
2282 		break;
2283 	default:
2284 		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
2285 		break;
2286 	}
2287 
2288 	if (status < 0)
2289 		goto fail3;
2290 
2291 	status = musb_init_debugfs(musb);
2292 	if (status < 0)
2293 		goto fail4;
2294 
2295 	status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2296 	if (status)
2297 		goto fail5;
2298 
2299 	pm_runtime_mark_last_busy(musb->controller);
2300 	pm_runtime_put_autosuspend(musb->controller);
2301 
2302 	return 0;
2303 
2304 fail5:
2305 	musb_exit_debugfs(musb);
2306 
2307 fail4:
2308 	musb_gadget_cleanup(musb);
2309 	musb_host_cleanup(musb);
2310 
2311 fail3:
2312 	cancel_work_sync(&musb->irq_work);
2313 	cancel_delayed_work_sync(&musb->finish_resume_work);
2314 	cancel_delayed_work_sync(&musb->deassert_reset_work);
2315 	if (musb->dma_controller)
2316 		musb_dma_controller_destroy(musb->dma_controller);
2317 
2318 fail2_5:
2319 	usb_phy_shutdown(musb->xceiv);
2320 
2321 err_usb_phy_init:
2322 	pm_runtime_dont_use_autosuspend(musb->controller);
2323 	pm_runtime_put_sync(musb->controller);
2324 	pm_runtime_disable(musb->controller);
2325 
2326 fail2:
2327 	if (musb->irq_wake)
2328 		device_init_wakeup(dev, 0);
2329 	musb_platform_exit(musb);
2330 
2331 fail1:
2332 	dev_err(musb->controller,
2333 		"musb_init_controller failed with status %d\n", status);
2334 
2335 	musb_free(musb);
2336 
fail0:
	return status;
2341 }
2342 
2343 /*-------------------------------------------------------------------------*/
2344 
2345 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2346  * bridge to a platform device; this driver then suffices.
2347  */
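/*
 * A minimal sketch (error handling omitted, names hypothetical) of what such
 * a bridge typically does: it registers a "musb-hdrc" child platform device
 * carrying the register resource, an IRQ resource named "mc" and a struct
 * musb_hdrc_platform_data, which is what musb_probe() below expects:
 *
 *	struct platform_device *child;
 *
 *	child = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
 *	child->dev.parent = &glue_pdev->dev;
 *	platform_device_add_resources(child, glue_resources,
 *				      ARRAY_SIZE(glue_resources));
 *	platform_device_add_data(child, &glue_pdata, sizeof(glue_pdata));
 *	platform_device_add(child);
 */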
2348 static int musb_probe(struct platform_device *pdev)
2349 {
2350 	struct device	*dev = &pdev->dev;
2351 	int		irq = platform_get_irq_byname(pdev, "mc");
2352 	struct resource	*iomem;
2353 	void __iomem	*base;
2354 
2355 	if (irq <= 0)
2356 		return -ENODEV;
2357 
2358 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2359 	base = devm_ioremap_resource(dev, iomem);
2360 	if (IS_ERR(base))
2361 		return PTR_ERR(base);
2362 
2363 	return musb_init_controller(dev, irq, base);
2364 }
2365 
2366 static int musb_remove(struct platform_device *pdev)
2367 {
2368 	struct device	*dev = &pdev->dev;
2369 	struct musb	*musb = dev_to_musb(dev);
2370 	unsigned long	flags;
2371 
2372 	/* this gets called on rmmod.
2373 	 *  - Host mode: host may still be active
2374 	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
2375 	 *  - OTG mode: both roles are deactivated (or never-activated)
2376 	 */
2377 	musb_exit_debugfs(musb);
2378 
2379 	cancel_work_sync(&musb->irq_work);
2380 	cancel_delayed_work_sync(&musb->finish_resume_work);
2381 	cancel_delayed_work_sync(&musb->deassert_reset_work);
2382 	pm_runtime_get_sync(musb->controller);
2383 	musb_host_cleanup(musb);
2384 	musb_gadget_cleanup(musb);
2385 	spin_lock_irqsave(&musb->lock, flags);
2386 	musb_platform_disable(musb);
2387 	musb_generic_disable(musb);
2388 	spin_unlock_irqrestore(&musb->lock, flags);
2389 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2390 	pm_runtime_dont_use_autosuspend(musb->controller);
2391 	pm_runtime_put_sync(musb->controller);
2392 	pm_runtime_disable(musb->controller);
2393 	musb_platform_exit(musb);
2394 	musb_phy_callback = NULL;
2395 	if (musb->dma_controller)
2396 		musb_dma_controller_destroy(musb->dma_controller);
2397 	usb_phy_shutdown(musb->xceiv);
2398 	musb_free(musb);
2399 	device_init_wakeup(dev, 0);
2400 	return 0;
2401 }
2402 
2403 #ifdef	CONFIG_PM
2404 
2405 static void musb_save_context(struct musb *musb)
2406 {
2407 	int i;
2408 	void __iomem *musb_base = musb->mregs;
2409 	void __iomem *epio;
2410 
2411 	musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
2412 	musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2413 	musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2414 	musb->context.power = musb_readb(musb_base, MUSB_POWER);
2415 	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2416 	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
2417 	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2418 
2419 	for (i = 0; i < musb->config->num_eps; ++i) {
2420 		struct musb_hw_ep	*hw_ep;
2421 
2422 		hw_ep = &musb->endpoints[i];
2423 		if (!hw_ep)
2424 			continue;
2425 
2426 		epio = hw_ep->regs;
2427 		if (!epio)
2428 			continue;
2429 
2430 		musb_writeb(musb_base, MUSB_INDEX, i);
2431 		musb->context.index_regs[i].txmaxp =
2432 			musb_readw(epio, MUSB_TXMAXP);
2433 		musb->context.index_regs[i].txcsr =
2434 			musb_readw(epio, MUSB_TXCSR);
2435 		musb->context.index_regs[i].rxmaxp =
2436 			musb_readw(epio, MUSB_RXMAXP);
2437 		musb->context.index_regs[i].rxcsr =
2438 			musb_readw(epio, MUSB_RXCSR);
2439 
2440 		if (musb->dyn_fifo) {
2441 			musb->context.index_regs[i].txfifoadd =
2442 					musb_read_txfifoadd(musb_base);
2443 			musb->context.index_regs[i].rxfifoadd =
2444 					musb_read_rxfifoadd(musb_base);
2445 			musb->context.index_regs[i].txfifosz =
2446 					musb_read_txfifosz(musb_base);
2447 			musb->context.index_regs[i].rxfifosz =
2448 					musb_read_rxfifosz(musb_base);
2449 		}
2450 
2451 		musb->context.index_regs[i].txtype =
2452 			musb_readb(epio, MUSB_TXTYPE);
2453 		musb->context.index_regs[i].txinterval =
2454 			musb_readb(epio, MUSB_TXINTERVAL);
2455 		musb->context.index_regs[i].rxtype =
2456 			musb_readb(epio, MUSB_RXTYPE);
2457 		musb->context.index_regs[i].rxinterval =
2458 			musb_readb(epio, MUSB_RXINTERVAL);
2459 
2460 		musb->context.index_regs[i].txfunaddr =
2461 			musb_read_txfunaddr(musb, i);
2462 		musb->context.index_regs[i].txhubaddr =
2463 			musb_read_txhubaddr(musb, i);
2464 		musb->context.index_regs[i].txhubport =
2465 			musb_read_txhubport(musb, i);
2466 
2467 		musb->context.index_regs[i].rxfunaddr =
2468 			musb_read_rxfunaddr(musb, i);
2469 		musb->context.index_regs[i].rxhubaddr =
2470 			musb_read_rxhubaddr(musb, i);
2471 		musb->context.index_regs[i].rxhubport =
2472 			musb_read_rxhubport(musb, i);
2473 	}
2474 }
2475 
2476 static void musb_restore_context(struct musb *musb)
2477 {
2478 	int i;
2479 	void __iomem *musb_base = musb->mregs;
2480 	void __iomem *epio;
2481 	u8 power;
2482 
2483 	musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2484 	musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2485 	musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2486 
2487 	/* Don't affect SUSPENDM/RESUME bits in POWER reg */
2488 	power = musb_readb(musb_base, MUSB_POWER);
2489 	power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
2490 	musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
2491 	power |= musb->context.power;
2492 	musb_writeb(musb_base, MUSB_POWER, power);
2493 
2494 	musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2495 	musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2496 	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2497 	if (musb->context.devctl & MUSB_DEVCTL_SESSION)
2498 		musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2499 
2500 	for (i = 0; i < musb->config->num_eps; ++i) {
2501 		struct musb_hw_ep	*hw_ep;
2502 
2503 		hw_ep = &musb->endpoints[i];
2504 		if (!hw_ep)
2505 			continue;
2506 
2507 		epio = hw_ep->regs;
2508 		if (!epio)
2509 			continue;
2510 
2511 		musb_writeb(musb_base, MUSB_INDEX, i);
2512 		musb_writew(epio, MUSB_TXMAXP,
2513 			musb->context.index_regs[i].txmaxp);
2514 		musb_writew(epio, MUSB_TXCSR,
2515 			musb->context.index_regs[i].txcsr);
2516 		musb_writew(epio, MUSB_RXMAXP,
2517 			musb->context.index_regs[i].rxmaxp);
2518 		musb_writew(epio, MUSB_RXCSR,
2519 			musb->context.index_regs[i].rxcsr);
2520 
2521 		if (musb->dyn_fifo) {
2522 			musb_write_txfifosz(musb_base,
2523 				musb->context.index_regs[i].txfifosz);
2524 			musb_write_rxfifosz(musb_base,
2525 				musb->context.index_regs[i].rxfifosz);
2526 			musb_write_txfifoadd(musb_base,
2527 				musb->context.index_regs[i].txfifoadd);
2528 			musb_write_rxfifoadd(musb_base,
2529 				musb->context.index_regs[i].rxfifoadd);
2530 		}
2531 
2532 		musb_writeb(epio, MUSB_TXTYPE,
2533 				musb->context.index_regs[i].txtype);
2534 		musb_writeb(epio, MUSB_TXINTERVAL,
2535 				musb->context.index_regs[i].txinterval);
2536 		musb_writeb(epio, MUSB_RXTYPE,
2537 				musb->context.index_regs[i].rxtype);
		musb_writeb(epio, MUSB_RXINTERVAL,
				musb->context.index_regs[i].rxinterval);
2541 		musb_write_txfunaddr(musb, i,
2542 				musb->context.index_regs[i].txfunaddr);
2543 		musb_write_txhubaddr(musb, i,
2544 				musb->context.index_regs[i].txhubaddr);
2545 		musb_write_txhubport(musb, i,
2546 				musb->context.index_regs[i].txhubport);
2547 
2548 		musb_write_rxfunaddr(musb, i,
2549 				musb->context.index_regs[i].rxfunaddr);
2550 		musb_write_rxhubaddr(musb, i,
2551 				musb->context.index_regs[i].rxhubaddr);
2552 		musb_write_rxhubport(musb, i,
2553 				musb->context.index_regs[i].rxhubport);
2554 	}
2555 	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
2556 }
2557 
2558 static int musb_suspend(struct device *dev)
2559 {
2560 	struct musb	*musb = dev_to_musb(dev);
2561 	unsigned long	flags;
2562 
2563 	musb_platform_disable(musb);
2564 	musb_generic_disable(musb);
2565 
2566 	spin_lock_irqsave(&musb->lock, flags);
2567 
2568 	if (is_peripheral_active(musb)) {
2569 		/* FIXME force disconnect unless we know USB will wake
2570 		 * the system up quickly enough to respond ...
2571 		 */
2572 	} else if (is_host_active(musb)) {
2573 		/* we know all the children are suspended; sometimes
2574 		 * they will even be wakeup-enabled.
2575 		 */
2576 	}
2577 
2578 	musb_save_context(musb);
2579 
2580 	spin_unlock_irqrestore(&musb->lock, flags);
2581 	return 0;
2582 }
2583 
2584 static int musb_resume(struct device *dev)
2585 {
2586 	struct musb	*musb = dev_to_musb(dev);
2587 	u8		devctl;
2588 	u8		mask;
2589 
2590 	/*
	 * For static CMOS like DaVinci, register values were preserved
	 * unless for some reason the whole SoC powered down or the USB
2593 	 * module got reset through the PSC (vs just being disabled).
2594 	 *
2595 	 * For the DSPS glue layer though, a full register restore has to
2596 	 * be done. As it shouldn't harm other platforms, we do it
2597 	 * unconditionally.
2598 	 */
2599 
2600 	musb_restore_context(musb);
2601 
2602 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2603 	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
2604 	if ((devctl & mask) != (musb->context.devctl & mask))
2605 		musb->port1_status = 0;
2606 	if (musb->need_finish_resume) {
2607 		musb->need_finish_resume = 0;
2608 		schedule_delayed_work(&musb->finish_resume_work,
2609 				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
2610 	}
2611 
2612 	/*
	 * The USB hub code expects the device to be in RPM_ACTIVE once it comes
	 * out of suspend.
2615 	 */
2616 	pm_runtime_disable(dev);
2617 	pm_runtime_set_active(dev);
2618 	pm_runtime_enable(dev);
2619 
2620 	musb_start(musb);
2621 
2622 	return 0;
2623 }
2624 
2625 static int musb_runtime_suspend(struct device *dev)
2626 {
2627 	struct musb	*musb = dev_to_musb(dev);
2628 
2629 	musb_save_context(musb);
2630 
2631 	return 0;
2632 }
2633 
2634 static int musb_runtime_resume(struct device *dev)
2635 {
2636 	struct musb	*musb = dev_to_musb(dev);
2637 	static int	first = 1;
2638 
	/*
	 * When pm_runtime_get_sync() is called for the first time during
	 * driver init, some of the structures used by the restore function
	 * are not yet initialized.  The clock still needs to be enabled
	 * before any register access, though, so pm_runtime_get_sync() has
	 * to be called anyway.  Also, a context restore without a prior
	 * save makes no sense.
	 */
2648 	if (!first)
2649 		musb_restore_context(musb);
2650 	first = 0;
2651 
2652 	if (musb->need_finish_resume) {
2653 		musb->need_finish_resume = 0;
2654 		schedule_delayed_work(&musb->finish_resume_work,
2655 				msecs_to_jiffies(USB_RESUME_TIMEOUT));
2656 	}
2657 
2658 	return 0;
2659 }
2660 
2661 static const struct dev_pm_ops musb_dev_pm_ops = {
2662 	.suspend	= musb_suspend,
2663 	.resume		= musb_resume,
2664 	.runtime_suspend = musb_runtime_suspend,
2665 	.runtime_resume = musb_runtime_resume,
2666 };
2667 
2668 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2669 #else
2670 #define	MUSB_DEV_PM_OPS	NULL
2671 #endif
2672 
2673 static struct platform_driver musb_driver = {
2674 	.driver = {
2675 		.name		= (char *)musb_driver_name,
2676 		.bus		= &platform_bus_type,
2677 		.pm		= MUSB_DEV_PM_OPS,
2678 	},
2679 	.probe		= musb_probe,
2680 	.remove		= musb_remove,
2681 };
2682 
2683 module_platform_driver(musb_driver);
2684