/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *  (and others)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"


#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define	AB_REG_BAR_LOW		0xe0
#define	AB_REG_BAR_HIGH		0xe1
#define	AB_REG_BAR_SB700	0xf0
#define	AB_INDX(addr)		((addr) + 0x00)
#define	AB_DATA(addr)		((addr) + 0x04)
#define	AX_INDXC		0x30
#define	AX_DATAC		0x34

#define	NB_PCIE_INDX_ADDR	0xe0
#define	NB_PCIE_INDX_DATA	0xe4
#define	PCIE_P_CNTL		0x10040
#define	BIF_NB			0x10002
#define	NB_PIF0_PWRDOWN_0	0x01100012
#define	NB_PIF0_PWRDOWN_1	0x01100013
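/*
 * NB_PCIE_INDX_ADDR/NB_PCIE_INDX_DATA above form an index/data window in
 * the northbridge's PCI config space: usb_amd_quirk_pll() writes a register
 * address (e.g. PCIE_P_CNTL, BIF_NB or NB_PIF0_PWRDOWN_x) to the index
 * register and then reads or writes the value through the data register.
 * The specific register addresses and bit meanings presumably come from
 * AMD chipset documentation and are used here as-is.
 */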

static struct amd_chipset_info {
	struct pci_dev	*nb_dev;
	struct pci_dev	*smbus_dev;
	int nb_type;
	int sb_type;
	int isoc_reqs;
	int probe_count;
	int probe_result;
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);

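/*
 * Probe for the AMD/ATI chipset combination that needs the PLL quirk.
 * The southbridge is identified by its SMBus controller (sb_type 1, 2 and 3
 * appear to correspond to SB800, Hudson-2 and SB700 class parts, going by
 * the device IDs and revisions checked below), and a matching northbridge
 * is looked up afterwards.  The result is cached; probe_count lets callers
 * share one reference, which is released again in usb_amd_dev_put().
 */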
int usb_amd_find_chipset_info(void)
{
	u8 rev = 0;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count++;
	/* probe only once */
	if (amd_chipset.probe_count > 1) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return amd_chipset.probe_result;
	}

	amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
	if (amd_chipset.smbus_dev) {
		rev = amd_chipset.smbus_dev->revision;
		if (rev >= 0x40)
			amd_chipset.sb_type = 1;
		else if (rev >= 0x30 && rev <= 0x3b)
			amd_chipset.sb_type = 3;
	} else {
		amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
							0x780b, NULL);
		if (!amd_chipset.smbus_dev) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return 0;
		}
		rev = amd_chipset.smbus_dev->revision;
		if (rev >= 0x11 && rev <= 0x18)
			amd_chipset.sb_type = 2;
	}

	if (amd_chipset.sb_type == 0) {
		if (amd_chipset.smbus_dev) {
			pci_dev_put(amd_chipset.smbus_dev);
			amd_chipset.smbus_dev = NULL;
		}
		spin_unlock_irqrestore(&amd_lock, flags);
		return 0;
	}

	amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (amd_chipset.nb_dev) {
		amd_chipset.nb_type = 1;
	} else {
		amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
							0x1510, NULL);
		if (amd_chipset.nb_dev) {
			amd_chipset.nb_type = 2;
		} else {
			amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
								0x9600, NULL);
			if (amd_chipset.nb_dev)
				amd_chipset.nb_type = 3;
		}
	}

	amd_chipset.probe_result = 1;
	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

	spin_unlock_irqrestore(&amd_lock, flags);
	return amd_chipset.probe_result;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link from entering that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on the OHCI/EHCI/xHCI controllers
 * of some AMD platforms may stutter or break up occasionally.
 */
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ? 0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

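	/*
	 * isoc_reqs counts outstanding "disable" requests: only the first
	 * disable and the last matching enable actually touch the hardware,
	 * so concurrent isochronous streams share one PLL-quirk state.
	 */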
	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
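		/*
		 * 0xcd6/0xcd7 are the southbridge's PM index/data I/O ports;
		 * reading AB_REG_BAR_LOW/HIGH through them is assumed to give
		 * the I/O base of the A-Link bridge register space, which is
		 * then accessed via its own AB_INDX/AB_DATA index/data pair
		 * below.
		 */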
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type == 3) {
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
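/*
 * Host controller drivers on affected AMD platforms are expected to call
 * usb_amd_quirk_pll_disable() while isochronous streams are active and
 * usb_amd_quirk_pll_enable() once they stop, restoring the normal A-link
 * power management behaviour.
 */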

void usb_amd_dev_put(void)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_dev) {
		pci_dev_put(amd_chipset.nb_dev);
		amd_chipset.nb_dev = NULL;
	}
	if (amd_chipset.smbus_dev) {
		pci_dev_put(amd_chipset.smbus_dev);
		amd_chipset.smbus_dev = NULL;
	}
	amd_chipset.nb_type = 0;
	amd_chipset.sb_type = 0;
	amd_chipset.isoc_reqs = 0;
	amd_chipset.probe_result = 0;

	spin_unlock_irqrestore(&amd_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

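/*
 * Returns true if the given PCI COMMAND register bit (I/O or memory space
 * enable) is set.  A failed config read is treated as "not enabled".
 */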
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)

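/*
 * UHCI keeps its legacy-support controls in PCI config space rather than
 * behind an ownership handshake, so it is enough to locate the controller's
 * I/O BAR and let uhci_check_and_reset_hc() turn off legacy SMIs and stop
 * the controller if its state looks wrong.
 */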
static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

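/*
 * OHCI handoff: if the InterruptRouting bit is set, an SMM driver (the
 * BIOS) still owns the controller.  Request an ownership change and wait
 * up to five seconds for IR to clear, then reset the controller while
 * preserving RWC and mask all interrupts.
 */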
static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	control = readl(base + OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 5 seconds */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
					" (BIOS bug?) %08x\n",
					readl(base + OHCI_CONTROL));
	}
#endif

	/* reset controller, preserving RWC (and possibly IR) */
	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);

	/*
	 * disable interrupts
	 */
	writel(~(u32)0, base + OHCI_INTRDISABLE);
	writel(~(u32)0, base + OHCI_INTRSTATUS);

	iounmap(base);
}

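/*
 * EHCI handoff: walk the extended capabilities list advertised by HCCPARAMS
 * looking for the USBLEGSUP capability (EHCI spec section 5.1), claim the
 * OS-owned semaphore and wait for the BIOS to release its own, then halt
 * the controller and disable its interrupts.
 */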
static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	int wait_time, delta;
	void __iomem *base, *op_reg_base;
	u32	hcc_params, val;
	u8	offset, cap_length;
	int	count = 256/4;
	int	tried_handoff = 0;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities";
	 * spec section 5.1 explains the BIOS handoff, e.g. for
	 * booting from a USB disk or using a USB keyboard.
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		u32		cap;
		int		msec;

		pci_read_config_dword(pdev, offset, &cap);
		switch (cap & 0xff) {
		case 1:			/* BIOS/SMM/... handoff support */
			if ((cap & EHCI_USBLEGSUP_BIOS)) {
				dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
				/* BIOS workaround (?): be sure the
				 * pre-Linux code receives the SMI
				 */
				pci_read_config_dword(pdev,
						offset + EHCI_USBLEGCTLSTS,
						&val);
				pci_write_config_dword(pdev,
						offset + EHCI_USBLEGCTLSTS,
						val | EHCI_USBLEGCTLSTS_SOOE);
#endif

				/* some systems get upset if this semaphore is
				 * set for any other reason than forcing a BIOS
				 * handoff.
				 */
				pci_write_config_byte(pdev, offset + 3, 1);
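				/* offset + 3 is the byte holding the
				 * OS-owned semaphore (EHCI_USBLEGSUP_OS,
				 * bit 24 of the dword).
				 */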
			}

			/* if boot firmware now owns EHCI, spin till
			 * it hands it over.
			 */
			msec = 1000;
			while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
				tried_handoff = 1;
				msleep(10);
				msec -= 10;
				pci_read_config_dword(pdev, offset, &cap);
			}

			if (cap & EHCI_USBLEGSUP_BIOS) {
				/* well, possibly buggy BIOS... try to shut
				 * it down, and hope nothing goes too wrong
				 */
				dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
						" (BIOS bug?) %08x\n", cap);
				pci_write_config_byte(pdev, offset + 2, 0);
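				/* offset + 2 is the byte holding the
				 * BIOS-owned semaphore (EHCI_USBLEGSUP_BIOS,
				 * bit 16); clearing it takes ownership by
				 * force.
				 */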
			}

			/* just in case, always disable EHCI SMIs */
			pci_write_config_dword(pdev,
					offset + EHCI_USBLEGCTLSTS,
					0);

			/* If the BIOS ever owned the controller then we
			 * can't expect any power sessions to remain intact.
			 */
			if (tried_handoff)
				writel(0, op_reg_base + EHCI_CONFIGFLAG);
			break;
		case 0:			/* illegal reserved capability */
			cap = 0;
			/* FALLTHROUGH */
		default:
			dev_warn(&pdev->dev, "EHCI: unrecognized capability "
					"%02x\n", cap & 0xff);
			break;
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		delta = 100;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(delta);
			wait_time -= delta;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	u32	result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	} while (wait_usec > 0);
	return -ETIMEDOUT;
}

/**
 * PCI Quirks for xHCI.
 *
 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we timeout, assume the BIOS is broken and take control anyway.
 */
static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
	void __iomem *base;
	int ext_cap_offset;
	void __iomem *op_reg_base;
	u32 val;
	int timeout;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = ioremap_nocache(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (base == NULL)
		return;

	/*
	 * Find the Legacy Support Capability register -
	 * this is optional for xHCI host controllers.
	 */
	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
	do {
		if (!ext_cap_offset)
			/* We've reached the end of the extended capabilities */
			goto hc_init;
		val = readl(base + ext_cap_offset);
		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
			break;
		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
	} while (1);

	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
	if (val & XHCI_HC_BIOS_OWNED) {
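		/* Set the OS Owned semaphore; the BIOS should respond by
		 * clearing its own semaphore, which the handshake below
		 * polls for.
		 */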
		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

		/* Wait for 5 seconds with 10 microsecond polling interval */
		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
				0, 5000000, 10);

		/* Assume a buggy BIOS and take HC ownership anyway */
		if (timeout) {
			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
					" (BIOS bug?) %08x\n", val);
			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
		}
	}

	/* Disable any BIOS SMIs */
	writel(XHCI_LEGACY_DISABLE_SMI,
			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);

hc_init:
	op_reg_base = base + XHCI_HC_LENGTH(readl(base));

	/* Wait for the host controller to be ready before writing any
	 * operational or runtime registers.  Wait 5 seconds and no more.
	 */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
			5000000, 10);
	/* Assume a buggy HC and start HC initialization anyway */
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
				"xHCI HW not ready after 5 sec (HC bug?) "
				"status = 0x%x\n", val);
	}

	/* Send the halt and disable interrupts command */
	val = readl(op_reg_base + XHCI_CMD_OFFSET);
	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
	writel(val, op_reg_base + XHCI_CMD_OFFSET);

	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
			XHCI_MAX_HALT_USEC, 125);
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
				"xHCI HW did not halt within %d usec "
				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
	}

	iounmap(base);
}

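/*
 * Registered as a final PCI fixup for every device, this dispatches on the
 * USB host controller class code so each controller type gets its handoff
 * or disable quirk before the regular HCD drivers take over.
 */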
static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
{
	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
		quirk_usb_handoff_uhci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
		quirk_usb_handoff_ohci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
		quirk_usb_disable_ehci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
		quirk_usb_handoff_xhci(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);