/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *     support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 *
 * Intel documents this fairly well, and as far as I know there
 * are no royalties or anything like that, but even so there are
 * people who decided that they want to do the same thing in a
 * completely different way.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/pm.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>

#include "../core/hcd.h"
#include "uhci-hcd.h"

/*
 * Version Information
 */
#define DRIVER_VERSION "v2.3"
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
Alan Stern"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"

/*
 * debug = 0, no debugging messages
 * debug = 1, dump failed URBs except for stalls
 * debug = 2, dump all failed URBs (including stalls)
 *            show all queues in /debug/uhci/[pci_addr]
 * debug = 3, show all TDs in URBs when dumping
 */
#ifdef DEBUG
static int debug = 1;
#else
static int debug = 0;
#endif
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;
#define ERRBUF_LEN	(32 * 1024)

static kmem_cache_t *uhci_up_cachep;	/* urb_priv */

static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);
static void uhci_get_current_frame_number(struct uhci_hcd *uhci);

/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT	msecs_to_jiffies(50)
#define FSBR_DELAY	msecs_to_jiffies(50)

/* When we timeout an idle transfer for FSBR, we'll switch it over to */
/* depth first traversal. We'll do it in groups of this number of TDs */
/* to make sure it doesn't hog all of the bandwidth */
#define DEPTH_INTERVAL 5

#include "uhci-debug.c"
#include "uhci-q.c"
#include "uhci-hub.c"

extern void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
extern int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);

/*
 * Finish up a host controller reset and update the recorded state.
 */
static void finish_reset(struct uhci_hcd *uhci)
{
	int port;

	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
	 * bits in the port status and control registers.
	 * We have to clear them by hand.
	 */
	for (port = 0; port < uhci->rh_numports; ++port)
		outw(0, uhci->io_addr + USBPORTSC1 + (port * 2));

	uhci->port_c_suspend = uhci->suspended_ports =
			uhci->resuming_ports = 0;
	uhci->rh_state = UHCI_RH_RESET;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->state = HC_STATE_HALT;
	uhci_to_hcd(uhci)->poll_rh = 0;
}

/*
 * Last rites for a defunct/nonfunctional controller
 * or one we don't want to use any more.
 */
static void hc_died(struct uhci_hcd *uhci)
{
	uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
	finish_reset(uhci);
	uhci->hc_inaccessible = 1;
}

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 */
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
	if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr))
		finish_reset(uhci);
}

/*
 * Store the basic register settings needed by the controller.
 */
static void configure_hc(struct uhci_hcd *uhci)
{
	/* Set the frame length to the default: 1 ms exactly */
	outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);

	/* Store the frame list base address */
	outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);

	/* Set the current frame number */
	outw(uhci->frame_number, uhci->io_addr + USBFRNUM);

	/* Mark controller as not halted before we enable interrupts */
	uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
	mb();

	/* Enable PIRQ */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
}


static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
	int port;

	switch (to_pci_dev(uhci_dev(uhci))->vendor) {
	default:
		break;

	case PCI_VENDOR_ID_GENESYS:
		/* Genesys Logic's GL880S controllers don't generate
		 * resume-detect interrupts.
		 */
		return 1;

	case PCI_VENDOR_ID_INTEL:
		/* Some of Intel's USB controllers have a bug that causes
		 * resume-detect interrupts if any port has an over-current
		 * condition.  To make matters worse, some motherboards
		 * hardwire unused USB ports' over-current inputs active!
		 * To prevent problems, we will not enable resume-detect
		 * interrupts if any ports are OC.
		 */
		for (port = 0; port < uhci->rh_numports; ++port) {
			if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
					USBPORTSC_OC)
				return 1;
		}
		break;
	}
	return 0;
}

static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	int auto_stop;
	int int_enable;

	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			(auto_stop ? " (auto-stop)" : ""));

	/* If we get a suspend request when we're already auto-stopped
	 * then there's nothing to do.
	 */
	if (uhci->rh_state == UHCI_RH_AUTO_STOPPED) {
		uhci->rh_state = new_state;
		return;
	}

	/* Enable resume-detect interrupts if they work.
	 * Then enter Global Suspend mode, still configured.
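	 * (USBCMD_EGSM requests Enter Global Suspend Mode; keeping
	 * USBCMD_CF set leaves the Configure Flag asserted.)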
	 */
	uhci->working_RD = 1;
	int_enable = USBINTR_RESUME;
	if (resume_detect_interrupts_are_broken(uhci)) {
		uhci->working_RD = int_enable = 0;
	}
	outw(int_enable, uhci->io_addr + USBINTR);
	outw(USBCMD_EGSM | USBCMD_CF, uhci->io_addr + USBCMD);
	mb();
	udelay(5);

	/* If we're auto-stopping then no devices have been attached
	 * for a while, so there shouldn't be any active URBs and the
	 * controller should stop after a few microseconds.  Otherwise
	 * we will give the controller one frame to stop.
	 */
	if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) {
		uhci->rh_state = UHCI_RH_SUSPENDING;
		spin_unlock_irq(&uhci->lock);
		msleep(1);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died */
			return;
	}
	if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH))
		dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");

	uhci_get_current_frame_number(uhci);
	smp_wmb();

	uhci->rh_state = new_state;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->poll_rh = !int_enable;

	uhci_scan_schedule(uhci, NULL);
}

static void start_rh(struct uhci_hcd *uhci)
{
	uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;
	uhci->is_stopped = 0;
	smp_wmb();

	/* Mark it configured and running with a 64-byte max packet.
	 * All interrupts are enabled, even though RESUME won't do anything.
	 */
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
			uhci->io_addr + USBINTR);
	mb();
	uhci->rh_state = UHCI_RH_RUNNING;
	uhci_to_hcd(uhci)->poll_rh = 1;
}

static void wakeup_rh(struct uhci_hcd *uhci)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
				" (auto-start)" : "");

	/* If we are auto-stopped then no devices are attached so there's
	 * no need for wakeup signals.  Otherwise we send Global Resume
	 * for 20 ms.
	 */
	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
		uhci->rh_state = UHCI_RH_RESUMING;
		outw(USBCMD_FGR | USBCMD_EGSM | USBCMD_CF,
				uhci->io_addr + USBCMD);
		spin_unlock_irq(&uhci->lock);
		msleep(20);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died */
			return;

		/* End Global Resume and wait for EOP to be sent */
		outw(USBCMD_CF, uhci->io_addr + USBCMD);
		mb();
		udelay(4);
		if (inw(uhci->io_addr + USBCMD) & USBCMD_FGR)
			dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
	}

	start_rh(uhci);

	/* Restart root hub polling */
	mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
}

static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned short status;
	unsigned long flags;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
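	 * For that reason it is masked out below when deciding whether
	 * the interrupt belongs to this controller.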
	 */
	status = inw(uhci->io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, uhci->io_addr + USBSTS);		/* Clear it */

	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if (status & USBSTS_HCH) {
			spin_lock_irqsave(&uhci->lock, flags);
			if (uhci->rh_state >= UHCI_RH_RUNNING) {
				dev_err(uhci_dev(uhci),
					"host controller halted, "
					"very bad!\n");
				hc_died(uhci);

				/* Force a callback in case there are
				 * pending unlinks */
				mod_timer(&hcd->rh_timer, jiffies);
			}
			spin_unlock_irqrestore(&uhci->lock, flags);
		}
	}

	if (status & USBSTS_RD)
		usb_hcd_poll_rh_status(hcd);
	else {
		spin_lock_irqsave(&uhci->lock, flags);
		uhci_scan_schedule(uhci, regs);
		spin_unlock_irqrestore(&uhci->lock, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Store the current frame number in uhci->frame_number if the controller
 * is running
 */
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped)
		uhci->frame_number = inw(uhci->io_addr + USBFRNUM);
}

/*
 * De-allocate all resources
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		uhci_free_qh(uhci, uhci->skelqh[i]);

	uhci_free_td(uhci, uhci->term_td);

	dma_pool_destroy(uhci->qh_pool);

	dma_pool_destroy(uhci->td_pool);

	kfree(uhci->frame_cpu);

	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);

	debugfs_remove(uhci->dentry);
}

static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned io_size = (unsigned) hcd->rsrc_len;
	int port;

	uhci->io_addr = (unsigned long) hcd->rsrc_start;

	/* The UHCI spec says devices must have 2 ports, and goes on to say
	 * they may have more but gives no way to determine how many there
	 * are.  However according to the UHCI spec, Bit 7 of the port
	 * status and control register is always set to 1.  So we try to
	 * use this to our advantage.  Another common failure mode when
	 * a nonexistent register is addressed is to return all ones, so
	 * we test for that also.
	 */
	for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2));
		if (!(portstatus & 0x0080) || portstatus == 0xffff)
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* Anything greater than 7 is weird so we'll ignore it. */
	if (port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}
	uhci->rh_numports = port;

	/* Kick BIOS off this hardware and reset if the controller
	 * isn't already safely quiescent.
	 */
	check_and_reset_hc(uhci);
	return 0;
}

/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
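 * (We do this by calling hc_died(), which resets the controller and
 * marks it inaccessible.)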
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_shutdown(struct pci_dev *pdev)
{
	struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);

	hc_died(hcd_to_uhci(hcd));
}

/*
 * Allocate a frame list, and then set up the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up.  The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues.  We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low- and full-speed
 *  - The third queue is the bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the full-speed control queue.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i;
	struct dentry *dentry;

	hcd->uses_new_polling = 1;

	dentry = debugfs_create_file(hcd->self.bus_name,
			S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root, uhci,
			&uhci_debug_operations);
	if (!dentry) {
		dev_err(uhci_dev(uhci),
				"couldn't create uhci debugfs entry\n");
		retval = -ENOMEM;
		goto err_create_debug_entry;
	}
	uhci->dentry = dentry;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	INIT_LIST_HEAD(&uhci->td_remove_list);

	INIT_LIST_HEAD(&uhci->urb_remove_list);

	INIT_LIST_HEAD(&uhci->urb_list);

	INIT_LIST_HEAD(&uhci->complete_list);

	init_waitqueue_head(&uhci->waitqh);

	uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			&uhci->frame_dma_handle, 0);
	if (!uhci->frame) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_frame;
	}
	memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));

	uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
			GFP_KERNEL);
	if (!uhci->frame_cpu) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"memory for frame pointers\n");
		goto err_alloc_frame_cpu;
	}

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	uhci->term_td = uhci_alloc_td(uhci);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link =
			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
			(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
		 * ffs > 6 => not on any high-period queue, so use
		 * skel_int1_qh = skelqh[7].
		 * Add UHCI_NUMFRAMES to ensure at least one bit is set.
		 */
		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq < 0)
			irq = 7;

		/* Only place we don't use the frame list routines */
		uhci->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in configure_hc().
	 */
	mb();

	configure_hc(uhci);
	start_rh(uhci);
	return 0;

/*
 * error exits:
 */
err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		if (uhci->skelqh[i])
			uhci_free_qh(uhci, uhci->skelqh[i]);
	}

	uhci_free_td(uhci, uhci->term_td);

err_alloc_term_td:
	dma_pool_destroy(uhci->qh_pool);

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);

err_create_td_pool:
	kfree(uhci->frame_cpu);

err_alloc_frame_cpu:
	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);

err_alloc_frame:
	debugfs_remove(uhci->dentry);

err_create_debug_entry:
	return retval;
}

static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (!uhci->hc_inaccessible)
		hc_died(uhci);
	uhci_scan_schedule(uhci, NULL);
	spin_unlock_irq(&uhci->lock);

	release_uhci(uhci);
}

#ifdef CONFIG_PM
static int uhci_rh_suspend(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (!uhci->hc_inaccessible)	/* Not dead */
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	spin_unlock_irq(&uhci->lock);
	return 0;
}

static int uhci_rh_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	spin_lock_irq(&uhci->lock);
	if (uhci->hc_inaccessible) {
		if (uhci->rh_state == UHCI_RH_SUSPENDED) {
			dev_warn(uhci_dev(uhci), "HC isn't running!\n");
			rc = -ENODEV;
		}
		/* Otherwise the HC is dead */
	} else
		wakeup_rh(uhci);
	spin_unlock_irq(&uhci->lock);
	return rc;
}

static int uhci_suspend(struct usb_hcd *hcd, pm_message_t message)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	spin_lock_irq(&uhci->lock);
	if (uhci->hc_inaccessible)	/* Dead or already suspended */
		goto done;

	if (uhci->rh_state > UHCI_RH_SUSPENDED) {
		dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n");
		rc = -EBUSY;
		goto done;
	}

	/* All PCI host controllers are required to disable IRQ generation
	 * at the source, so we must turn off PIRQ.
	 */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
	mb();
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	uhci->hc_inaccessible = 1;
	hcd->poll_rh = 0;

	/* FIXME: Enable non-PME# remote wakeup? */

done:
	spin_unlock_irq(&uhci->lock);
	return rc;
}

static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	/* Since we aren't in D3 any more, it's safe to set this flag
	 * even if the controller was dead.  It might not even be dead
	 * any more, if the firmware or quirks code has reset it.
	 */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	mb();

	if (uhci->rh_state == UHCI_RH_RESET)	/* Dead */
		return 0;
	spin_lock_irq(&uhci->lock);

	/* FIXME: Disable non-PME# remote wakeup? */

	uhci->hc_inaccessible = 0;

	/* The BIOS may have changed the controller settings during a
	 * system wakeup.  Check it and reconfigure to avoid problems.
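	 * (check_and_reset_hc() resets the controller unless it is already
	 * safely quiescent; configure_hc() then restores our register setup.)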
	 */
	check_and_reset_hc(uhci);
	configure_hc(uhci);

	if (uhci->rh_state == UHCI_RH_RESET) {

		/* The controller had to be reset */
		usb_root_hub_lost_power(hcd->self.root_hub);
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	}

	spin_unlock_irq(&uhci->lock);

	if (!uhci->working_RD) {
		/* Suspended root hub needs to be polled */
		hcd->poll_rh = 1;
		usb_hcd_poll_rh_status(hcd);
	}
	return 0;
}
#endif

/* Wait until all the URBs for a particular device/endpoint are gone */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
}

static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	int is_stopped;
	int frame_number;

	/* Minimize latency by avoiding the spinlock */
	local_irq_save(flags);
	is_stopped = uhci->is_stopped;
	smp_rmb();
	frame_number = (is_stopped ? uhci->frame_number :
			inw(uhci->io_addr + USBFRNUM));
	local_irq_restore(flags);
	return frame_number;
}

static const char hcd_name[] = "uhci_hcd";

static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.product_desc =		"UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
	.bus_suspend =		uhci_rh_suspend,
	.bus_resume =		uhci_rh_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};

static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);

static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,
	.shutdown =	uhci_shutdown,

#ifdef CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};

static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");

	if (usb_disabled())
		return -ENODEV;

	if (debug) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
	}

	uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
	if (!uhci_debugfs_root)
		goto debug_failed;

	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
			sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_register_driver(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

up_failed:
	debugfs_remove(uhci_debugfs_root);

debug_failed:
	kfree(errbuf);

errbuf_failed:

	return retval;
}

static void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

	debugfs_remove(uhci_debugfs_root);
	kfree(errbuf);
}

module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");