#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
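/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a common pattern with the allocation functions above is to allocate an
 * urb, fill it with usb_fill_bulk_urb(), submit it, and immediately drop
 * the driver's reference with usb_free_urb(), since the core takes its own
 * reference during submission.  Setting URB_FREE_BUFFER makes urb_destroy()
 * free the transfer buffer when the last reference is dropped.  The
 * my_write() and my_write_complete() names are assumptions for the example.
 *
 *	static void my_write_complete(struct urb *urb)
 *	{
 *		if (urb->status)
 *			dev_dbg(&urb->dev->dev, "write failed: %d\n",
 *				urb->status);
 *	}
 *
 *	static int my_write(struct usb_device *udev, unsigned int pipe,
 *			    const void *data, int len)
 *	{
 *		struct urb *urb;
 *		void *buf;
 *		int ret;
 *
 *		urb = usb_alloc_urb(0, GFP_KERNEL);
 *		buf = kmemdup(data, len, GFP_KERNEL);
 *		if (!urb || !buf) {
 *			kfree(buf);
 *			usb_free_urb(urb);
 *			return -ENOMEM;
 *		}
 *		usb_fill_bulk_urb(urb, udev, pipe, buf, len,
 *				  my_write_complete, NULL);
 *		urb->transfer_flags |= URB_FREE_BUFFER;
 *		ret = usb_submit_urb(urb, GFP_KERNEL);
 *		usb_free_urb(urb);
 *		return ret;
 *	}
 */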
/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned)) {
		atomic_inc(&urb->reject);
	}

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention.  To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
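/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * anchoring lets a driver submit many URBs without tracking each one and
 * later cancel whatever is still in flight with a single call.  The anchor
 * is assumed to have been set up with init_usb_anchor() at probe time; the
 * my_dev, my_queue_urb() and my_disconnect() names are assumptions.
 *
 *	static int my_queue_urb(struct my_dev *dev, struct urb *urb)
 *	{
 *		int ret;
 *
 *		usb_anchor_urb(urb, &dev->submitted);
 *		ret = usb_submit_urb(urb, GFP_ATOMIC);
 *		if (ret)
 *			usb_unanchor_urb(urb);
 *		return ret;
 *	}
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_kill_anchored_urbs(&dev->submitted);
 *	}
 */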
/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.  Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = le16_to_cpu(ep->desc.wMaxPacketSize);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed == USB_SPEED_SUPER) {
			int burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
		unsigned int orig_flags = urb->transfer_flags;
		unsigned int allowed;
		static int pipetypes[4] = {
			PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
		};

		/* Check that the pipe's type matches the endpoint's type */
		if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) {
			dev_err(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
				usb_pipetype(urb->pipe), pipetypes[xfertype]);
			return -EPIPE;		/* The most suitable error code :-) */
		}

		/* enforce simple/standard policy */
		allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
				URB_FREE_BUFFER);
		switch (xfertype) {
		case USB_ENDPOINT_XFER_BULK:
			if (is_out)
				allowed |= URB_ZERO_PACKET;
			/* FALLTHROUGH */
		case USB_ENDPOINT_XFER_CONTROL:
			allowed |= URB_NO_FSBR;	/* only affects UHCI */
			/* FALLTHROUGH */
		default:			/* all non-iso endpoints */
			if (!is_out)
				allowed |= URB_SHORT_NOT_OK;
			break;
		case USB_ENDPOINT_XFER_ISOC:
			allowed |= URB_ISO_ASAP;
			break;
		}
		urb->transfer_flags &= allowed;

		/* fail if submitter gave bogus flags */
		if (urb->transfer_flags != orig_flags) {
			dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n",
				orig_flags, urb->transfer_flags);
			return -EINVAL;
		}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if (urb->interval < 6)
				return -EINVAL;
			break;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);

/*-------------------------------------------------------------------*/
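/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * as described under "Reserved Bandwidth Transfers" above, a driver keeps
 * its periodic bandwidth by resubmitting the just-completed urb from the
 * completion handler, using GFP_ATOMIC per the memory-flag rules above.
 * The my_int_complete() and my_handle_data() names are assumptions.
 *
 *	static void my_int_complete(struct urb *urb)
 *	{
 *		int ret;
 *
 *		if (urb->status == -ECONNRESET || urb->status == -ENOENT ||
 *		    urb->status == -ESHUTDOWN)
 *			return;		(unlinked or device gone: do not resubmit)
 *
 *		if (urb->status == 0)
 *			my_handle_data(urb->transfer_buffer, urb->actual_length);
 *
 *		ret = usb_submit_urb(urb, GFP_ATOMIC);
 *		if (ret)
 *			dev_err(&urb->dev->dev, "resubmit failed: %d\n", ret);
 *	}
 */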
/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is always asynchronous.  Success is indicated by
 * returning -EINPROGRESS, at which time the URB will probably not yet
 * have been given back to the device driver.  When it is eventually
 * called, the completion function will see @urb->status == -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
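/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * usb_kill_urb() is typically used where the caller may sleep and needs a
 * hard guarantee that the urb is idle afterwards, for example when closing
 * a device node.  The my_dev fields and my_release() name are assumptions.
 *
 *	static int my_release(struct inode *inode, struct file *file)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		usb_kill_urb(dev->int_in_urb);
 *		usb_free_urb(dev->int_in_urb);
 *		dev->int_in_urb = NULL;
 *		return 0;
 *	}
 */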
/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
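/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * poisoning is useful when traffic must stop and stay stopped until the
 * driver explicitly allows it again, for instance across suspend/resume.
 * The my_dev fields and the function names are assumptions.
 *
 *	static int my_suspend(struct usb_interface *intf, pm_message_t msg)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_poison_urb(dev->int_in_urb);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct usb_interface *intf)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_unpoison_urb(dev->int_in_urb);
 *		return usb_submit_urb(dev->int_in_urb, GFP_NOIO);
 *	}
 */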
/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs
 * the anchor can be used normally after it returns
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);
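/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver that wants outstanding transfers to drain gracefully can wait
 * on the anchor with a timeout and fall back to forcible cancellation.
 * The my_dev fields and the 1000 ms timeout are assumptions.
 *
 *	static void my_stop_io(struct my_dev *dev)
 *	{
 *		if (!usb_wait_anchor_empty_timeout(&dev->submitted, 1000))
 *			usb_kill_anchored_urbs(&dev->submitted);
 *	}
 */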
/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);