// SPDX-License-Identifier: GPL-2.0
/*
 * Released under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/kmsan.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->urb_list);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets),
		      mem_flags);
	if (!urb)
		return NULL;
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
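/*
 * Illustrative sketch (not part of the original file): the two allocation
 * styles discussed above.  "my_dev" and "int_urb" are hypothetical driver
 * names; only usb_alloc_urb(), usb_free_urb() and usb_init_urb() are real
 * USB core calls.  A driver embedding the urb must call usb_init_urb()
 * itself and must keep the memory alive while the USB core uses it.
 *
 *	// common case: let the core allocate and reference-count the urb
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);	// 0 iso packets
 *	...
 *	usb_free_urb(urb);			// drops the last reference
 *
 *	// urb embedded in a driver-private structure
 *	struct my_dev {
 *		struct urb	int_urb;
 *		...
 *	};
 *	usb_init_urb(&mydev->int_urb);
 */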
/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
	return atomic_read(&anchor->suspend_wakeups) == 0 &&
		list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB.
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention.  To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

static const int pipetypes[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

/**
 * usb_pipe_type_check - sanity check of a specific pipe for a usb device
 * @dev: struct usb_device to be checked
 * @pipe: pipe to check
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given usb device.  It returns 0 if the pipe is valid for the specific usb
 * device, otherwise a negative error code.
 */
int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe)
{
	const struct usb_host_endpoint *ep;

	ep = usb_pipe_endpoint(dev, pipe);
	if (!ep)
		return -EINVAL;
	if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(usb_pipe_type_check);
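/*
 * Illustrative sketch (not part of the original file): a driver validating,
 * typically at probe time, that the pipe it is about to use really refers to
 * a bulk endpoint on the device.  "udev" and "MY_BULK_IN_EP" are
 * hypothetical; usb_rcvbulkpipe() and usb_pipe_type_check() are real APIs.
 *
 *	unsigned int pipe = usb_rcvbulkpipe(udev, MY_BULK_IN_EP);
 *
 *	if (usb_pipe_type_check(udev, pipe))
 *		return -ENODEV;		// endpoint missing or not bulk
 */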
/**
 * usb_urb_ep_type_check - sanity check of endpoint in the given urb
 * @urb: urb to be checked
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given urb.  It returns 0 if the urb contains a valid endpoint, otherwise
 * a negative error code.
 */
int usb_urb_ep_type_check(const struct urb *urb)
{
	return usb_pipe_type_check(urb->dev, urb->pipe);
}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB.  When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request.  The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future.  Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added.  If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule.  If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past.  When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV.  If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions.  A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
		dev_WARN_ONCE(&dev->dev, (usb_pipeout(urb->pipe) != is_out),
				"BOGUS control dir, pipe %x doesn't match bRequestType %x\n",
				urb->pipe, setup->bRequestType);
		if (le16_to_cpu(setup->wLength) != urb->transfer_buffer_length) {
			dev_dbg(&dev->dev, "BOGUS control len %d doesn't match transfer length %d\n",
					le16_to_cpu(setup->wLength),
					urb->transfer_buffer_length);
			return -EBADR;
		}
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
	kmsan_handle_urb(urb, is_out);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
	    dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed >= USB_SPEED_SUPER) {
			int burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		if (dev->speed == USB_SPEED_SUPER_PLUS &&
		    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
			struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;

			isoc_ep_comp = &ep->ssp_isoc_ep_comp;
			max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH)
			max *= usb_endpoint_maxp_mult(&ep->desc);

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

	/*
	 * stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipe_type_check(urb->dev, urb->pipe))
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		fallthrough;
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);

	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if ((urb->interval < 6)
				&& (xfertype == USB_ENDPOINT_XFER_INT))
				return -EINVAL;
			fallthrough;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER_PLUS:
		case USB_SPEED_SUPER:		/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:		/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:		/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);

/*-------------------------------------------------------------------*/
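/*
 * Illustrative sketch (not part of the original file): submitting an
 * asynchronous bulk OUT transfer as described in the usb_submit_urb()
 * documentation above.  The device context ("my_dev"), its fields and
 * my_write_complete() are hypothetical; usb_alloc_urb(),
 * usb_fill_bulk_urb(), usb_submit_urb() and usb_free_urb() are the real
 * USB core calls being demonstrated.
 *
 *	static void my_write_complete(struct urb *urb)
 *	{
 *		if (urb->status)
 *			dev_dbg(&urb->dev->dev, "write failed: %d\n",
 *				urb->status);
 *		usb_free_urb(urb);	// URB_FREE_BUFFER releases the buffer
 *	}
 *
 *	static int my_write(struct my_dev *mydev, void *buf, int len)
 *	{
 *		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *		int ret;
 *
 *		if (!urb)
 *			return -ENOMEM;
 *		usb_fill_bulk_urb(urb, mydev->udev,
 *				  usb_sndbulkpipe(mydev->udev, mydev->out_ep),
 *				  buf, len, my_write_complete, mydev);
 *		urb->transfer_flags |= URB_FREE_BUFFER;
 *		ret = usb_submit_urb(urb, GFP_KERNEL);
 *		if (ret)
 *			usb_free_urb(urb);	// submission failed, drop it
 *		return ret;
 *	}
 */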
/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous; however, the HCD might call the ->complete()
 * callback during unlink.  Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver.  When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success.  See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
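/*
 * Illustrative sketch (not part of the original file): stopping I/O from a
 * disconnect() callback, as recommended above.  "my_dev" and its urb fields
 * are hypothetical.  usb_kill_urb() leaves the urb idle and reusable;
 * usb_poison_urb() additionally blocks any later resubmission until
 * usb_unpoison_urb() is called.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_dev *mydev = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(mydev->int_urb);	// wait for the handler to run
 *		usb_poison_urb(mydev->bulk_urb);	// and forbid resubmission
 *		...
 *	}
 */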
/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - kill all URBs associated with an anchor
 * @anchor: anchor the requests are bound to
 *
 * This kills all outstanding URBs starting from the back of the queue,
 * with the guarantee that no completion callbacks will take place from the
 * anchor after this function returns.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_kill_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);


/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		anchor->poisoned = 1;
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_poison_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
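/*
 * Illustrative sketch (not part of the original file): the usual anchor
 * pattern.  URBs are anchored as they are submitted, and the whole set is
 * torn down with a single call from disconnect().  "my_dev" and its
 * "submitted" anchor member are hypothetical; init_usb_anchor(),
 * usb_anchor_urb(), usb_unanchor_urb() and usb_kill_anchored_urbs() are the
 * real APIs being shown.
 *
 *	init_usb_anchor(&mydev->submitted);		// once, at probe time
 *
 *	// on every submission
 *	usb_anchor_urb(urb, &mydev->submitted);
 *	ret = usb_submit_urb(urb, GFP_KERNEL);
 *	if (ret)
 *		usb_unanchor_urb(urb);			// submission failed
 *
 *	// in disconnect(): cancel everything and wait for the handlers
 *	usb_kill_anchored_urbs(&mydev->submitted);
 */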
/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters.  This is used in the hcd urb give-
 * back path to delay waking up until after the completion handler has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
	if (anchor)
		atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
	if (!anchor)
		return;

	atomic_dec(&anchor->suspend_wakeups);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure that all of an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused.  Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait,
				  usb_anchor_check_wakeup(anchor),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
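/*
 * Illustrative sketch (not part of the original file): a common "flush"
 * pattern built from the helpers above, e.g. in a suspend() callback where
 * in-flight URBs get a short grace period before being killed outright.
 * The anchor name and the 100 ms budget are hypothetical.
 *
 *	usb_unlink_anchored_urbs(&mydev->submitted);	// asynchronous cancel
 *	if (!usb_wait_anchor_empty_timeout(&mydev->submitted, 100))
 *		usb_kill_anchored_urbs(&mydev->submitted);	// force the rest
 */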
/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor it, and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}

EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all of an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;
	int surely_empty;

	do {
		spin_lock_irqsave(&anchor->lock, flags);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			__usb_unanchor_urb(victim, anchor);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irqrestore(&anchor->lock, flags);
		cpu_relax();
	} while (!surely_empty);
}

EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);