/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer.  HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */
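
/*
 * Illustrative sketch (not part of the driver): the consumer rules above,
 * written out for a hypothetical event-ring consumer.  process_trb() is a
 * made-up placeholder; the real helpers (trb_is_link(), inc_deq(), etc.) are
 * defined later in this file.
 *
 *      while ((le32_to_cpu(ring->dequeue->generic.field[3]) & TRB_CYCLE) ==
 *             ring->cycle_state) {
 *              process_trb(ring->dequeue);     // cycle bit matches: ours
 *              inc_deq(xhci, ring);            // may toggle cycle_state at a
 *                                              // link TRB with LINK_TOGGLE
 *      }
 *      // the first TRB with the "wrong" cycle bit still belongs to the
 *      // producer, so stop here (consumer rule 2)
 */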

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}

static bool trb_is_noop(union xhci_trb *trb)
{
        return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
        return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
        struct urb_priv *urb_priv = td->urb->hcpriv;

        return urb_priv->td_cnt == urb_priv->length;
}

static void inc_td_cnt(struct urb *urb)
{
        struct urb_priv *urb_priv = urb->hcpriv;

        urb_priv->td_cnt++;
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (trb_is_link(*trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        ring->deq_updates++;

        /* event ring doesn't have link trbs, check for last trb */
        if (ring->type == TYPE_EVENT) {
                if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
                        ring->dequeue++;
                        return;
                }
                if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
                        ring->cycle_state ^= 1;
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                return;
        }

        /* All other rings have link trbs */
        if (!trb_is_link(ring->dequeue)) {
                ring->dequeue++;
                ring->num_trbs_free++;
        }
        while (trb_is_link(ring->dequeue)) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
        }
        return;
}
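
/*
 * Illustrative sketch (not part of the driver): next_trb() is the building
 * block for walking every TRB of a TD across segment boundaries, which is
 * exactly what td_to_noop() and xhci_find_new_dequeue_state() below do.
 * Assuming a TD described by td->first_trb/td->last_trb in td->start_seg,
 * and a hypothetical visit() callback:
 *
 *      struct xhci_segment *seg = td->start_seg;
 *      union xhci_trb *trb = td->first_trb;
 *
 *      while (trb != td->last_trb) {
 *              visit(seg, trb);
 *              next_trb(xhci, ep_ring, &seg, &trb);
 *      }
 *      visit(seg, trb);        // don't forget the last TRB itself
 */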

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 *                      prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                bool more_trbs_coming)
{
        u32 chain;
        union xhci_trb *next;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
        /* If this is not the event ring, there is one less usable TRB */
        if (!trb_is_link(ring->enqueue))
                ring->num_trbs_free--;
        next = ++(ring->enqueue);

        ring->enq_updates++;
        /* Update the enqueue pointer further if that was a link TRB */
        while (trb_is_link(next)) {

                /*
                 * If the caller doesn't plan on enqueueing more TDs before
                 * ringing the doorbell, then we don't want to give the link TRB
                 * to the hardware just yet.  We'll give the link TRB back in
                 * prepare_ring() just before we enqueue the TD at the top of
                 * the ring.
                 */
                if (!chain && !more_trbs_coming)
                        break;

                /* If we're not dealing with 0.95 hardware or isoc rings on
                 * AMD 0.96 host, carry over the chain bit of the previous TRB
                 * (which may mean the chain bit is cleared).
                 */
                if (!(ring->type == TYPE_ISOC &&
                      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
                    !xhci_link_trb_quirk(xhci)) {
                        next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        next->link.control |= cpu_to_le32(chain);
                }
                /* Give this link TRB to the hardware */
                wmb();
                next->link.control ^= cpu_to_le32(TRB_CYCLE);

                /* Toggle the cycle bit after the last ring segment. */
                if (link_trb_toggles_cycle(next))
                        ring->cycle_state ^= 1;

                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment.  See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int num_trbs_in_deq_seg;

        if (ring->num_trbs_free < num_trbs)
                return 0;

        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }

        return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
                return;

        xhci_dbg(xhci, "// Ding dong!\n");
        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        readl(&xhci->dba->doorbell[0]);
}
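
/*
 * Illustrative sketch (not part of the driver): the producer rules put
 * together for the command ring.  The driver's real enqueue helper
 * (queue_command(), later in this file) also handles reserved-TRB
 * accounting; this only shows the ordering.  field0..field3 are
 * placeholders for the command-specific TRB fields:
 *
 *      if (!room_on_ring(xhci, xhci->cmd_ring, 1))
 *              return -ENOMEM;                         // producer rule 1
 *      trb = xhci->cmd_ring->enqueue;
 *      trb->generic.field[0] = cpu_to_le32(field0);
 *      trb->generic.field[1] = cpu_to_le32(field1);
 *      trb->generic.field[2] = cpu_to_le32(field2);
 *      trb->generic.field[3] = cpu_to_le32(field3 |
 *                      xhci->cmd_ring->cycle_state);   // producer rule 2,
 *                                                      // cycle bit last
 *      inc_enq(xhci, xhci->cmd_ring, false);
 *      xhci_ring_cmd_db(xhci);                         // producer rule 3
 */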

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
        u64 temp_64;
        int ret;

        xhci_dbg(xhci, "Abort command ring\n");

        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;

        /*
         * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
         * however on some host hw the CMD_RING_RUNNING bit is correctly
         * cleared but the completion event is never sent.  Use the cmd timeout
         * timer to handle those cases.  Use twice the time to cover the bit
         * polling retry.
         */
        mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);

        /* Section 4.6.1.2 of xHCI 1.0 spec says software should
         * time the completion of all xHCI commands, including
         * the Command Abort operation.  If software doesn't see
         * CRR negated in a timely manner (e.g. longer than 5
         * seconds), then it should assume that there are
         * larger problems with the xHC and assert HCRST.
         */
        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
                /* we are about to kill xhci, give it one more chance */
                xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                                &xhci->op_regs->cmd_ring);
                udelay(1000);
                ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                                CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
                if (ret == 0)
                        return 0;

                xhci_err(xhci, "Stopping the command ring failed, "
                                "maybe the host is dead\n");
                del_timer(&xhci->cmd_timer);
                xhci->xhc_state |= XHCI_STATE_DYING;
                xhci_halt(xhci);
                return -ESHUTDOWN;
        }

        return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index,
                unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         */
        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
            (ep_state & EP_HALTED))
                return;
        writel(DB_VALUE(ep_index, stream_id), db_addr);
        /* The CPU has better things to do at this point than wait for a
         * write-posting flush.  It'll get there soon enough.
         */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (ep->ring && !(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                        stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;
                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                                stream_id);
        }
}
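
/*
 * Illustrative note (an assumption; see xhci.h for the authoritative
 * macros): doorbell[0] is the host controller (command) doorbell, and
 * doorbell[slot_id] targets a device slot.  DB_VALUE() packs the doorbell
 * target and stream ID roughly as
 *
 *      DB_VALUE(ep_index, stream_id) == ((ep_index + 1) & 0xff) |
 *                                       (stream_id << 16)
 *
 * so writing DB_VALUE(0, 0) to doorbell[slot_id] rings the default control
 * endpoint, and a nonzero stream_id tells the xHC which stream ring to
 * schedule on that endpoint.
 */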

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}
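
/*
 * Illustrative sketch (not part of the driver): callers typically derive
 * the triad from an URB.  This mirrors what xhci_urb_to_transfer_ring()
 * does, assuming xhci_get_endpoint_index() from xhci-mem.c:
 *
 *      ring = xhci_triad_to_transfer_ring(xhci,
 *                      urb->dev->slot_id,
 *                      xhci_get_endpoint_index(&urb->ep->desc),
 *                      urb->stream_id);
 *      if (!ring)
 *              return -EINVAL;         // bogus stream ID, see warnings above
 */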

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC
 *    stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *cur_td,
                struct xhci_dequeue_state *state)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_virt_ep *ep = &dev->eps[ep_index];
        struct xhci_ring *ep_ring;
        struct xhci_segment *new_seg;
        union xhci_trb *new_deq;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
        bool td_last_trb_found = false;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue state "
                                "for invalid stream ID %u.\n",
                                stream_id);
                return;
        }

        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Finding endpoint context");
        /* 4.6.9 the css flag is written to the stream context for streams */
        if (ep->ep_state & EP_HAS_STREAMS) {
                struct xhci_stream_ctx *ctx =
                        &ep->stream_info->stream_ctx_array[stream_id];
                hw_dequeue = le64_to_cpu(ctx->stream_ring);
        } else {
                struct xhci_ep_ctx *ep_ctx
                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
                hw_dequeue = le64_to_cpu(ep_ctx->deq);
        }

        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
        state->new_cycle_state = hw_dequeue & 0x1;

        /*
         * We want to find the pointer, segment and cycle state of the new trb
         * (the one after current TD's last_trb).  We know the cycle state at
         * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
         * found.
         */
        do {
                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
                        cycle_found = true;
                        if (td_last_trb_found)
                                break;
                }
                if (new_deq == cur_td->last_trb)
                        td_last_trb_found = true;

                if (cycle_found && trb_is_link(new_deq) &&
                    link_trb_toggles_cycle(new_deq))
                        state->new_cycle_state ^= 0x1;

                next_trb(xhci, ep_ring, &new_seg, &new_deq);

                /* Search wrapped around, bail out */
                if (new_deq == ep->ring->dequeue) {
                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
                        state->new_deq_seg = NULL;
                        state->new_deq_ptr = NULL;
                        return;
                }

        } while (!cycle_found || !td_last_trb_found);

        state->new_deq_seg = new_seg;
        state->new_deq_ptr = new_deq;

        /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Cycle state = 0x%x", state->new_cycle_state);

        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue segment = %p (virtual)",
                        state->new_deq_seg);
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue pointer = 0x%llx (DMA)",
                        (unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                       struct xhci_td *td, bool flip_cycle)
{
        struct xhci_segment *seg = td->start_seg;
        union xhci_trb *trb = td->first_trb;

        while (1) {
                if (trb_is_link(trb)) {
                        /* unchain chained link TRBs */
                        trb->link.control &= cpu_to_le32(~TRB_CHAIN);
                } else {
                        trb->generic.field[0] = 0;
                        trb->generic.field[1] = 0;
                        trb->generic.field[2] = 0;
                        /* Preserve only the cycle bit of this TRB */
                        trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
                        trb->generic.field[3] |= cpu_to_le32(
                                                TRB_TYPE(TRB_TR_NOOP));
                }
                /* flip cycle if asked to */
                if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

                if (trb == td->last_trb)
                        break;

                next_trb(xhci, ep_ring, &seg, &trb);
        }
}
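
/*
 * Illustrative note (restating the code above): after td_to_noop(), a
 * transfer TRB in the TD looks like
 *
 *      field[0] = 0;                   // parameter, low dword
 *      field[1] = 0;                   // parameter, high dword
 *      field[2] = 0;                   // status / transfer length
 *      field[3] = TRB_TYPE(TRB_TR_NOOP) | <original cycle bit>;
 *
 * i.e. the hardware still sees a TRB it owns (cycle bit unchanged), but one
 * that transfers nothing and, with the chain bit cleared, ends the TD.
 */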

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
{
        ep->ep_state &= ~EP_HALT_PENDING;
        /* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
         * timer is running on another CPU, we don't decrement
         * stop_cmds_pending (since we didn't successfully stop the watchdog
         * timer).
         */
        if (del_timer(&ep->stop_cmd_timer))
                ep->stop_cmds_pending--;
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                                     struct xhci_td *cur_td, int status)
{
        struct urb *urb = cur_td->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                usb_amd_quirk_pll_enable();
                }
        }
        xhci_urb_free_priv(urb_priv);
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&xhci->lock);
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
                struct xhci_ring *ring, struct xhci_td *td)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        struct xhci_segment *seg = td->bounce_seg;
        struct urb *urb = td->urb;

        if (!seg || !urb)
                return;

        if (usb_urb_dir_out(urb)) {
                dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                                 DMA_TO_DEVICE);
                return;
        }

        /* for IN transfers we need to copy the data from bounce to sg */
        sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
                             seg->bounce_len, seg->bounce_offs);
        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                         DMA_FROM_DEVICE);
        seg->bounce_len = 0;
        seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, struct xhci_event_cmd *event)
{
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct list_head *entry;
        struct xhci_td *cur_td = NULL;
        struct xhci_td *last_unlinked_td;

        struct xhci_dequeue_state deq_state;

        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
                if (!xhci->devs[slot_id])
                        xhci_warn(xhci, "Stop endpoint command "
                                "completion for disabled slot %u\n",
                                slot_id);
                return;
        }

        memset(&deq_state, 0, sizeof(deq_state));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = &xhci->devs[slot_id]->eps[ep_index];

        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ep->stopped_td = NULL;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }

        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
         * We have the xHCI lock, so nothing can modify this list until we drop
         * it.  We're also in the event handler, so we can't get re-interrupted
         * if another Stop Endpoint command completes.
         */
        list_for_each(entry, &ep->cancelled_td_list) {
                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Removing canceled TD starting at 0x%llx (dma).",
                                (unsigned long long)xhci_trb_virt_to_dma(
                                        cur_td->start_seg, cur_td->first_trb));
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                if (!ep_ring) {
                        /* This shouldn't happen unless a driver is mucking
                         * with the stream ID after submission.  This will
                         * leave the TD on the hardware ring, and the hardware
                         * will try to execute it, and may access a buffer
                         * that has already been freed.  In the best case, the
                         * hardware will execute it, and the event handler will
                         * ignore the completion event for that TD, since it
                         * was removed from the td_list for that endpoint.  In
                         * short, don't muck with the stream ID after
                         * submission.
                         */
                        xhci_warn(xhci, "WARN Cancelled URB %p "
                                        "has invalid stream ID %u.\n",
                                        cur_td->urb,
                                        cur_td->urb->stream_id);
                        goto remove_finished_td;
                }
                /*
                 * If we stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                if (cur_td == ep->stopped_td)
                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                                        cur_td->urb->stream_id,
                                        cur_td, &deq_state);
                else
                        td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
                /*
                 * The event handler won't see a completion for this TD
                 * anymore, so remove it from the endpoint ring's TD list.
                 * Keep it in the cancelled TD list for URB completion later.
                 */
                list_del_init(&cur_td->td_list);
        }
        last_unlinked_td = cur_td;
        xhci_stop_watchdog_timer_in_irq(xhci, ep);

        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
                                ep->stopped_td->urb->stream_id, &deq_state);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }

        ep->stopped_td = NULL;

        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
         * New TDs to be cancelled might be added to the end of the list before
         * we can complete all the URBs for the TDs we already unlinked.
         * So stop when we've completed the URB for the last TD we unlinked.
         */
        do {
                cur_td = list_entry(ep->cancelled_td_list.next,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);

                /* Clean up the cancelled URB */
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                if (ep_ring && cur_td->bounce_seg)
                        xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, 0);

                /* Stop processing the cancelled list if the watchdog timer is
                 * running.
                 */
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        } while (cur_td != last_unlinked_td);

        /* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_td *cur_td;

        while (!list_empty(&ring->td_list)) {
                cur_td = list_first_entry(&ring->td_list,
                                struct xhci_td, td_list);
                list_del_init(&cur_td->td_list);
                if (!list_empty(&cur_td->cancelled_td_list))
                        list_del_init(&cur_td->cancelled_td_list);

                if (cur_td->bounce_seg)
                        xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
                int slot_id, int ep_index)
{
        struct xhci_td *cur_td;
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        if ((ep->ep_state & EP_HAS_STREAMS) ||
                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;

                for (stream_id = 0; stream_id < ep->stream_info->num_streams;
                                stream_id++) {
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Killing URBs for slot ID %u, ep index %u, stream %u",
                                        slot_id, ep_index, stream_id + 1);
                        xhci_kill_ring_urbs(xhci,
                                        ep->stream_info->stream_rings[stream_id]);
                }
        } else {
                ring = ep->ring;
                if (!ring)
                        return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Killing URBs for slot ID %u, ep index %u",
                                slot_id, ep_index);
                xhci_kill_ring_urbs(xhci, ring);
        }
        while (!list_empty(&ep->cancelled_td_list)) {
                cur_td = list_first_entry(&ep->cancelled_td_list,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);

                inc_td_cnt(cur_td->urb);
                if (last_td_in_urb(cur_td))
                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation
 * may sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we
 * assume the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_ep *ep;
        int ret, i, j;
        unsigned long flags;

        ep = (struct xhci_virt_ep *) arg;
        xhci = ep->xhci;

        spin_lock_irqsave(&xhci->lock, flags);

        ep->stop_cmds_pending--;
        if (xhci->xhc_state & XHCI_STATE_REMOVING) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but no command pending, "
                                "exiting.");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
        /* Oops, HC is dead or dying or at least not responding to the stop
         * endpoint command.
         */
        xhci->xhc_state |= XHCI_STATE_DYING;
        /* Disable interrupts from the host controller and start halting it */
        xhci_quiesce(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        ret = xhci_halt(xhci);

        spin_lock_irqsave(&xhci->lock, flags);
        if (ret < 0) {
                /* This is bad; the host is not responding to commands and it's
                 * not allowing itself to be halted.  At least interrupts are
                 * disabled.  If we call usb_hc_died(), it will attempt to
                 * disconnect all device drivers under this host.  Those
                 * disconnect() methods will wait for all URBs to be unlinked,
                 * so we must complete them.
                 */
                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
                xhci_warn(xhci, "Completing active URBs anyway.\n");
                /* We could turn all TDs on the rings to no-ops.  This won't
                 * help if the host has cached part of the ring, and is slow if
                 * we want to preserve the cycle bit.  Skip it and hope the
                 * host doesn't touch the memory.
                 */
        }
        for (i = 0; i < MAX_HC_SLOTS; i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++)
                        xhci_kill_endpoint_urbs(xhci, i, j);
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Calling usb_hc_died()");
        usb_hc_died(xhci_to_hcd(xhci));
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "xHCI host controller is dead.");
}
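
/*
 * Illustrative sketch (not part of this file, an assumption about the
 * companion code): the watchdog above is armed when a stop endpoint command
 * is queued.  Per-endpoint initialization lives in xhci-mem.c, roughly:
 *
 *      setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
 *                  (unsigned long)ep);
 *
 * and the URB-cancel path bumps ep->stop_cmds_pending and arms the timer
 * with something like
 *
 *      mod_timer(&ep->stop_cmd_timer, jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
 *
 * before queueing the stop endpoint command.
 */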

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_virt_device *dev,
                struct xhci_ring *ep_ring,
                unsigned int ep_index)
{
        union xhci_trb *dequeue_temp;
        int num_trbs_free_temp;
        bool revert = false;

        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;

        /* If we get two back-to-back stalls, and the first stalled transfer
         * ends just before a link TRB, the dequeue pointer will be left on
         * the link TRB by the code in the while loop.  So we have to update
         * the dequeue pointer one segment further, or we'll jump off
         * the segment into la-la-land.
         */
        if (trb_is_link(ep_ring->dequeue)) {
                ep_ring->deq_seg = ep_ring->deq_seg->next;
                ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }

        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
                ep_ring->dequeue++;
                if (trb_is_link(ep_ring->dequeue)) {
                        if (ep_ring->dequeue ==
                                        dev->eps[ep_index].queued_deq_ptr)
                                break;
                        ep_ring->deq_seg = ep_ring->deq_seg->next;
                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
                }
                if (ep_ring->dequeue == dequeue_temp) {
                        revert = true;
                        break;
                }
        }

        if (revert) {
                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
                ep_ring->num_trbs_free = num_trbs_free_temp;
        }
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so
 * that the TD queueing code can ring the doorbell again.  We also need to ring
 * the endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, u32 cmd_comp_code)
{
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        dev = xhci->devs[slot_id];
        ep = &dev->eps[ep_index];

        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
                                stream_id);
                /* XXX: Harmless??? */
                goto cleanup;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

        if (cmd_comp_code != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (cmd_comp_code) {
                case COMP_TRB_ERR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
                        break;
                case COMP_CTX_STATE:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
                        ep_state = GET_EP_CTX_STATE(ep_ctx);
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Slot state = %u, EP state = %u",
                                        slot_state, ep_state);
                        break;
                case COMP_EBADSLT:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
                                        slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
                                        cmd_comp_code);
                        break;
                }
                /* OK what do we do now?  The endpoint state is hosed, and we
                 * should never get to this point if the synchronization
                 * between queueing and endpoint state is correct.  This might
                 * happen if the device gets disconnected after we've finished
                 * cancelling URBs, which might not be an error...
                 */
        } else {
                u64 deq;
                /* 4.6.10 deq ptr is written to the stream ctx for streams */
                if (ep->ep_state & EP_HAS_STREAMS) {
                        struct xhci_stream_ctx *ctx =
                                &ep->stream_info->stream_ctx_array[stream_id];
                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
                } else {
                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
                                         ep->queued_deq_ptr) == deq) {
                        /* Update the ring's dequeue segment and dequeue
                         * pointer to reflect the new position.
                         */
                        update_ring_for_set_deq_completion(xhci, dev,
                                ep_ring, ep_index);
                } else {
                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                  ep->queued_deq_seg, ep->queued_deq_ptr);
                }
        }

cleanup:
        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
        dev->eps[ep_index].queued_deq_seg = NULL;
        dev->eps[ep_index].queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, u32 cmd_comp_code)
{
        unsigned int ep_index;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                "Ignoring reset ep completion code of %u", cmd_comp_code);

        /* HW with the reset endpoint quirk needs to have a configure endpoint
         * command complete before the endpoint can be used.  Queue that here
         * because the HW can't handle two commands being queued in a row.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
                struct xhci_command *command;
                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
                if (!command) {
                        xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
                        return;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Queueing configure endpoint command");
                xhci_queue_configure_endpoint(xhci, command,
                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Clear our internal halted state */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
        }
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                struct xhci_command *command, u32 cmd_comp_code)
{
        if (cmd_comp_code == COMP_SUCCESS)
                command->slot_id = slot_id;
        else
                command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *virt_dev;

        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;
        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
        xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                struct xhci_event_cmd *event, u32 cmd_comp_code)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int ep_index;
        unsigned int ep_state;
        u32 add_flags, drop_flags;

        /*
         * Configure endpoint commands can come from the USB core
         * configuration or alt setting changes, or because the HW
         * needed an extra configure endpoint command after a reset
         * endpoint command or streams were being configured.
         * If the command was for a halted endpoint, the xHCI driver
         * is not waiting on the configure endpoint command.
         */
        virt_dev = xhci->devs[slot_id];
        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "Could not get input context, bad type.\n");
                return;
        }

        add_flags = le32_to_cpu(ctrl_ctx->add_flags);
        drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
        /* Input ctx add_flags are the endpoint index plus one */
        ep_index = xhci_last_valid_endpoint(add_flags) - 1;

        /* A usb_set_interface() call directly after clearing a halted
         * condition may race on this quirky hardware.  Not worth
         * worrying about, since this is prototype hardware.  Not sure
         * if this will work for streams, but streams support was
         * untested on this prototype.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
                        ep_index != (unsigned int) -1 &&
                        add_flags - SLOT_FLAG == drop_flags) {
                ep_state = virt_dev->eps[ep_index].ep_state;
                if (!(ep_state & EP_HALTED))
                        return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Completed config ep cmd - "
                                "last ep index = %d, state = %d",
                                ep_index, ep_state);
                /* Clear internal halted state and restart ring(s) */
                virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }
        return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
                struct xhci_event_cmd *event)
{
        xhci_dbg(xhci, "Completed reset device command.\n");
        if (!xhci->devs[slot_id])
                xhci_warn(xhci, "Reset device command completion "
                                "for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
{
        if (!(xhci->quirks & XHCI_NEC_HOST)) {
                xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
                return;
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                        "NEC firmware version %2x.%02x",
                        NEC_FW_MAJOR(le32_to_cpu(event->status)),
                        NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
        list_del(&cmd->cmd_list);

        if (cmd->completion) {
                cmd->status = status;
                complete(cmd->completion);
        } else {
                kfree(cmd);
        }
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
        struct xhci_command *cur_cmd, *tmp_cmd;
        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
                xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}
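
/*
 * Illustrative sketch (not part of the driver): how a submitter uses the
 * pieces above.  A command allocated with a completion is waited on and
 * freed by the caller; one allocated without is freed here in
 * xhci_complete_del_and_free_cmd().  Locking and error handling elided:
 *
 *      cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
 *      xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
 *      xhci_ring_cmd_db(xhci);
 *      wait_for_completion(cmd->completion);
 *      // cmd->status now holds the completion code from the event ring
 *      xhci_free_command(xhci, cmd);
 */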

/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs.  If there are other commands waiting then restart the ring and
 * kick the timer.  This must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
                                         struct xhci_command *cur_cmd)
{
        struct xhci_command *i_cmd, *tmp_cmd;
        u32 cycle_state;

        /* Turn all aborted commands in list to no-ops, then restart */
        list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
                                 cmd_list) {

                if (i_cmd->status != COMP_CMD_ABORT)
                        continue;

                i_cmd->status = COMP_CMD_STOP;

                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
                         i_cmd->command_trb);
                /* get cycle state from the original cmd trb */
                cycle_state = le32_to_cpu(
                        i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
                /* modify the command trb to no-op command */
                i_cmd->command_trb->generic.field[0] = 0;
                i_cmd->command_trb->generic.field[1] = 0;
                i_cmd->command_trb->generic.field[2] = 0;
                i_cmd->command_trb->generic.field[3] = cpu_to_le32(
                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

                /*
                 * The caller waiting on the completion is woken when a
                 * command completion event is received for these no-op
                 * commands.
                 */
        }

        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        /* ring command ring doorbell to restart the command ring */
        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
            !(xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci->current_cmd = cur_cmd;
                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
                xhci_ring_cmd_db(xhci);
        }
        return;
}

void xhci_handle_command_timeout(unsigned long data)
{
        struct xhci_hcd *xhci;
        int ret;
        unsigned long flags;
        u64 hw_ring_state;
        bool second_timeout = false;
        xhci = (struct xhci_hcd *) data;

        /* mark this command to be cancelled */
        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->current_cmd) {
                if (xhci->current_cmd->status == COMP_CMD_ABORT)
                        second_timeout = true;
                xhci->current_cmd->status = COMP_CMD_ABORT;
        }

        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING)) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "Command timeout\n");
                ret = xhci_abort_cmd_ring(xhci);
                if (unlikely(ret == -ESHUTDOWN)) {
                        xhci_err(xhci, "Abort command ring failed\n");
                        xhci_cleanup_command_queue(xhci);
                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
                }
                return;
        }

        /* command ring failed to restart, or host removed.  Bail out */
        if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);
                return;
        }

        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
        spin_unlock_irqrestore(&xhci->lock, flags);
        return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
{
        int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        u64 cmd_dma;
        dma_addr_t cmd_dequeue_dma;
        u32 cmd_comp_code;
        union xhci_trb *cmd_trb;
        struct xhci_command *cmd;
        u32 cmd_type;

        cmd_dma = le64_to_cpu(event->cmd_trb);
        cmd_trb = xhci->cmd_ring->dequeue;
        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                        cmd_trb);
        /*
         * Check whether the completion event is for the command we are
         * tracking internally.
         */
        if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
                xhci_warn(xhci,
                          "ERROR mismatched command completion event\n");
                return;
        }

        cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

        del_timer(&xhci->cmd_timer);

        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
        if (cmd_comp_code == COMP_CMD_STOP) {
                xhci_handle_stopped_cmd_ring(xhci, cmd);
                return;
        }

        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
                xhci_err(xhci,
                         "Command completion event does not match command\n");
                return;
        }

        /*
         * Host aborted the command ring, check if the current command was
         * supposed to be aborted, otherwise continue normally.
         * The command ring is stopped now, but the xHC will issue a Command
         * Ring Stopped event which will cause us to restart it.
         */
        if (cmd_comp_code == COMP_CMD_ABORT) {
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
                if (cmd->status == COMP_CMD_ABORT)
                        goto event_handled;
        }

        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
        switch (cmd_type) {
        case TRB_ENABLE_SLOT:
                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
                break;
        case TRB_DISABLE_SLOT:
                xhci_handle_cmd_disable_slot(xhci, slot_id);
                break;
        case TRB_CONFIG_EP:
                if (!cmd->completion)
                        xhci_handle_cmd_config_ep(xhci, slot_id, event,
                                                  cmd_comp_code);
                break;
        case TRB_EVAL_CONTEXT:
                break;
        case TRB_ADDR_DEV:
                break;
        case TRB_STOP_RING:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
                break;
        case TRB_SET_DEQ:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_CMD_NOOP:
                /* Is this an aborted command turned to NO-OP? */
                if (cmd->status == COMP_CMD_STOP)
                        cmd_comp_code = COMP_CMD_STOP;
                break;
        case TRB_RESET_EP:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3])));
                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_RESET_DEV:
                /* SLOT_ID field in reset device cmd completion event TRB is 0.
                 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
                 */
                slot_id = TRB_TO_SLOT_ID(
                                le32_to_cpu(cmd_trb->generic.field[3]));
                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
                break;
        case TRB_NEC_GET_FW:
                xhci_handle_cmd_nec_get_fw(xhci, event);
                break;
        default:
                /* Skip over unknown commands on the event ring */
                xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
                break;
        }

        /* restart timer if this wasn't the last command */
        if (cmd->cmd_list.next != &xhci->cmd_list) {
                xhci->current_cmd = list_entry(cmd->cmd_list.next,
                                               struct xhci_command, cmd_list);
                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
        }

event_handled:
        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

        inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
                union xhci_trb *event)
{
        u32 trb_type;

        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
                handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each
 * of the split roothubs' port arrays and bus state arrays.  Add one to it in
 * order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
                struct xhci_hcd *xhci, u32 port_id)
{
        unsigned int i;
        unsigned int num_similar_speed_ports = 0;

        /* port_id from the hardware is 1-based, but port_array[],
         * usb3_ports[], and usb2_ports are 0-based indexes.  Count the number
         * of similar speed ports, up to 1 port before this port.
         */
        for (i = 0; i < (port_id - 1); i++) {
                u8 port_speed = xhci->port_array[i];

                /*
                 * Skip ports that don't have known speeds, or have duplicate
                 * Extended Capabilities port speed entries.
                 */
                if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
                        continue;

                /*
                 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
                 * 1.1 ports are under the USB 2.0 hub.  If the port speed
                 * matches the device speed, it's a similar speed port.
                 */
                if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
                        num_similar_speed_ports++;
        }
        return num_similar_speed_ports;
}
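
/*
 * Worked example (hypothetical port layout): on a host whose port_array is
 * { 0x02, 0x02, 0x03, 0x03 } (two USB 2.0 ports followed by two USB 3.0
 * ports), a Port Status Change Event for hardware port 4 on the USB 3.0
 * roothub counts exactly one earlier similar-speed port (hardware port 3),
 * so the function returns faked port index 1, i.e. the second entry of
 * xhci->usb3_ports[].
 */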

static void handle_device_notification(struct xhci_hcd *xhci,
                union xhci_trb *event)
{
        u32 slot_id;
        struct usb_device *udev;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "Device Notification event for "
                                "unused slot %u\n", slot_id);
                return;
        }

        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
                        slot_id);
        udev = xhci->devs[slot_id]->udev;
        if (udev && udev->parent)
                usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
                union xhci_trb *event)
{
        struct usb_hcd *hcd;
        u32 port_id;
        u32 temp, temp1;
        int max_ports;
        int slot_id;
        unsigned int faked_port_index;
        u8 major_revision;
        struct xhci_bus_state *bus_state;
        __le32 __iomem **port_array;
        bool bogus_port_status = false;

        /* Port status change events always have a successful completion code */
        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
                xhci_warn(xhci,
                          "WARN: xHC returned failed port status event\n");

        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Invalid port id %d\n", port_id);
                inc_deq(xhci, xhci->event_ring);
                return;
        }

        /* Figure out which usb_hcd this port is attached to:
         * is it a USB 3.0 port or a USB 2.0/1.1 port?
         */
        major_revision = xhci->port_array[port_id - 1];

        /* Find the right roothub. */
        hcd = xhci_to_hcd(xhci);
        if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
                hcd = xhci->shared_hcd;

        if (major_revision == 0) {
                xhci_warn(xhci, "Event for port %u not in "
                                "Extended Capabilities, ignoring.\n",
                                port_id);
                bogus_port_status = true;
                goto cleanup;
        }
        if (major_revision == DUPLICATE_ENTRY) {
                xhci_warn(xhci, "Event for port %u duplicated in "
                                "Extended Capabilities, ignoring.\n",
                                port_id);
                bogus_port_status = true;
                goto cleanup;
        }

        /*
         * Hardware port IDs reported by a Port Status Change Event include USB
         * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
         * resume event, but we first need to translate the hardware port ID
         * into the index into the ports on the correct split roothub, and the
         * correct bus_state structure.
         */
        bus_state = &xhci->bus_state[hcd_index(hcd)];
        if (hcd->speed >= HCD_USB3)
                port_array = xhci->usb3_ports;
        else
                port_array = xhci->usb2_ports;
        /* Find the faked port hub number */
        faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
                        port_id);

        temp = readl(port_array[faked_port_index]);
        if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);
        }

        if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
                bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);

                temp1 = readl(&xhci->op_regs->command);
                if (!(temp1 & CMD_RUN)) {
                        xhci_warn(xhci, "xHC is not running.\n");
                        goto cleanup;
                }

                if (DEV_SUPERSPEED_ANY(temp)) {
                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
                        /* Set a flag to say the port signaled remote wakeup,
                         * so we can tell the difference between the end of
                         * device and host initiated resume.
                         */
                        bus_state->port_remote_wakeup |= 1 << faked_port_index;
                        xhci_test_and_clear_bit(xhci, port_array,
                                        faked_port_index, PORT_PLC);
                        xhci_set_link_state(xhci, port_array, faked_port_index,
                                                XDEV_U0);
                        /* Need to wait until the next link state change
                         * indicates the device is actually in U0.
                         */
                        bogus_port_status = true;
                        goto cleanup;
                } else if (!test_bit(faked_port_index,
                                     &bus_state->resuming_ports)) {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        bus_state->resume_done[faked_port_index] = jiffies +
                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
                        set_bit(faked_port_index, &bus_state->resuming_ports);
                        mod_timer(&hcd->rh_timer,
                                  bus_state->resume_done[faked_port_index]);
                        /* Do the rest in GetPortStatus */
                }
        }

        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
                        DEV_SUPERSPEED_ANY(temp)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
                /* We've just brought the device into U0 through either the
                 * Resume state after a device remote wakeup, or through the
                 * U3Exit state after a host-initiated resume.  If it's a
                 * device initiated remote wake, don't pass up the link state
                 * change, so the roothub behavior is consistent with external
                 * USB 3.0 hub behavior.
                 */
                slot_id = xhci_find_slot_id_by_port(hcd, xhci,
                                faked_port_index + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci_ring_device(xhci, slot_id);
                if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
                        bus_state->port_remote_wakeup &=
                                ~(1 << faked_port_index);
                        xhci_test_and_clear_bit(xhci, port_array,
                                        faked_port_index, PORT_PLC);
                        usb_wakeup_notification(hcd->self.root_hub,
                                        faked_port_index + 1);
                        bogus_port_status = true;
                        goto cleanup;
                }
        }

        /*
         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
         * RExit to a disconnect state).  If so, let the driver know it's
         * out of the RExit state.
         */
1648 */ 1649 if (!DEV_SUPERSPEED_ANY(temp) && 1650 test_and_clear_bit(faked_port_index, 1651 &bus_state->rexit_ports)) { 1652 complete(&bus_state->rexit_done[faked_port_index]); 1653 bogus_port_status = true; 1654 goto cleanup; 1655 } 1656 1657 if (hcd->speed < HCD_USB3) 1658 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, 1659 PORT_PLC); 1660 1661 cleanup: 1662 /* Update event ring dequeue pointer before dropping the lock */ 1663 inc_deq(xhci, xhci->event_ring); 1664 1665 /* Don't make the USB core poll the roothub if we got a bad port status 1666 * change event. Besides, at that point we can't tell which roothub 1667 * (USB 2.0 or USB 3.0) to kick. 1668 */ 1669 if (bogus_port_status) 1670 return; 1671 1672 /* 1673 * xHCI port-status-change events occur when the "or" of all the 1674 * status-change bits in the portsc register changes from 0 to 1. 1675 * New status changes won't cause an event if any other change 1676 * bits are still set. When an event occurs, switch over to 1677 * polling to avoid losing status changes. 1678 */ 1679 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); 1680 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 1681 spin_unlock(&xhci->lock); 1682 /* Pass this up to the core */ 1683 usb_hcd_poll_rh_status(hcd); 1684 spin_lock(&xhci->lock); 1685 } 1686 1687 /* 1688 * This TD is defined by the TRBs starting at start_trb in start_seg and ending 1689 * at end_trb, which may be in another segment. If the suspect DMA address is a 1690 * TRB in this TD, this function returns that TRB's segment. Otherwise it 1691 * returns 0. 1692 */ 1693 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, 1694 struct xhci_segment *start_seg, 1695 union xhci_trb *start_trb, 1696 union xhci_trb *end_trb, 1697 dma_addr_t suspect_dma, 1698 bool debug) 1699 { 1700 dma_addr_t start_dma; 1701 dma_addr_t end_seg_dma; 1702 dma_addr_t end_trb_dma; 1703 struct xhci_segment *cur_seg; 1704 1705 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); 1706 cur_seg = start_seg; 1707 1708 do { 1709 if (start_dma == 0) 1710 return NULL; 1711 /* We may get an event for a Link TRB in the middle of a TD */ 1712 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1713 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1714 /* If the end TRB isn't in this segment, this is set to 0 */ 1715 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 1716 1717 if (debug) 1718 xhci_warn(xhci, 1719 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", 1720 (unsigned long long)suspect_dma, 1721 (unsigned long long)start_dma, 1722 (unsigned long long)end_trb_dma, 1723 (unsigned long long)cur_seg->dma, 1724 (unsigned long long)end_seg_dma); 1725 1726 if (end_trb_dma > 0) { 1727 /* The end TRB is in this segment, so suspect should be here */ 1728 if (start_dma <= end_trb_dma) { 1729 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 1730 return cur_seg; 1731 } else { 1732 /* Case for one segment with 1733 * a TD wrapped around to the top 1734 */ 1735 if ((suspect_dma >= start_dma && 1736 suspect_dma <= end_seg_dma) || 1737 (suspect_dma >= cur_seg->dma && 1738 suspect_dma <= end_trb_dma)) 1739 return cur_seg; 1740 } 1741 return NULL; 1742 } else { 1743 /* Might still be somewhere in this segment */ 1744 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1745 return cur_seg; 1746 } 1747 cur_seg = cur_seg->next; 1748 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1749 } while (cur_seg != start_seg); 1750 1751 return NULL; 1752 } 1753 1754 static void 
xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1755 unsigned int slot_id, unsigned int ep_index, 1756 unsigned int stream_id, 1757 struct xhci_td *td, union xhci_trb *ep_trb) 1758 { 1759 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1760 struct xhci_command *command; 1761 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); 1762 if (!command) 1763 return; 1764 1765 ep->ep_state |= EP_HALTED; 1766 ep->stopped_stream = stream_id; 1767 1768 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); 1769 xhci_cleanup_stalled_ring(xhci, ep_index, td); 1770 1771 ep->stopped_stream = 0; 1772 1773 xhci_ring_cmd_db(xhci); 1774 } 1775 1776 /* Check if an error has halted the endpoint ring. The class driver will 1777 * clean up the halt for a non-default control endpoint if we indicate a stall. 1778 * However, a babble and other errors also halt the endpoint ring, and the class 1779 * driver won't clear the halt in that case, so we need to issue a Set Transfer 1780 * Ring Dequeue Pointer command manually. 1781 */ 1782 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, 1783 struct xhci_ep_ctx *ep_ctx, 1784 unsigned int trb_comp_code) 1785 { 1786 /* TRB completion codes that may require a manual halt cleanup */ 1787 if (trb_comp_code == COMP_TX_ERR || 1788 trb_comp_code == COMP_BABBLE || 1789 trb_comp_code == COMP_SPLIT_ERR) 1790 /* The 0.95 spec says a babbling control endpoint 1791 * is not halted. The 0.96 spec says it is. Some HW 1792 * claims to be 0.95 compliant, but it halts the control 1793 * endpoint anyway. Check if a babble halted the 1794 * endpoint. 1795 */ 1796 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) 1797 return 1; 1798 1799 return 0; 1800 } 1801 1802 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) 1803 { 1804 if (trb_comp_code >= 224 && trb_comp_code <= 255) { 1805 /* Vendor defined "informational" completion code, 1806 * treat as not-an-error. 1807 */ 1808 xhci_dbg(xhci, "Vendor defined info completion code %u\n", 1809 trb_comp_code); 1810 xhci_dbg(xhci, "Treating code as success.\n"); 1811 return 1; 1812 } 1813 return 0; 1814 } 1815 1816 /* 1817 * Finish the TD processing: remove the TD from the endpoint's td list and 1818 * give the URB back once all of its TDs have completed. 1819 */ 1820 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, 1821 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 1822 struct xhci_virt_ep *ep, int *status, bool skip) 1823 { 1824 struct xhci_virt_device *xdev; 1825 struct xhci_ring *ep_ring; 1826 unsigned int slot_id; 1827 int ep_index; 1828 struct urb *urb = NULL; 1829 struct xhci_ep_ctx *ep_ctx; 1830 struct urb_priv *urb_priv; 1831 u32 trb_comp_code; 1832 1833 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1834 xdev = xhci->devs[slot_id]; 1835 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1836 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1837 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1838 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1839 1840 if (skip) 1841 goto td_cleanup; 1842 1843 if (trb_comp_code == COMP_STOP_INVAL || 1844 trb_comp_code == COMP_STOP || 1845 trb_comp_code == COMP_STOP_SHORT) { 1846 /* The Endpoint Stop Command completion will take care of any 1847 * stopped TDs. A stopped TD may be restarted, so don't update 1848 * the ring dequeue pointer or take this TD off any lists yet.
1849 */ 1850 ep->stopped_td = td; 1851 return 0; 1852 } 1853 if (trb_comp_code == COMP_STALL || 1854 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, 1855 trb_comp_code)) { 1856 /* Issue a reset endpoint command to clear the host side 1857 * halt, followed by a set dequeue command to move the 1858 * dequeue pointer past the TD. 1859 * The class driver clears the device side halt later. 1860 */ 1861 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 1862 ep_ring->stream_id, td, ep_trb); 1863 } else { 1864 /* Update ring dequeue pointer */ 1865 while (ep_ring->dequeue != td->last_trb) 1866 inc_deq(xhci, ep_ring); 1867 inc_deq(xhci, ep_ring); 1868 } 1869 1870 td_cleanup: 1871 /* Clean up the endpoint's TD list */ 1872 urb = td->urb; 1873 urb_priv = urb->hcpriv; 1874 1875 /* if a bounce buffer was used to align this td then unmap it */ 1876 if (td->bounce_seg) 1877 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); 1878 1879 /* Do one last check of the actual transfer length. 1880 * If the host controller said we transferred more data than the buffer 1881 * length, urb->actual_length will be a very big number (since it's 1882 * unsigned). Play it safe and say we didn't transfer anything. 1883 */ 1884 if (urb->actual_length > urb->transfer_buffer_length) { 1885 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", 1886 urb->transfer_buffer_length, urb->actual_length); 1887 urb->actual_length = 0; 1888 *status = 0; 1889 } 1890 list_del_init(&td->td_list); 1891 /* Was this TD slated to be cancelled but completed anyway? */ 1892 if (!list_empty(&td->cancelled_td_list)) 1893 list_del_init(&td->cancelled_td_list); 1894 1895 inc_td_cnt(urb); 1896 /* Giveback the urb when all the tds are completed */ 1897 if (last_td_in_urb(td)) { 1898 if ((urb->actual_length != urb->transfer_buffer_length && 1899 (urb->transfer_flags & URB_SHORT_NOT_OK)) || 1900 (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) 1901 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", 1902 urb, urb->actual_length, 1903 urb->transfer_buffer_length, *status); 1904 1905 /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ 1906 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 1907 *status = 0; 1908 xhci_giveback_urb_in_irq(xhci, td, *status); 1909 } 1910 return 0; 1911 } 1912 1913 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ 1914 static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, 1915 union xhci_trb *stop_trb) 1916 { 1917 u32 sum; 1918 union xhci_trb *trb = ring->dequeue; 1919 struct xhci_segment *seg = ring->deq_seg; 1920 1921 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { 1922 if (!trb_is_noop(trb) && !trb_is_link(trb)) 1923 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); 1924 } 1925 return sum; 1926 } 1927 1928 /* 1929 * Process control tds, update urb status and actual_length. 
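 *
 * A rough map of the stages as this function infers them (a summary of the
 * checks below, not extra driver state): an event for the TRB at
 * ep_ring->dequeue is the setup stage, an event for td->last_trb is the
 * status stage, and anything in between is the data stage.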
1930 */ 1931 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1932 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 1933 struct xhci_virt_ep *ep, int *status) 1934 { 1935 struct xhci_virt_device *xdev; 1936 struct xhci_ring *ep_ring; 1937 unsigned int slot_id; 1938 int ep_index; 1939 struct xhci_ep_ctx *ep_ctx; 1940 u32 trb_comp_code; 1941 u32 remaining, requested; 1942 bool on_data_stage; 1943 1944 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1945 xdev = xhci->devs[slot_id]; 1946 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1947 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1948 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1949 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1950 requested = td->urb->transfer_buffer_length; 1951 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 1952 1953 /* not setup (dequeue), or status stage means we are at data stage */ 1954 on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb); 1955 1956 switch (trb_comp_code) { 1957 case COMP_SUCCESS: 1958 if (ep_trb != td->last_trb) { 1959 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", 1960 on_data_stage ? "data" : "setup"); 1961 *status = -ESHUTDOWN; 1962 break; 1963 } 1964 *status = 0; 1965 break; 1966 case COMP_SHORT_TX: 1967 *status = 0; 1968 break; 1969 case COMP_STOP_SHORT: 1970 if (on_data_stage) 1971 td->urb->actual_length = remaining; 1972 else 1973 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); 1974 goto finish_td; 1975 case COMP_STOP: 1976 if (on_data_stage) 1977 td->urb->actual_length = requested - remaining; 1978 goto finish_td; 1979 case COMP_STOP_INVAL: 1980 goto finish_td; 1981 default: 1982 if (!xhci_requires_manual_halt_cleanup(xhci, 1983 ep_ctx, trb_comp_code)) 1984 break; 1985 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", 1986 trb_comp_code, ep_index); 1987 /* else fall through */ 1988 case COMP_STALL: 1989 /* Did we transfer part of the data (middle) phase? */ 1990 if (on_data_stage) 1991 td->urb->actual_length = requested - remaining; 1992 else if (!td->urb_length_set) 1993 td->urb->actual_length = 0; 1994 goto finish_td; 1995 } 1996 1997 /* stopped at setup stage, no data transferred */ 1998 if (ep_trb == ep_ring->dequeue) 1999 goto finish_td; 2000 2001 /* 2002 * if on data stage then update the actual_length of the URB and flag it 2003 * as set, so it won't be overwritten in the event for the last TRB. 2004 */ 2005 if (on_data_stage) { 2006 td->urb_length_set = true; 2007 td->urb->actual_length = requested - remaining; 2008 xhci_dbg(xhci, "Waiting for status stage event\n"); 2009 return 0; 2010 } 2011 2012 /* at status stage */ 2013 if (!td->urb_length_set) 2014 td->urb->actual_length = requested; 2015 2016 finish_td: 2017 return finish_td(xhci, td, ep_trb, event, ep, status, false); 2018 } 2019 2020 /* 2021 * Process isochronous tds, update urb packet status and actual_length. 
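 *
 * Each urb->iso_frame_desc[] entry corresponds to one TD: the code below
 * fills in frame->status and frame->actual_length for the current packet
 * and accumulates the per-packet length into urb->actual_length.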
2022 */ 2023 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2024 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 2025 struct xhci_virt_ep *ep, int *status) 2026 { 2027 struct xhci_ring *ep_ring; 2028 struct urb_priv *urb_priv; 2029 int idx; 2030 struct usb_iso_packet_descriptor *frame; 2031 u32 trb_comp_code; 2032 bool sum_trbs_for_length = false; 2033 u32 remaining, requested, ep_trb_len; 2034 int short_framestatus; 2035 2036 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2037 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2038 urb_priv = td->urb->hcpriv; 2039 idx = urb_priv->td_cnt; 2040 frame = &td->urb->iso_frame_desc[idx]; 2041 requested = frame->length; 2042 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2043 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); 2044 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 2045 -EREMOTEIO : 0; 2046 2047 /* handle completion code */ 2048 switch (trb_comp_code) { 2049 case COMP_SUCCESS: 2050 if (remaining) { 2051 frame->status = short_framestatus; 2052 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) 2053 sum_trbs_for_length = true; 2054 break; 2055 } 2056 frame->status = 0; 2057 break; 2058 case COMP_SHORT_TX: 2059 frame->status = short_framestatus; 2060 sum_trbs_for_length = true; 2061 break; 2062 case COMP_BW_OVER: 2063 frame->status = -ECOMM; 2064 break; 2065 case COMP_BUFF_OVER: 2066 case COMP_BABBLE: 2067 frame->status = -EOVERFLOW; 2068 break; 2069 case COMP_DEV_ERR: 2070 case COMP_STALL: 2071 frame->status = -EPROTO; 2072 break; 2073 case COMP_TX_ERR: 2074 frame->status = -EPROTO; 2075 if (ep_trb != td->last_trb) 2076 return 0; 2077 break; 2078 case COMP_STOP: 2079 sum_trbs_for_length = true; 2080 break; 2081 case COMP_STOP_SHORT: 2082 /* field normally containing residue now contains transferred */ 2083 frame->status = short_framestatus; 2084 requested = remaining; 2085 break; 2086 case COMP_STOP_INVAL: 2087 requested = 0; 2088 remaining = 0; 2089 break; 2090 default: 2091 sum_trbs_for_length = true; 2092 frame->status = -1; 2093 break; 2094 } 2095 2096 if (sum_trbs_for_length) 2097 frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) + 2098 ep_trb_len - remaining; 2099 else 2100 frame->actual_length = requested; 2101 2102 td->urb->actual_length += frame->actual_length; 2103 2104 return finish_td(xhci, td, ep_trb, event, ep, status, false); 2105 } 2106 2107 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2108 struct xhci_transfer_event *event, 2109 struct xhci_virt_ep *ep, int *status) 2110 { 2111 struct xhci_ring *ep_ring; 2112 struct urb_priv *urb_priv; 2113 struct usb_iso_packet_descriptor *frame; 2114 int idx; 2115 2116 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2117 urb_priv = td->urb->hcpriv; 2118 idx = urb_priv->td_cnt; 2119 frame = &td->urb->iso_frame_desc[idx]; 2120 2121 /* The transfer is partly done. */ 2122 frame->status = -EXDEV; 2123 2124 /* calc actual length */ 2125 frame->actual_length = 0; 2126 2127 /* Update ring dequeue pointer */ 2128 while (ep_ring->dequeue != td->last_trb) 2129 inc_deq(xhci, ep_ring); 2130 inc_deq(xhci, ep_ring); 2131 2132 return finish_td(xhci, td, NULL, event, ep, status, true); 2133 } 2134 2135 /* 2136 * Process bulk and interrupt tds, update urb status and actual_length.
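 *
 * When an event stops mid-TD, the transferred length is reconstructed by
 * summing the TRBs already consumed (sum_trb_lengths()) instead of trusting
 * a single event TRB; see the ep_trb != td->last_trb path below.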
2137 */ 2138 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 2139 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 2140 struct xhci_virt_ep *ep, int *status) 2141 { 2142 struct xhci_ring *ep_ring; 2143 u32 trb_comp_code; 2144 u32 remaining, requested, ep_trb_len; 2145 2146 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2147 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2148 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2149 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); 2150 requested = td->urb->transfer_buffer_length; 2151 2152 switch (trb_comp_code) { 2153 case COMP_SUCCESS: 2154 /* handle success with untransferred data as short packet */ 2155 if (ep_trb != td->last_trb || remaining) { 2156 xhci_warn(xhci, "WARN Successful completion on short TX\n"); 2157 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", 2158 td->urb->ep->desc.bEndpointAddress, 2159 requested, remaining); 2160 } 2161 *status = 0; 2162 break; 2163 case COMP_SHORT_TX: 2164 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", 2165 td->urb->ep->desc.bEndpointAddress, 2166 requested, remaining); 2167 *status = 0; 2168 break; 2169 case COMP_STOP_SHORT: 2170 td->urb->actual_length = remaining; 2171 goto finish_td; 2172 case COMP_STOP_INVAL: 2173 /* stopped on ep trb with invalid length, exclude it */ 2174 ep_trb_len = 0; 2175 remaining = 0; 2176 break; 2177 default: 2178 /* do nothing */ 2179 break; 2180 } 2181 2182 if (ep_trb == td->last_trb) 2183 td->urb->actual_length = requested - remaining; 2184 else 2185 td->urb->actual_length = 2186 sum_trb_lengths(xhci, ep_ring, ep_trb) + 2187 ep_trb_len - remaining; 2188 finish_td: 2189 if (remaining > requested) { 2190 xhci_warn(xhci, "bad transfer trb length %d in event trb\n", 2191 remaining); 2192 td->urb->actual_length = 0; 2193 } 2194 return finish_td(xhci, td, ep_trb, event, ep, status, false); 2195 } 2196 2197 /* 2198 * If this function returns an error condition, it means it got a Transfer 2199 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 2200 * At this point, the host controller is probably hosed and should be reset. 
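 *
 * (In the body below, a bad slot or a disabled endpoint yields -ENODEV,
 * while a TRB DMA pointer outside the current TD yields -ESHUTDOWN.)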
2201 */ 2202 static int handle_tx_event(struct xhci_hcd *xhci, 2203 struct xhci_transfer_event *event) 2204 __releases(&xhci->lock) 2205 __acquires(&xhci->lock) 2206 { 2207 struct xhci_virt_device *xdev; 2208 struct xhci_virt_ep *ep; 2209 struct xhci_ring *ep_ring; 2210 unsigned int slot_id; 2211 int ep_index; 2212 struct xhci_td *td = NULL; 2213 dma_addr_t ep_trb_dma; 2214 struct xhci_segment *ep_seg; 2215 union xhci_trb *ep_trb; 2216 int status = -EINPROGRESS; 2217 struct xhci_ep_ctx *ep_ctx; 2218 struct list_head *tmp; 2219 u32 trb_comp_code; 2220 int td_num = 0; 2221 bool handling_skipped_tds = false; 2222 2223 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2224 xdev = xhci->devs[slot_id]; 2225 if (!xdev) { 2226 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 2227 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2228 (unsigned long long) xhci_trb_virt_to_dma( 2229 xhci->event_ring->deq_seg, 2230 xhci->event_ring->dequeue), 2231 lower_32_bits(le64_to_cpu(event->buffer)), 2232 upper_32_bits(le64_to_cpu(event->buffer)), 2233 le32_to_cpu(event->transfer_len), 2234 le32_to_cpu(event->flags)); 2235 xhci_dbg(xhci, "Event ring:\n"); 2236 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2237 return -ENODEV; 2238 } 2239 2240 /* Endpoint ID is 1 based, our index is zero based */ 2241 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 2242 ep = &xdev->eps[ep_index]; 2243 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2244 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2245 if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { 2246 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 2247 "or incorrect stream ring\n"); 2248 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2249 (unsigned long long) xhci_trb_virt_to_dma( 2250 xhci->event_ring->deq_seg, 2251 xhci->event_ring->dequeue), 2252 lower_32_bits(le64_to_cpu(event->buffer)), 2253 upper_32_bits(le64_to_cpu(event->buffer)), 2254 le32_to_cpu(event->transfer_len), 2255 le32_to_cpu(event->flags)); 2256 xhci_dbg(xhci, "Event ring:\n"); 2257 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2258 return -ENODEV; 2259 } 2260 2261 /* Count current td numbers if ep->skip is set */ 2262 if (ep->skip) { 2263 list_for_each(tmp, &ep_ring->td_list) 2264 td_num++; 2265 } 2266 2267 ep_trb_dma = le64_to_cpu(event->buffer); 2268 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2269 /* Look for common error cases */ 2270 switch (trb_comp_code) { 2271 /* Skip codes that require special handling depending on 2272 * transfer type 2273 */ 2274 case COMP_SUCCESS: 2275 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) 2276 break; 2277 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) 2278 trb_comp_code = COMP_SHORT_TX; 2279 else 2280 xhci_warn_ratelimited(xhci, 2281 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); 2282 /* fall through: treat the short transfer like COMP_SHORT_TX */ case COMP_SHORT_TX: 2283 break; 2284 case COMP_STOP: 2285 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); 2286 break; 2287 case COMP_STOP_INVAL: 2288 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); 2289 break; 2290 case COMP_STOP_SHORT: 2291 xhci_dbg(xhci, "Stopped with short packet transfer detected\n"); 2292 break; 2293 case COMP_STALL: 2294 xhci_dbg(xhci, "Stalled endpoint\n"); 2295 ep->ep_state |= EP_HALTED; 2296 status = -EPIPE; 2297 break; 2298 case COMP_TRB_ERR: 2299 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 2300 status = -EILSEQ; 2301 break; 2302 case COMP_SPLIT_ERR: 2303 case COMP_TX_ERR: 2304 xhci_dbg(xhci, "Transfer error on endpoint\n"); 2305 status = -EPROTO; 2306 break; 2307 case COMP_BABBLE: 2308 xhci_dbg(xhci, "Babble error on endpoint\n"); 2309 status = -EOVERFLOW; 2310 break; 2311 case COMP_DB_ERR: 2312 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 2313 status = -ENOSR; 2314 break; 2315 case COMP_BW_OVER: 2316 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); 2317 break; 2318 case COMP_BUFF_OVER: 2319 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); 2320 break; 2321 case COMP_UNDERRUN: 2322 /* 2323 * When the Isoch ring is empty, the xHC will generate 2324 * a Ring Overrun Event for IN Isoch endpoint or Ring 2325 * Underrun Event for OUT Isoch endpoint. 2326 */ 2327 xhci_dbg(xhci, "underrun event on endpoint\n"); 2328 if (!list_empty(&ep_ring->td_list)) 2329 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 2330 "still with TDs queued?\n", 2331 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2332 ep_index); 2333 goto cleanup; 2334 case COMP_OVERRUN: 2335 xhci_dbg(xhci, "overrun event on endpoint\n"); 2336 if (!list_empty(&ep_ring->td_list)) 2337 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 2338 "still with TDs queued?\n", 2339 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2340 ep_index); 2341 goto cleanup; 2342 case COMP_DEV_ERR: 2343 xhci_warn(xhci, "WARN: detected an incompatible device\n"); 2344 status = -EPROTO; 2345 break; 2346 case COMP_MISSED_INT: 2347 /* 2348 * When a Missed Service Error is encountered, the xHC may have 2349 * missed one or more isoc TDs. 2350 * Set the skip flag of the ep_ring; the missed TDs will be 2351 * completed as short transfers the next time the ep_ring is processed. 2352 */ 2353 ep->skip = true; 2354 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2355 goto cleanup; 2356 case COMP_PING_ERR: 2357 ep->skip = true; 2358 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); 2359 goto cleanup; 2360 default: 2361 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2362 status = 0; 2363 break; 2364 } 2365 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n", 2366 trb_comp_code); 2367 goto cleanup; 2368 } 2369 2370 do { 2371 /* This TRB should be in the TD at the head of this ring's 2372 * TD list. 2373 */ 2374 if (list_empty(&ep_ring->td_list)) { 2375 /* 2376 * A stopped endpoint may generate an extra completion 2377 * event if the device was suspended. Don't print 2378 * warnings. 2379 */ 2380 if (!(trb_comp_code == COMP_STOP || 2381 trb_comp_code == COMP_STOP_INVAL)) { 2382 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 2383 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2384 ep_index); 2385 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2386 (le32_to_cpu(event->flags) & 2387 TRB_TYPE_BITMASK)>>10); 2388 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2389 } 2390 if (ep->skip) { 2391 ep->skip = false; 2392 xhci_dbg(xhci, "td_list is empty while skip " 2393 "flag set. Clear skip flag.\n"); 2394 } 2395 goto cleanup; 2396 } 2397 2398 /* We've skipped all the TDs on the ep ring when ep->skip set */ 2399 if (ep->skip && td_num == 0) { 2400 ep->skip = false; 2401 xhci_dbg(xhci, "All tds on the ep_ring skipped. " 2402 "Clear skip flag.\n"); 2403 goto cleanup; 2404 } 2405 2406 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2407 if (ep->skip) 2408 td_num--; 2409 2410 /* Is this a TRB in the currently executing TD?
*/ 2411 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, 2412 td->last_trb, ep_trb_dma, false); 2413 2414 /* 2415 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE 2416 * is not in the current TD pointed to by ep_ring->dequeue because 2417 * the hardware dequeue pointer is still at the previous TRB 2418 * of the current TD. The previous TRB may be a Link TRB or the 2419 * last TRB of the previous TD. The command completion handler 2420 * will take care of the rest. 2421 */ 2422 if (!ep_seg && (trb_comp_code == COMP_STOP || 2423 trb_comp_code == COMP_STOP_INVAL)) { 2424 goto cleanup; 2425 } 2426 2427 if (!ep_seg) { 2428 if (!ep->skip || 2429 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2430 /* Some host controllers give a spurious 2431 * successful event after a short transfer. 2432 * Ignore it. 2433 */ 2434 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 2435 ep_ring->last_td_was_short) { 2436 ep_ring->last_td_was_short = false; 2437 goto cleanup; 2438 } 2439 /* HC is busted, give up! */ 2440 xhci_err(xhci, 2441 "ERROR Transfer event TRB DMA ptr not " 2442 "part of current TD ep_index %d " 2443 "comp_code %u\n", ep_index, 2444 trb_comp_code); 2445 trb_in_td(xhci, ep_ring->deq_seg, 2446 ep_ring->dequeue, td->last_trb, 2447 ep_trb_dma, true); 2448 return -ESHUTDOWN; 2449 } 2450 2451 skip_isoc_td(xhci, td, event, ep, &status); 2452 goto cleanup; 2453 } 2454 if (trb_comp_code == COMP_SHORT_TX) 2455 ep_ring->last_td_was_short = true; 2456 else 2457 ep_ring->last_td_was_short = false; 2458 2459 if (ep->skip) { 2460 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 2461 ep->skip = false; 2462 } 2463 2464 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / 2465 sizeof(*ep_trb)]; 2466 /* 2467 * No-op TRB should not trigger interrupts. 2468 * If ep_trb is a no-op TRB, it means the 2469 * corresponding TD has been cancelled. Just ignore 2470 * the TD. 2471 */ 2472 if (trb_is_noop(ep_trb)) { 2473 xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n"); 2474 goto cleanup; 2475 } 2476 2477 /* update the urb's actual_length and give back to the core */ 2478 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 2479 process_ctrl_td(xhci, td, ep_trb, event, ep, &status); 2480 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 2481 process_isoc_td(xhci, td, ep_trb, event, ep, &status); 2482 else 2483 process_bulk_intr_td(xhci, td, ep_trb, event, ep, 2484 &status); 2485 cleanup: 2486 handling_skipped_tds = ep->skip && 2487 trb_comp_code != COMP_MISSED_INT && 2488 trb_comp_code != COMP_PING_ERR; 2489 2490 /* 2491 * Do not update event ring dequeue pointer if we're in a loop 2492 * processing missed tds. 2493 */ 2494 if (!handling_skipped_tds) 2495 inc_deq(xhci, xhci->event_ring); 2496 2497 /* 2498 * If ep->skip is set, it means there are missed TDs on the 2499 * endpoint ring that need to be taken care of. 2500 * Process them as short transfers until we reach the TD pointed 2501 * to by the event. 2502 */ 2503 } while (handling_skipped_tds); 2504 2505 return 0; 2506 } 2507 2508 /* 2509 * This function handles all OS-owned events on the event ring. It may drop 2510 * xhci->lock between event processing (e.g. to pass up port status changes). 2511 * Returns >0 for "possibly more events to process" (caller should call again), 2512 * otherwise 0 if done. In the future, <0 returns should indicate error codes. 2513 */ 2514 static int xhci_handle_event(struct xhci_hcd *xhci) 2515 { 2516 union xhci_trb *event; 2517 int update_ptrs = 1; 2518 int ret; 2519 2520 /* Event ring hasn't been allocated yet.
*/ 2521 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2522 xhci_err(xhci, "ERROR event ring not ready\n"); 2523 return -ENOMEM; 2524 } 2525 2526 event = xhci->event_ring->dequeue; 2527 /* Does the HC or OS own the TRB? */ 2528 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != 2529 xhci->event_ring->cycle_state) 2530 return 0; 2531 2532 /* 2533 * Barrier between reading the TRB_CYCLE (valid) flag above and any 2534 * speculative reads of the event's flags/data below. 2535 */ 2536 rmb(); 2537 /* FIXME: Handle more event types. */ 2538 switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { 2539 case TRB_TYPE(TRB_COMPLETION): 2540 handle_cmd_completion(xhci, &event->event_cmd); 2541 break; 2542 case TRB_TYPE(TRB_PORT_STATUS): 2543 handle_port_status(xhci, event); 2544 update_ptrs = 0; 2545 break; 2546 case TRB_TYPE(TRB_TRANSFER): 2547 ret = handle_tx_event(xhci, &event->trans_event); 2548 if (ret >= 0) 2549 update_ptrs = 0; 2550 break; 2551 case TRB_TYPE(TRB_DEV_NOTE): 2552 handle_device_notification(xhci, event); 2553 break; 2554 default: 2555 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2556 TRB_TYPE(48)) 2557 handle_vendor_event(xhci, event); 2558 else 2559 xhci_warn(xhci, "ERROR unknown event type %d\n", 2560 TRB_FIELD_TO_TYPE( 2561 le32_to_cpu(event->event_cmd.flags))); 2562 } 2563 /* Any of the above functions may drop and re-acquire the lock, so check 2564 * to make sure a watchdog timer didn't mark the host as non-responsive. 2565 */ 2566 if (xhci->xhc_state & XHCI_STATE_DYING) { 2567 xhci_dbg(xhci, "xHCI host dying, returning from " 2568 "event handler.\n"); 2569 return 0; 2570 } 2571 2572 if (update_ptrs) 2573 /* Update SW event ring dequeue pointer */ 2574 inc_deq(xhci, xhci->event_ring); 2575 2576 /* Are there more items on the event ring? Caller will call us again to 2577 * check. 2578 */ 2579 return 1; 2580 } 2581 2582 /* 2583 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2584 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2585 * indicators of an event TRB error, but we check the status *first* to be safe. 2586 */ 2587 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2588 { 2589 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2590 u32 status; 2591 u64 temp_64; 2592 union xhci_trb *event_ring_deq; 2593 dma_addr_t deq; 2594 2595 spin_lock(&xhci->lock); 2596 /* Check if the xHC generated the interrupt, or the irq is shared */ 2597 status = readl(&xhci->op_regs->status); 2598 if (status == 0xffffffff) 2599 goto hw_died; 2600 2601 if (!(status & STS_EINT)) { 2602 spin_unlock(&xhci->lock); 2603 return IRQ_NONE; 2604 } 2605 if (status & STS_FATAL) { 2606 xhci_warn(xhci, "WARNING: Host System Error\n"); 2607 xhci_halt(xhci); 2608 hw_died: 2609 spin_unlock(&xhci->lock); 2610 return IRQ_HANDLED; 2611 } 2612 2613 /* 2614 * Clear the op reg interrupt status first, 2615 * so we can receive interrupts from other MSI-X interrupters. 2616 * Write 1 to clear the interrupt status. 
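 *
 * (USBSTS is RW1C: bits are cleared by writing 1 and unaffected by writing
 * 0, which is why the just-read value with STS_EINT set is written straight
 * back.)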
2617 */ 2618 status |= STS_EINT; 2619 writel(status, &xhci->op_regs->status); 2620 /* FIXME when MSI-X is supported and there are multiple vectors */ 2621 /* Clear the MSI-X event interrupt status */ 2622 2623 if (hcd->irq) { 2624 u32 irq_pending; 2625 /* Acknowledge the PCI interrupt */ 2626 irq_pending = readl(&xhci->ir_set->irq_pending); 2627 irq_pending |= IMAN_IP; 2628 writel(irq_pending, &xhci->ir_set->irq_pending); 2629 } 2630 2631 if (xhci->xhc_state & XHCI_STATE_DYING || 2632 xhci->xhc_state & XHCI_STATE_HALTED) { 2633 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2634 "Shouldn't IRQs be disabled?\n"); 2635 /* Clear the event handler busy flag (RW1C); 2636 * the event ring should be empty. 2637 */ 2638 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2639 xhci_write_64(xhci, temp_64 | ERST_EHB, 2640 &xhci->ir_set->erst_dequeue); 2641 spin_unlock(&xhci->lock); 2642 2643 return IRQ_HANDLED; 2644 } 2645 2646 event_ring_deq = xhci->event_ring->dequeue; 2647 /* FIXME this should be a delayed service routine 2648 * that clears the EHB. 2649 */ 2650 while (xhci_handle_event(xhci) > 0) {} 2651 2652 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2653 /* If necessary, update the HW's version of the event ring deq ptr. */ 2654 if (event_ring_deq != xhci->event_ring->dequeue) { 2655 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2656 xhci->event_ring->dequeue); 2657 if (deq == 0) 2658 xhci_warn(xhci, "WARN something wrong with SW event " 2659 "ring dequeue ptr.\n"); 2660 /* Update HC event ring dequeue pointer */ 2661 temp_64 &= ERST_PTR_MASK; 2662 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2663 } 2664 2665 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2666 temp_64 |= ERST_EHB; 2667 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2668 2669 spin_unlock(&xhci->lock); 2670 2671 return IRQ_HANDLED; 2672 } 2673 2674 irqreturn_t xhci_msi_irq(int irq, void *hcd) 2675 { 2676 return xhci_irq(hcd); 2677 } 2678 2679 /**** Endpoint Ring Operations ****/ 2680 2681 /* 2682 * Generic function for queueing a TRB on a ring. 2683 * The caller must have checked to make sure there's room on the ring. 2684 * 2685 * @more_trbs_coming: Will you enqueue more TRBs before calling 2686 * prepare_transfer()? 2687 */ 2688 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2689 bool more_trbs_coming, 2690 u32 field1, u32 field2, u32 field3, u32 field4) 2691 { 2692 struct xhci_generic_trb *trb; 2693 2694 trb = &ring->enqueue->generic; 2695 trb->field[0] = cpu_to_le32(field1); 2696 trb->field[1] = cpu_to_le32(field2); 2697 trb->field[2] = cpu_to_le32(field3); 2698 trb->field[3] = cpu_to_le32(field4); 2699 inc_enq(xhci, ring, more_trbs_coming); 2700 } 2701 2702 /* 2703 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2704 * FIXME allocate segments if the ring is full. 2705 */ 2706 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2707 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2708 { 2709 unsigned int num_trbs_needed; 2710 2711 /* Make sure the endpoint has been added to xHC schedule */ 2712 switch (ep_state) { 2713 case EP_STATE_DISABLED: 2714 /* 2715 * USB core changed config/interfaces without notifying us, 2716 * or hardware is reporting the wrong state. 
2717 */ 2718 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2719 return -ENOENT; 2720 case EP_STATE_ERROR: 2721 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2722 /* FIXME event handling code for error needs to clear it */ 2723 /* XXX not sure if this should be -ENOENT or not */ 2724 return -EINVAL; 2725 case EP_STATE_HALTED: 2726 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2727 case EP_STATE_STOPPED: 2728 case EP_STATE_RUNNING: 2729 break; 2730 default: 2731 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2732 /* 2733 * FIXME issue Configure Endpoint command to try to get the HC 2734 * back into a known state. 2735 */ 2736 return -EINVAL; 2737 } 2738 2739 while (1) { 2740 if (room_on_ring(xhci, ep_ring, num_trbs)) 2741 break; 2742 2743 if (ep_ring == xhci->cmd_ring) { 2744 xhci_err(xhci, "Do not support expand command ring\n"); 2745 return -ENOMEM; 2746 } 2747 2748 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, 2749 "ERROR no room on ep ring, try ring expansion"); 2750 num_trbs_needed = num_trbs - ep_ring->num_trbs_free; 2751 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, 2752 mem_flags)) { 2753 xhci_err(xhci, "Ring expansion failed\n"); 2754 return -ENOMEM; 2755 } 2756 } 2757 2758 while (trb_is_link(ep_ring->enqueue)) { 2759 /* If we're not dealing with 0.95 hardware or isoc rings 2760 * on AMD 0.96 host, clear the chain bit. 2761 */ 2762 if (!xhci_link_trb_quirk(xhci) && 2763 !(ep_ring->type == TYPE_ISOC && 2764 (xhci->quirks & XHCI_AMD_0x96_HOST))) 2765 ep_ring->enqueue->link.control &= 2766 cpu_to_le32(~TRB_CHAIN); 2767 else 2768 ep_ring->enqueue->link.control |= 2769 cpu_to_le32(TRB_CHAIN); 2770 2771 wmb(); 2772 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); 2773 2774 /* Toggle the cycle bit after the last ring segment. 
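 *
 * (Typically only the link TRB in the ring's last segment has LINK_TOGGLE
 * set, so cycle_state flips exactly once per lap around the ring.)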
*/ 2775 if (link_trb_toggles_cycle(ep_ring->enqueue)) 2776 ep_ring->cycle_state ^= 1; 2777 2778 ep_ring->enq_seg = ep_ring->enq_seg->next; 2779 ep_ring->enqueue = ep_ring->enq_seg->trbs; 2780 } 2781 return 0; 2782 } 2783 2784 static int prepare_transfer(struct xhci_hcd *xhci, 2785 struct xhci_virt_device *xdev, 2786 unsigned int ep_index, 2787 unsigned int stream_id, 2788 unsigned int num_trbs, 2789 struct urb *urb, 2790 unsigned int td_index, 2791 gfp_t mem_flags) 2792 { 2793 int ret; 2794 struct urb_priv *urb_priv; 2795 struct xhci_td *td; 2796 struct xhci_ring *ep_ring; 2797 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2798 2799 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2800 if (!ep_ring) { 2801 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2802 stream_id); 2803 return -EINVAL; 2804 } 2805 2806 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), 2807 num_trbs, mem_flags); 2808 if (ret) 2809 return ret; 2810 2811 urb_priv = urb->hcpriv; 2812 td = urb_priv->td[td_index]; 2813 2814 INIT_LIST_HEAD(&td->td_list); 2815 INIT_LIST_HEAD(&td->cancelled_td_list); 2816 2817 if (td_index == 0) { 2818 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2819 if (unlikely(ret)) 2820 return ret; 2821 } 2822 2823 td->urb = urb; 2824 /* Add this TD to the tail of the endpoint ring's TD list */ 2825 list_add_tail(&td->td_list, &ep_ring->td_list); 2826 td->start_seg = ep_ring->enq_seg; 2827 td->first_trb = ep_ring->enqueue; 2828 2829 urb_priv->td[td_index] = td; 2830 2831 return 0; 2832 } 2833 2834 static unsigned int count_trbs(u64 addr, u64 len) 2835 { 2836 unsigned int num_trbs; 2837 2838 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), 2839 TRB_MAX_BUFF_SIZE); 2840 if (num_trbs == 0) 2841 num_trbs++; 2842 2843 return num_trbs; 2844 } 2845 2846 static inline unsigned int count_trbs_needed(struct urb *urb) 2847 { 2848 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); 2849 } 2850 2851 static unsigned int count_sg_trbs_needed(struct urb *urb) 2852 { 2853 struct scatterlist *sg; 2854 unsigned int i, len, full_len, num_trbs = 0; 2855 2856 full_len = urb->transfer_buffer_length; 2857 2858 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { 2859 len = sg_dma_len(sg); 2860 num_trbs += count_trbs(sg_dma_address(sg), len); 2861 len = min_t(unsigned int, len, full_len); 2862 full_len -= len; 2863 if (full_len == 0) 2864 break; 2865 } 2866 2867 return num_trbs; 2868 } 2869 2870 static unsigned int count_isoc_trbs_needed(struct urb *urb, int i) 2871 { 2872 u64 addr, len; 2873 2874 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 2875 len = urb->iso_frame_desc[i].length; 2876 2877 return count_trbs(addr, len); 2878 } 2879 2880 static void check_trb_math(struct urb *urb, int running_total) 2881 { 2882 if (unlikely(running_total != urb->transfer_buffer_length)) 2883 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2884 "queued %#x (%d), asked for %#x (%d)\n", 2885 __func__, 2886 urb->ep->desc.bEndpointAddress, 2887 running_total, running_total, 2888 urb->transfer_buffer_length, 2889 urb->transfer_buffer_length); 2890 } 2891 2892 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2893 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2894 struct xhci_generic_trb *start_trb) 2895 { 2896 /* 2897 * Pass all the TRBs to the hardware at once and make sure this write 2898 * isn't reordered. 
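 *
 * (The queueing functions wrote the first TRB with an inverted cycle bit,
 * so the HC has ignored the TD so far; flipping that single bit below hands
 * every TRB of the TD to the hardware in one step.)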
*/ 2900 wmb(); 2901 if (start_cycle) 2902 start_trb->field[3] |= cpu_to_le32(start_cycle); 2903 else 2904 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 2905 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2906 } 2907 2908 static void check_interval(struct xhci_hcd *xhci, struct urb *urb, 2909 struct xhci_ep_ctx *ep_ctx) 2910 { 2911 int xhci_interval; 2912 int ep_interval; 2913 2914 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 2915 ep_interval = urb->interval; 2916 2917 /* Convert to microframes */ 2918 if (urb->dev->speed == USB_SPEED_LOW || 2919 urb->dev->speed == USB_SPEED_FULL) 2920 ep_interval *= 8; 2921 2922 /* FIXME change this to a warning and a suggestion to use the new API 2923 * to set the polling interval (once the API is added). 2924 */ 2925 if (xhci_interval != ep_interval) { 2926 dev_dbg_ratelimited(&urb->dev->dev, 2927 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", 2928 ep_interval, ep_interval == 1 ? "" : "s", 2929 xhci_interval, xhci_interval == 1 ? "" : "s"); 2930 urb->interval = xhci_interval; 2931 /* Convert back to frames for LS/FS devices */ 2932 if (urb->dev->speed == USB_SPEED_LOW || 2933 urb->dev->speed == USB_SPEED_FULL) 2934 urb->interval /= 8; 2935 } 2936 } 2937 2938 /* 2939 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 2940 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 2941 * (comprised of sg list entries) can take several service intervals to 2942 * transmit. 2943 */ 2944 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2945 struct urb *urb, int slot_id, unsigned int ep_index) 2946 { 2947 struct xhci_ep_ctx *ep_ctx; 2948 2949 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); 2950 check_interval(xhci, urb, ep_ctx); 2951 2952 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); 2953 } 2954 2955 /* 2956 * For xHCI 1.0 host controllers, TD size is the number of max packet sized 2957 * packets remaining in the TD (*not* including this TRB). 2958 * 2959 * Total TD packet count = total_packet_count = 2960 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize) 2961 * 2962 * Packets transferred up to and including this TRB = packets_transferred = 2963 * rounddown(total bytes transferred including this TRB / wMaxPacketSize) 2964 * 2965 * TD size = total_packet_count - packets_transferred 2966 * 2967 * For xHCI 0.96 and older, TD size field should be the remaining bytes 2968 * including this TRB, right shifted by 10 2969 * 2970 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31. 2971 * This is taken care of in the TRB_TD_SIZE() macro 2972 * 2973 * The last TRB in a TD must have the TD size set to zero. 2974 */ 2975 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, 2976 int trb_buff_len, unsigned int td_total_len, 2977 struct urb *urb, bool more_trbs_coming) 2978 { 2979 u32 maxp, total_packet_count; 2980 2981 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ 2982 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) 2983 return ((td_total_len - transferred) >> 10); 2984 2985 /* One TRB with a zero-length data packet.
*/ 2986 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) || 2987 trb_buff_len == td_total_len) 2988 return 0; 2989 2990 /* for MTK xHCI, TD size doesn't include this TRB */ 2991 if (xhci->quirks & XHCI_MTK_HOST) 2992 trb_buff_len = 0; 2993 2994 maxp = usb_endpoint_maxp(&urb->ep->desc); 2995 total_packet_count = DIV_ROUND_UP(td_total_len, maxp); 2996 2997 /* Queueing functions don't count the current TRB into transferred */ 2998 return (total_packet_count - ((transferred + trb_buff_len) / maxp)); 2999 } 3000 3001 3002 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, 3003 u32 *trb_buff_len, struct xhci_segment *seg) 3004 { 3005 struct device *dev = xhci_to_hcd(xhci)->self.controller; 3006 unsigned int unalign; 3007 unsigned int max_pkt; 3008 u32 new_buff_len; 3009 3010 max_pkt = usb_endpoint_maxp(&urb->ep->desc); 3011 unalign = (enqd_len + *trb_buff_len) % max_pkt; 3012 3013 /* we got lucky, last normal TRB data on segment is packet aligned */ 3014 if (unalign == 0) 3015 return 0; 3016 3017 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n", 3018 unalign, *trb_buff_len); 3019 3020 /* is the last normal TRB alignable by splitting it? */ 3021 if (*trb_buff_len > unalign) { 3022 *trb_buff_len -= unalign; 3023 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len); 3024 return 0; 3025 } 3026 3027 /* 3028 * We want enqd_len + trb_buff_len to sum up to a number which is 3029 * divisible by the endpoint's wMaxPacketSize. IOW: 3030 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0. 3031 */ 3032 new_buff_len = max_pkt - (enqd_len % max_pkt); 3033 3034 if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) 3035 new_buff_len = (urb->transfer_buffer_length - enqd_len); 3036 3037 /* use a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */ 3038 if (usb_urb_dir_out(urb)) { 3039 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs, 3040 seg->bounce_buf, new_buff_len, enqd_len); 3041 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, 3042 max_pkt, DMA_TO_DEVICE); 3043 } else { 3044 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, 3045 max_pkt, DMA_FROM_DEVICE); 3046 } 3047 3048 if (dma_mapping_error(dev, seg->bounce_dma)) { 3049 /* try without aligning. Some host controllers survive */ 3050 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); 3051 return 0; 3052 } 3053 *trb_buff_len = new_buff_len; 3054 seg->bounce_len = new_buff_len; 3055 seg->bounce_offs = enqd_len; 3056 3057 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); 3058 3059 return 1; 3060 } 3061 3062 /* This is very similar to what ehci-q.c qtd_fill() does */ 3063 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3064 struct urb *urb, int slot_id, unsigned int ep_index) 3065 { 3066 struct xhci_ring *ring; 3067 struct urb_priv *urb_priv; 3068 struct xhci_td *td; 3069 struct xhci_generic_trb *start_trb; 3070 struct scatterlist *sg = NULL; 3071 bool more_trbs_coming = true; 3072 bool need_zero_pkt = false; 3073 bool first_trb = true; 3074 unsigned int num_trbs; 3075 unsigned int start_cycle, num_sgs = 0; 3076 unsigned int enqd_len, block_len, trb_buff_len, full_len; 3077 int sent_len, ret; 3078 u32 field, length_field, remainder; 3079 u64 addr, send_addr; 3080 3081 ring = xhci_urb_to_transfer_ring(xhci, urb); 3082 if (!ring) 3083 return -EINVAL; 3084 3085 full_len = urb->transfer_buffer_length; 3086 /* If we have scatter/gather list, we use it.
*/ 3087 if (urb->num_sgs) { 3088 num_sgs = urb->num_mapped_sgs; 3089 sg = urb->sg; 3090 addr = (u64) sg_dma_address(sg); 3091 block_len = sg_dma_len(sg); 3092 num_trbs = count_sg_trbs_needed(urb); 3093 } else { 3094 num_trbs = count_trbs_needed(urb); 3095 addr = (u64) urb->transfer_dma; 3096 block_len = full_len; 3097 } 3098 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3099 ep_index, urb->stream_id, 3100 num_trbs, urb, 0, mem_flags); 3101 if (unlikely(ret < 0)) 3102 return ret; 3103 3104 urb_priv = urb->hcpriv; 3105 3106 /* Deal with URB_ZERO_PACKET - need one more td/trb */ 3107 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1) 3108 need_zero_pkt = true; 3109 3110 td = urb_priv->td[0]; 3111 3112 /* 3113 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3114 * until we've finished creating all the other TRBs. The ring's cycle 3115 * state may change as we enqueue the other TRBs, so save it too. 3116 */ 3117 start_trb = &ring->enqueue->generic; 3118 start_cycle = ring->cycle_state; 3119 send_addr = addr; 3120 3121 /* Queue the TRBs, even if they are zero-length */ 3122 for (enqd_len = 0; first_trb || enqd_len < full_len; 3123 enqd_len += trb_buff_len) { 3124 field = TRB_TYPE(TRB_NORMAL); 3125 3126 /* TRB buffer should not cross 64KB boundaries */ 3127 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); 3128 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len); 3129 3130 if (enqd_len + trb_buff_len > full_len) 3131 trb_buff_len = full_len - enqd_len; 3132 3133 /* Don't change the cycle bit of the first TRB until later */ 3134 if (first_trb) { 3135 first_trb = false; 3136 if (start_cycle == 0) 3137 field |= TRB_CYCLE; 3138 } else 3139 field |= ring->cycle_state; 3140 3141 /* Chain all the TRBs together; clear the chain bit in the last 3142 * TRB to indicate it's the last TRB in the chain. 3143 */ 3144 if (enqd_len + trb_buff_len < full_len) { 3145 field |= TRB_CHAIN; 3146 if (trb_is_link(ring->enqueue + 1)) { 3147 if (xhci_align_td(xhci, urb, enqd_len, 3148 &trb_buff_len, 3149 ring->enq_seg)) { 3150 send_addr = ring->enq_seg->bounce_dma; 3151 /* assuming TD won't span 2 segs */ 3152 td->bounce_seg = ring->enq_seg; 3153 } 3154 } 3155 } 3156 if (enqd_len + trb_buff_len >= full_len) { 3157 field &= ~TRB_CHAIN; 3158 field |= TRB_IOC; 3159 more_trbs_coming = false; 3160 td->last_trb = ring->enqueue; 3161 } 3162 3163 /* Only set interrupt on short packet for IN endpoints */ 3164 if (usb_urb_dir_in(urb)) 3165 field |= TRB_ISP; 3166 3167 /* Set the TRB length, TD size, and interrupter fields. 
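 *
 * Worked example of the TD size rules (illustrative numbers, xHCI 1.0 host
 * assumed): with maxp = 512 and full_len = 1600, total_packet_count =
 * DIV_ROUND_UP(1600, 512) = 4; if enqd_len + trb_buff_len = 1024 after this
 * TRB, then 1024 / 512 = 2 packets are done and remainder = 4 - 2 = 2.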
*/ 3168 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, 3169 full_len, urb, more_trbs_coming); 3170 3171 length_field = TRB_LEN(trb_buff_len) | 3172 TRB_TD_SIZE(remainder) | 3173 TRB_INTR_TARGET(0); 3174 3175 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, 3176 lower_32_bits(send_addr), 3177 upper_32_bits(send_addr), 3178 length_field, 3179 field); 3180 3181 addr += trb_buff_len; 3182 sent_len = trb_buff_len; 3183 3184 while (sg && sent_len >= block_len) { 3185 /* New sg entry */ 3186 --num_sgs; 3187 sent_len -= block_len; 3188 if (num_sgs != 0) { 3189 sg = sg_next(sg); 3190 block_len = sg_dma_len(sg); 3191 addr = (u64) sg_dma_address(sg); 3192 addr += sent_len; 3193 } 3194 } 3195 block_len -= sent_len; 3196 send_addr = addr; 3197 } 3198 3199 if (need_zero_pkt) { 3200 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3201 ep_index, urb->stream_id, 3202 1, urb, 1, mem_flags); 3203 urb_priv->td[1]->last_trb = ring->enqueue; 3204 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; 3205 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); 3206 } 3207 3208 check_trb_math(urb, enqd_len); 3209 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3210 start_cycle, start_trb); 3211 return 0; 3212 } 3213 3214 /* Caller must have locked xhci->lock */ 3215 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3216 struct urb *urb, int slot_id, unsigned int ep_index) 3217 { 3218 struct xhci_ring *ep_ring; 3219 int num_trbs; 3220 int ret; 3221 struct usb_ctrlrequest *setup; 3222 struct xhci_generic_trb *start_trb; 3223 int start_cycle; 3224 u32 field, length_field, remainder; 3225 struct urb_priv *urb_priv; 3226 struct xhci_td *td; 3227 3228 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3229 if (!ep_ring) 3230 return -EINVAL; 3231 3232 /* 3233 * Need to copy setup packet into setup TRB, so we can't use the setup 3234 * DMA address. 3235 */ 3236 if (!urb->setup_packet) 3237 return -EINVAL; 3238 3239 /* 1 TRB for setup, 1 for status */ 3240 num_trbs = 2; 3241 /* 3242 * Don't need to check if we need additional event data and normal TRBs, 3243 * since data in control transfers will never get bigger than 16MB 3244 * XXX: can we get a buffer that crosses 64KB boundaries? 3245 */ 3246 if (urb->transfer_buffer_length > 0) 3247 num_trbs++; 3248 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3249 ep_index, urb->stream_id, 3250 num_trbs, urb, 0, mem_flags); 3251 if (ret < 0) 3252 return ret; 3253 3254 urb_priv = urb->hcpriv; 3255 td = urb_priv->td[0]; 3256 3257 /* 3258 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3259 * until we've finished creating all the other TRBs. The ring's cycle 3260 * state may change as we enqueue the other TRBs, so save it too. 3261 */ 3262 start_trb = &ep_ring->enqueue->generic; 3263 start_cycle = ep_ring->cycle_state; 3264 3265 /* Queue setup TRB - see section 6.4.1.2.1 */ 3266 /* FIXME better way to translate setup_packet into two u32 fields? 
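 *
 * (The 8-byte setup packet rides in the TRB itself as immediate data
 * (TRB_IDT): bRequestType | bRequest << 8 | wValue << 16 goes in the first
 * field and wIndex | wLength << 16 in the second, exactly as queued below.)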
*/ 3267 setup = (struct usb_ctrlrequest *) urb->setup_packet; 3268 field = 0; 3269 field |= TRB_IDT | TRB_TYPE(TRB_SETUP); 3270 if (start_cycle == 0) 3271 field |= 0x1; 3272 3273 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ 3274 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { 3275 if (urb->transfer_buffer_length > 0) { 3276 if (setup->bRequestType & USB_DIR_IN) 3277 field |= TRB_TX_TYPE(TRB_DATA_IN); 3278 else 3279 field |= TRB_TX_TYPE(TRB_DATA_OUT); 3280 } 3281 } 3282 3283 queue_trb(xhci, ep_ring, true, 3284 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, 3285 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, 3286 TRB_LEN(8) | TRB_INTR_TARGET(0), 3287 /* Immediate data in pointer */ 3288 field); 3289 3290 /* If there's data, queue data TRBs */ 3291 /* Only set interrupt on short packet for IN endpoints */ 3292 if (usb_urb_dir_in(urb)) 3293 field = TRB_ISP | TRB_TYPE(TRB_DATA); 3294 else 3295 field = TRB_TYPE(TRB_DATA); 3296 3297 remainder = xhci_td_remainder(xhci, 0, 3298 urb->transfer_buffer_length, 3299 urb->transfer_buffer_length, 3300 urb, 1); 3301 3302 length_field = TRB_LEN(urb->transfer_buffer_length) | 3303 TRB_TD_SIZE(remainder) | 3304 TRB_INTR_TARGET(0); 3305 3306 if (urb->transfer_buffer_length > 0) { 3307 if (setup->bRequestType & USB_DIR_IN) 3308 field |= TRB_DIR_IN; 3309 queue_trb(xhci, ep_ring, true, 3310 lower_32_bits(urb->transfer_dma), 3311 upper_32_bits(urb->transfer_dma), 3312 length_field, 3313 field | ep_ring->cycle_state); 3314 } 3315 3316 /* Save the DMA address of the last TRB in the TD */ 3317 td->last_trb = ep_ring->enqueue; 3318 3319 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 3320 /* If the device sent data, the status stage is an OUT transfer */ 3321 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 3322 field = 0; 3323 else 3324 field = TRB_DIR_IN; 3325 queue_trb(xhci, ep_ring, false, 3326 0, 3327 0, 3328 TRB_INTR_TARGET(0), 3329 /* Event on completion */ 3330 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 3331 3332 giveback_first_trb(xhci, slot_id, ep_index, 0, 3333 start_cycle, start_trb); 3334 return 0; 3335 } 3336 3337 /* 3338 * The transfer burst count field of the isochronous TRB defines the number of 3339 * bursts that are required to move all packets in this TD. Only SuperSpeed 3340 * devices can burst up to bMaxBurst number of packets per service interval. 3341 * This field is zero based, meaning a value of zero in the field means one 3342 * burst. Basically, for everything but SuperSpeed devices, this field will be 3343 * zero. Only xHCI 1.0 host controllers support this field. 3344 */ 3345 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, 3346 struct urb *urb, unsigned int total_packet_count) 3347 { 3348 unsigned int max_burst; 3349 3350 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) 3351 return 0; 3352 3353 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3354 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; 3355 } 3356 3357 /* 3358 * Returns the number of packets in the last "burst" of packets. This field is 3359 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so 3360 * the last burst packet count is equal to the total number of packets in the 3361 * TD. SuperSpeed endpoints can have up to 3 bursts. 
All but the last burst 3362 * must contain (bMaxBurst + 1) number of packets, but the last burst can 3363 * contain 1 to (bMaxBurst + 1) packets. 3364 */ 3365 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, 3366 struct urb *urb, unsigned int total_packet_count) 3367 { 3368 unsigned int max_burst; 3369 unsigned int residue; 3370 3371 if (xhci->hci_version < 0x100) 3372 return 0; 3373 3374 if (urb->dev->speed >= USB_SPEED_SUPER) { 3375 /* bMaxBurst is zero based: 0 means 1 packet per burst */ 3376 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3377 residue = total_packet_count % (max_burst + 1); 3378 /* If residue is zero, the last burst contains (max_burst + 1) 3379 * number of packets, but the TLBPC field is zero-based. 3380 */ 3381 if (residue == 0) 3382 return max_burst; 3383 return residue - 1; 3384 } 3385 if (total_packet_count == 0) 3386 return 0; 3387 return total_packet_count - 1; 3388 } 3389 3390 /* 3391 * Calculates the Frame ID field of the isochronous TRB. The Frame ID 3392 * identifies the target frame that the Interval associated with this 3393 * Isochronous Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec. 3394 * 3395 * Returns actual frame id on success, negative value on error. 3396 */ 3397 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, 3398 struct urb *urb, int index) 3399 { 3400 int start_frame, ist, ret = 0; 3401 int start_frame_id, end_frame_id, current_frame_id; 3402 3403 if (urb->dev->speed == USB_SPEED_LOW || 3404 urb->dev->speed == USB_SPEED_FULL) 3405 start_frame = urb->start_frame + index * urb->interval; 3406 else 3407 start_frame = (urb->start_frame + index * urb->interval) >> 3; 3408 3409 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2): 3410 * 3411 * If bit [3] of IST is cleared to '0', software can add a TRB no 3412 * later than IST[2:0] Microframes before that TRB is scheduled to 3413 * be executed. 3414 * If bit [3] of IST is set to '1', software can add a TRB no later 3415 * than IST[2:0] Frames before that TRB is scheduled to be executed. 3416 */ 3417 ist = HCS_IST(xhci->hcs_params2) & 0x7; 3418 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) 3419 ist <<= 3; 3420 3421 /* Software shall not schedule an Isoch TD with a Frame ID value that 3422 * is less than the Start Frame ID or greater than the End Frame ID, 3423 * where: 3424 * 3425 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048 3426 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048 3427 * 3428 * Both the End Frame ID and Start Frame ID values are calculated 3429 * in microframes. When software determines the valid Frame ID value, 3430 * the End Frame ID value should be rounded down to the nearest Frame 3431 * boundary, and the Start Frame ID value should be rounded up to the 3432 * nearest Frame boundary.
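 *
 * Worked example (illustrative register values): with MFINDEX reading 0x100
 * and IST = 2 microframes, Start Frame ID = roundup(0x100 + 2 + 1, 8) >> 3
 * = 0x21 and End Frame ID = rounddown(0x100 + 895 * 8, 8) >> 3 = 0x39f, so
 * a start_frame outside [0x21, 0x39f] is rejected below.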
/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies the
 * target frame that the Interval associated with this Isochronous Transfer
 * Descriptor will start on.  Refer to 4.11.2.5 in the 1.1 spec.
 *
 * Returns the actual frame id on success, or a negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes.  When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if (start_frame > end_frame_id &&
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
			  start_frame, current_frame_id, index,
			  start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}
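/*
 * Worked example for the IST decoding and scheduling window above
 * (illustrative values): with HCSPARAMS2 reporting IST = 0xA (bit [3] set,
 * IST[2:0] = 2), the threshold is 2 frames = 2 << 3 = 16 microframes.  If
 * MFINDEX reads 100, the window in microframes is
 * start_frame_id = roundup(100 + 16 + 1, 8) = 120 and
 * end_frame_id = rounddown(100 + 895 * 8, 8) = 7256, i.e. frames 15 to 907
 * after the >> 3 shift and 0x7ff mask.
 */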
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
				urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;
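			/*
			 * Worked example for the boundary clamp above
			 * (illustrative address): TRB data buffers must not
			 * cross a 64 KB boundary, so for addr = 0x1234F800
			 * the room left up to the next boundary (0x12350000)
			 * is 0x800 = 2048 bytes; a 4096-byte remainder would
			 * therefore be split across two TRBs.
			 */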
			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				  lower_32_bits(addr),
				  upper_32_bits(addr),
				  length_field,
				  field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit.  That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}
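/*
 * Worked example for the Contiguous Frame ID bookkeeping above (illustrative
 * numbers): if a URB with 8 packets and interval 1 started at frame 100, the
 * endpoint's next_frame_id becomes 100 + 8 * 1 = 108, so a follow-up URB
 * queued while the ring is still running starts exactly where this one ends.
 */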
/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  Use the xhci frame_index
 * to update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check the interval value.  This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the TRB
	 * really gets scheduled by hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
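/*
 * Worked example for the start frame rounding above (illustrative values):
 * with MFINDEX = 1000, ist = 16 microframes and XHCI_CFC_DELAY added, a
 * high-speed endpoint with interval = 16 microframes gets
 * start_frame = roundup(roundup(1000 + 16 + XHCI_CFC_DELAY, 8), 16), i.e.
 * the first service opportunity on an ESIT boundary safely beyond the IST.
 */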
/**** Command Ring Operations ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail (command_must_succeed = TRUE),
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			   reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	/* if there are no other commands queued we start the timeout timer */
	if (xhci->cmd_list.next == &cmd->cmd_list &&
	    !timer_pending(&xhci->cmd_timer)) {
		xhci->current_cmd = cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
		  field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
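/*
 * Worked example for the reservation logic above (illustrative count): with
 * cmd_ring_reserved_trbs = 2, an ordinary command asks prepare_ring() for
 * three free TRBs (its own plus the reserve), while a must-succeed command
 * asks for only two and is therefore allowed to dip into the reserved space.
 */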
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
		int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}
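	/*
	 * Illustrative note on the command queued below: because TRBs are
	 * 16-byte aligned, the low four bits of the new dequeue address are
	 * guaranteed to be zero, so the Set TR Dequeue Pointer TRB reuses
	 * them to carry the new consumer cycle state (bit 0) and, for stream
	 * endpoints, the stream context type.
	 */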
	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
		return;
	}

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}