/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is the producer, it rings the doorbell for
 *    command and endpoint rings.  HC is the producer for the event ring, and
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
#include "xhci-trace.h"
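
/*
 * Illustrative sketch, not part of the driver: the consumer-side ownership
 * test from the cycle bit rules above.  A TRB belongs to the consumer when
 * its cycle bit matches the ring's cycle state.  The helper name
 * trb_owned_by_consumer() is hypothetical.
 */
static inline bool __maybe_unused trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}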

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/*
	 * If this is not event ring, and the dequeue pointer
	 * is not on a link TRB, there is one more usable TRB
	 */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
		ring->num_trbs_free++;

	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
		 */
		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
			if (ring->type == TYPE_EVENT &&
					last_trb_on_last_seg(xhci, ring,
						ring->deq_seg, ring->dequeue)) {
				ring->cycle_state ^= 1;
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
}
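
/*
 * Illustrative sketch, not part of the driver: the consumer rules above
 * combined with inc_deq().  Process TRBs while the cycle bit says they are
 * ours (see trb_owned_by_consumer() above), then advance the dequeue pointer,
 * which may toggle the ring cycle state at a segment boundary.  The helper
 * name consume_owned_trbs() is hypothetical.
 */
static void __maybe_unused consume_owned_trbs(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	while (trb_owned_by_consumer(ring, ring->dequeue)) {
		/* ... process the TRB at ring->dequeue here ... */
		inc_deq(xhci, ring);
	}
}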

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet.  We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST))
						&& !xhci_link_trb_quirk(xhci)) {
				next->link.control &=
					cpu_to_le32(~TRB_CHAIN);
				next->link.control |=
					cpu_to_le32(chain);
			}
			/* Give this link TRB to the hardware */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state ^= 1;
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.  See rules
 * above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation.  If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_quiesce(xhci);
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}
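
/*
 * Illustrative sketch, not part of the driver: the producer rules above
 * combined with room_on_ring() and inc_enq().  Write the TRB fields first
 * and hand over the cycle bit last, so the HC never sees a half-written TRB
 * it owns.  The helper name enqueue_one_trb() is hypothetical; the driver's
 * real enqueue path follows the same pattern.
 */
static int __maybe_unused enqueue_one_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring, u32 field1, u32 field2, u32 field3,
		u32 field4)
{
	struct xhci_generic_trb *trb;

	if (!room_on_ring(xhci, ring, 1))
		return -ENOMEM;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	/* Make the rest of the TRB visible before the cycle bit flips */
	wmb();
	trb->field[3] = cpu_to_le32(field4 | ring->cycle_state);
	inc_enq(xhci, ring, false);
	return 0;
}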

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					stream_id);
	}
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb).  We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found &&
		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
				ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}
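
/*
 * Illustrative sketch, not part of the driver: how a submitter pairs with
 * xhci_complete_del_and_free_cmd().  A command allocated with a completion
 * is woken rather than freed by the handler, so the caller can sleep on it;
 * a command without one is simply freed.  The helper name
 * submit_cmd_and_wait() is hypothetical, and queueing/error handling are
 * elided.
 */
static int __maybe_unused submit_cmd_and_wait(struct xhci_hcd *xhci,
		struct xhci_command *cmd)
{
	/* ...queue the command TRB on xhci->cmd_ring here (elided)... */
	xhci_ring_cmd_db(xhci);
	/* Woken by xhci_complete_del_and_free_cmd() on completion */
	wait_for_completion(cmd->completion);
	return cmd->status;
}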

/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs.  If there are other commands waiting, then restart the ring and
 * kick the timer.  This must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
				 cmd_list) {

		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * The caller waiting on the completion is woken when a command
		 * completion event is received for these no-op commands.
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
	return;
}

void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	struct xhci_command *cur_cmd = NULL;
	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		cur_cmd = xhci->current_cmd;
		cur_cmd->status = COMP_CMD_ABORT;
	}

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {

		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}
	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If the CMD ring is stopped, we own the TRBs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}
	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
1618 */ 1619 if (!DEV_SUPERSPEED(temp) && 1620 test_and_clear_bit(faked_port_index, 1621 &bus_state->rexit_ports)) { 1622 complete(&bus_state->rexit_done[faked_port_index]); 1623 bogus_port_status = true; 1624 goto cleanup; 1625 } 1626 1627 if (hcd->speed != HCD_USB3) 1628 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, 1629 PORT_PLC); 1630 1631 cleanup: 1632 /* Update event ring dequeue pointer before dropping the lock */ 1633 inc_deq(xhci, xhci->event_ring); 1634 1635 /* Don't make the USB core poll the roothub if we got a bad port status 1636 * change event. Besides, at that point we can't tell which roothub 1637 * (USB 2.0 or USB 3.0) to kick. 1638 */ 1639 if (bogus_port_status) 1640 return; 1641 1642 /* 1643 * xHCI port-status-change events occur when the "or" of all the 1644 * status-change bits in the portsc register changes from 0 to 1. 1645 * New status changes won't cause an event if any other change 1646 * bits are still set. When an event occurs, switch over to 1647 * polling to avoid losing status changes. 1648 */ 1649 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); 1650 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 1651 spin_unlock(&xhci->lock); 1652 /* Pass this up to the core */ 1653 usb_hcd_poll_rh_status(hcd); 1654 spin_lock(&xhci->lock); 1655 } 1656 1657 /* 1658 * This TD is defined by the TRBs starting at start_trb in start_seg and ending 1659 * at end_trb, which may be in another segment. If the suspect DMA address is a 1660 * TRB in this TD, this function returns that TRB's segment. Otherwise it 1661 * returns 0. 1662 */ 1663 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, 1664 struct xhci_segment *start_seg, 1665 union xhci_trb *start_trb, 1666 union xhci_trb *end_trb, 1667 dma_addr_t suspect_dma, 1668 bool debug) 1669 { 1670 dma_addr_t start_dma; 1671 dma_addr_t end_seg_dma; 1672 dma_addr_t end_trb_dma; 1673 struct xhci_segment *cur_seg; 1674 1675 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); 1676 cur_seg = start_seg; 1677 1678 do { 1679 if (start_dma == 0) 1680 return NULL; 1681 /* We may get an event for a Link TRB in the middle of a TD */ 1682 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1683 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1684 /* If the end TRB isn't in this segment, this is set to 0 */ 1685 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 1686 1687 if (debug) 1688 xhci_warn(xhci, 1689 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", 1690 (unsigned long long)suspect_dma, 1691 (unsigned long long)start_dma, 1692 (unsigned long long)end_trb_dma, 1693 (unsigned long long)cur_seg->dma, 1694 (unsigned long long)end_seg_dma); 1695 1696 if (end_trb_dma > 0) { 1697 /* The end TRB is in this segment, so suspect should be here */ 1698 if (start_dma <= end_trb_dma) { 1699 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 1700 return cur_seg; 1701 } else { 1702 /* Case for one segment with 1703 * a TD wrapped around to the top 1704 */ 1705 if ((suspect_dma >= start_dma && 1706 suspect_dma <= end_seg_dma) || 1707 (suspect_dma >= cur_seg->dma && 1708 suspect_dma <= end_trb_dma)) 1709 return cur_seg; 1710 } 1711 return NULL; 1712 } else { 1713 /* Might still be somewhere in this segment */ 1714 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1715 return cur_seg; 1716 } 1717 cur_seg = cur_seg->next; 1718 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1719 } while (cur_seg != start_seg); 1720 1721 return NULL; 1722 } 1723 1724 static void 
xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, ep_index, td);

	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing: remove the TD from the endpoint's TD list.
 * Returns 1 if the URB can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP ||
			trb_comp_code == COMP_STOP_SHORT) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
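		 * (The TD stays at the head of the endpoint's td_list; the
		 * Stop Endpoint command completion handler either restarts
		 * the endpoint or moves the hardware dequeue pointer past
		 * the stopped TD.)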
1821 */ 1822 ep->stopped_td = td; 1823 return 0; 1824 } 1825 if (trb_comp_code == COMP_STALL || 1826 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, 1827 trb_comp_code)) { 1828 /* Issue a reset endpoint command to clear the host side 1829 * halt, followed by a set dequeue command to move the 1830 * dequeue pointer past the TD. 1831 * The class driver clears the device side halt later. 1832 */ 1833 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 1834 ep_ring->stream_id, td, event_trb); 1835 } else { 1836 /* Update ring dequeue pointer */ 1837 while (ep_ring->dequeue != td->last_trb) 1838 inc_deq(xhci, ep_ring); 1839 inc_deq(xhci, ep_ring); 1840 } 1841 1842 td_cleanup: 1843 /* Clean up the endpoint's TD list */ 1844 urb = td->urb; 1845 urb_priv = urb->hcpriv; 1846 1847 /* Do one last check of the actual transfer length. 1848 * If the host controller said we transferred more data than the buffer 1849 * length, urb->actual_length will be a very big number (since it's 1850 * unsigned). Play it safe and say we didn't transfer anything. 1851 */ 1852 if (urb->actual_length > urb->transfer_buffer_length) { 1853 xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n", 1854 urb->transfer_buffer_length, 1855 urb->actual_length); 1856 urb->actual_length = 0; 1857 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1858 *status = -EREMOTEIO; 1859 else 1860 *status = 0; 1861 } 1862 list_del_init(&td->td_list); 1863 /* Was this TD slated to be cancelled but completed anyway? */ 1864 if (!list_empty(&td->cancelled_td_list)) 1865 list_del_init(&td->cancelled_td_list); 1866 1867 urb_priv->td_cnt++; 1868 /* Giveback the urb when all the tds are completed */ 1869 if (urb_priv->td_cnt == urb_priv->length) { 1870 ret = 1; 1871 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 1872 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 1873 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { 1874 if (xhci->quirks & XHCI_AMD_PLL_FIX) 1875 usb_amd_quirk_pll_enable(); 1876 } 1877 } 1878 } 1879 1880 return ret; 1881 } 1882 1883 /* 1884 * Process control tds, update urb status and actual_length. 
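 *
 * The same Transfer Event format covers all three control stages; the code
 * below infers which stage completed from the TRB position: an event for
 * ep_ring->dequeue is the setup stage, one for td->last_trb is the status
 * stage, and anything in between is the data stage.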
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_SHORT:
		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		else
			td->urb->actual_length =
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	case COMP_STOP:
		/* Did we stop at the data stage? */
		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		/* fall through */
	case COMP_STOP_INVAL:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) stage? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb_length_set) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/*
			 * Maybe the event was for the data stage?  If so,
			 * update the URB's actual_length now and flag it as
			 * set, so that it is not overwritten by the event for
			 * the last TRB.
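			 * (EVENT_TRB_LEN is the residue, i.e. the bytes that
			 * were *not* transferred.  Illustrative numbers: for
			 * a 512-byte data stage that ends short after 100
			 * bytes, the event reports 412, and actual_length
			 * becomes 512 - 412 = 100.)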
1987 */ 1988 td->urb_length_set = true; 1989 td->urb->actual_length = 1990 td->urb->transfer_buffer_length - 1991 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 1992 xhci_dbg(xhci, "Waiting for status " 1993 "stage event\n"); 1994 return 0; 1995 } 1996 } 1997 1998 return finish_td(xhci, td, event_trb, event, ep, status, false); 1999 } 2000 2001 /* 2002 * Process isochronous tds, update urb packet status and actual_length. 2003 */ 2004 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2005 union xhci_trb *event_trb, struct xhci_transfer_event *event, 2006 struct xhci_virt_ep *ep, int *status) 2007 { 2008 struct xhci_ring *ep_ring; 2009 struct urb_priv *urb_priv; 2010 int idx; 2011 int len = 0; 2012 union xhci_trb *cur_trb; 2013 struct xhci_segment *cur_seg; 2014 struct usb_iso_packet_descriptor *frame; 2015 u32 trb_comp_code; 2016 bool skip_td = false; 2017 2018 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2019 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2020 urb_priv = td->urb->hcpriv; 2021 idx = urb_priv->td_cnt; 2022 frame = &td->urb->iso_frame_desc[idx]; 2023 2024 /* handle completion code */ 2025 switch (trb_comp_code) { 2026 case COMP_SUCCESS: 2027 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { 2028 frame->status = 0; 2029 break; 2030 } 2031 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) 2032 trb_comp_code = COMP_SHORT_TX; 2033 /* fallthrough */ 2034 case COMP_STOP_SHORT: 2035 case COMP_SHORT_TX: 2036 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 2037 -EREMOTEIO : 0; 2038 break; 2039 case COMP_BW_OVER: 2040 frame->status = -ECOMM; 2041 skip_td = true; 2042 break; 2043 case COMP_BUFF_OVER: 2044 case COMP_BABBLE: 2045 frame->status = -EOVERFLOW; 2046 skip_td = true; 2047 break; 2048 case COMP_DEV_ERR: 2049 case COMP_STALL: 2050 frame->status = -EPROTO; 2051 skip_td = true; 2052 break; 2053 case COMP_TX_ERR: 2054 frame->status = -EPROTO; 2055 if (event_trb != td->last_trb) 2056 return 0; 2057 skip_td = true; 2058 break; 2059 case COMP_STOP: 2060 case COMP_STOP_INVAL: 2061 break; 2062 default: 2063 frame->status = -1; 2064 break; 2065 } 2066 2067 if (trb_comp_code == COMP_SUCCESS || skip_td) { 2068 frame->actual_length = frame->length; 2069 td->urb->actual_length += frame->length; 2070 } else if (trb_comp_code == COMP_STOP_SHORT) { 2071 frame->actual_length = 2072 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2073 td->urb->actual_length += frame->actual_length; 2074 } else { 2075 for (cur_trb = ep_ring->dequeue, 2076 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 2077 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 2078 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 2079 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 2080 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 2081 } 2082 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2083 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2084 2085 if (trb_comp_code != COMP_STOP_INVAL) { 2086 frame->actual_length = len; 2087 td->urb->actual_length += len; 2088 } 2089 } 2090 2091 return finish_td(xhci, td, event_trb, event, ep, status, false); 2092 } 2093 2094 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2095 struct xhci_transfer_event *event, 2096 struct xhci_virt_ep *ep, int *status) 2097 { 2098 struct xhci_ring *ep_ring; 2099 struct urb_priv *urb_priv; 2100 struct usb_iso_packet_descriptor *frame; 2101 int idx; 2102 2103 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2104 urb_priv = 
td->urb->hcpriv; 2105 idx = urb_priv->td_cnt; 2106 frame = &td->urb->iso_frame_desc[idx]; 2107 2108 /* The transfer is partly done. */ 2109 frame->status = -EXDEV; 2110 2111 /* calc actual length */ 2112 frame->actual_length = 0; 2113 2114 /* Update ring dequeue pointer */ 2115 while (ep_ring->dequeue != td->last_trb) 2116 inc_deq(xhci, ep_ring); 2117 inc_deq(xhci, ep_ring); 2118 2119 return finish_td(xhci, td, NULL, event, ep, status, true); 2120 } 2121 2122 /* 2123 * Process bulk and interrupt tds, update urb status and actual_length. 2124 */ 2125 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 2126 union xhci_trb *event_trb, struct xhci_transfer_event *event, 2127 struct xhci_virt_ep *ep, int *status) 2128 { 2129 struct xhci_ring *ep_ring; 2130 union xhci_trb *cur_trb; 2131 struct xhci_segment *cur_seg; 2132 u32 trb_comp_code; 2133 2134 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2135 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2136 2137 switch (trb_comp_code) { 2138 case COMP_SUCCESS: 2139 /* Double check that the HW transferred everything. */ 2140 if (event_trb != td->last_trb || 2141 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2142 xhci_warn(xhci, "WARN Successful completion " 2143 "on short TX\n"); 2144 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2145 *status = -EREMOTEIO; 2146 else 2147 *status = 0; 2148 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) 2149 trb_comp_code = COMP_SHORT_TX; 2150 } else { 2151 *status = 0; 2152 } 2153 break; 2154 case COMP_STOP_SHORT: 2155 case COMP_SHORT_TX: 2156 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2157 *status = -EREMOTEIO; 2158 else 2159 *status = 0; 2160 break; 2161 default: 2162 /* Others already handled above */ 2163 break; 2164 } 2165 if (trb_comp_code == COMP_SHORT_TX) 2166 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 2167 "%d bytes untransferred\n", 2168 td->urb->ep->desc.bEndpointAddress, 2169 td->urb->transfer_buffer_length, 2170 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); 2171 /* Stopped - short packet completion */ 2172 if (trb_comp_code == COMP_STOP_SHORT) { 2173 td->urb->actual_length = 2174 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2175 2176 if (td->urb->transfer_buffer_length < 2177 td->urb->actual_length) { 2178 xhci_warn(xhci, "HC gave bad length of %d bytes txed\n", 2179 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); 2180 td->urb->actual_length = 0; 2181 /* status will be set by usb core for canceled urbs */ 2182 } 2183 /* Fast path - was this the last TRB in the TD for this URB? */ 2184 } else if (event_trb == td->last_trb) { 2185 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2186 td->urb->actual_length = 2187 td->urb->transfer_buffer_length - 2188 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2189 if (td->urb->transfer_buffer_length < 2190 td->urb->actual_length) { 2191 xhci_warn(xhci, "HC gave bad length " 2192 "of %d bytes left\n", 2193 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); 2194 td->urb->actual_length = 0; 2195 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2196 *status = -EREMOTEIO; 2197 else 2198 *status = 0; 2199 } 2200 /* Don't overwrite a previously set error code */ 2201 if (*status == -EINPROGRESS) { 2202 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2203 *status = -EREMOTEIO; 2204 else 2205 *status = 0; 2206 } 2207 } else { 2208 td->urb->actual_length = 2209 td->urb->transfer_buffer_length; 2210 /* Ignore a short packet completion if the 2211 * untransferred length was zero. 
2212 */ 2213 if (*status == -EREMOTEIO) 2214 *status = 0; 2215 } 2216 } else { 2217 /* Slow path - walk the list, starting from the dequeue 2218 * pointer, to get the actual length transferred. 2219 */ 2220 td->urb->actual_length = 0; 2221 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 2222 cur_trb != event_trb; 2223 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 2224 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 2225 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 2226 td->urb->actual_length += 2227 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 2228 } 2229 /* If the ring didn't stop on a Link or No-op TRB, add 2230 * in the actual bytes transferred from the Normal TRB 2231 */ 2232 if (trb_comp_code != COMP_STOP_INVAL) 2233 td->urb->actual_length += 2234 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2235 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2236 } 2237 2238 return finish_td(xhci, td, event_trb, event, ep, status, false); 2239 } 2240 2241 /* 2242 * If this function returns an error condition, it means it got a Transfer 2243 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 2244 * At this point, the host controller is probably hosed and should be reset. 2245 */ 2246 static int handle_tx_event(struct xhci_hcd *xhci, 2247 struct xhci_transfer_event *event) 2248 __releases(&xhci->lock) 2249 __acquires(&xhci->lock) 2250 { 2251 struct xhci_virt_device *xdev; 2252 struct xhci_virt_ep *ep; 2253 struct xhci_ring *ep_ring; 2254 unsigned int slot_id; 2255 int ep_index; 2256 struct xhci_td *td = NULL; 2257 dma_addr_t event_dma; 2258 struct xhci_segment *event_seg; 2259 union xhci_trb *event_trb; 2260 struct urb *urb = NULL; 2261 int status = -EINPROGRESS; 2262 struct urb_priv *urb_priv; 2263 struct xhci_ep_ctx *ep_ctx; 2264 struct list_head *tmp; 2265 u32 trb_comp_code; 2266 int ret = 0; 2267 int td_num = 0; 2268 2269 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2270 xdev = xhci->devs[slot_id]; 2271 if (!xdev) { 2272 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 2273 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2274 (unsigned long long) xhci_trb_virt_to_dma( 2275 xhci->event_ring->deq_seg, 2276 xhci->event_ring->dequeue), 2277 lower_32_bits(le64_to_cpu(event->buffer)), 2278 upper_32_bits(le64_to_cpu(event->buffer)), 2279 le32_to_cpu(event->transfer_len), 2280 le32_to_cpu(event->flags)); 2281 xhci_dbg(xhci, "Event ring:\n"); 2282 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2283 return -ENODEV; 2284 } 2285 2286 /* Endpoint ID is 1 based, our index is zero based */ 2287 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 2288 ep = &xdev->eps[ep_index]; 2289 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2290 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2291 if (!ep_ring || 2292 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == 2293 EP_STATE_DISABLED) { 2294 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 2295 "or incorrect stream ring\n"); 2296 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2297 (unsigned long long) xhci_trb_virt_to_dma( 2298 xhci->event_ring->deq_seg, 2299 xhci->event_ring->dequeue), 2300 lower_32_bits(le64_to_cpu(event->buffer)), 2301 upper_32_bits(le64_to_cpu(event->buffer)), 2302 le32_to_cpu(event->transfer_len), 2303 le32_to_cpu(event->flags)); 2304 xhci_dbg(xhci, "Event ring:\n"); 2305 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2306 return -ENODEV; 2307 } 2308 2309 /* Count current td numbers if ep->skip is set */ 2310 if 
(ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
		/* fall through */
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STOP_SHORT:
		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detected an incompatible device\n");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * On a Missed Service Error, the xHC may have skipped one or
		 * more isoc TDs.  Set the skip flag of the ep_ring and
		 * complete the missed TDs as short transfers the next time
		 * this ring is processed.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
				trb_comp_code);
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
					trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set.  Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
					"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the TRB before the current TD.  That TRB may be a
		 * Link TRB or the last TRB of the previous TD.  The command
		 * completion handler will take care of the rest.
		 */
		if (!event_seg && (trb_comp_code == COMP_STOP ||
				   trb_comp_code == COMP_STOP_INVAL)) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  event_dma, true);
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td.  Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled.  Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB.  Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update the event ring dequeue pointer if ep->skip is
		 * set; we will roll back to process the missed TDs first.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;

			xhci_urb_free_priv(urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, there are missed TDs on the endpoint ring that
	 * still need to be taken care of.  Process them as short transfers
	 * until we reach the TD pointed to by the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In the future, <0 returns should indicate an error
 * code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types.
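	 * (Bandwidth Request, Doorbell, Host Controller, and MFINDEX Wrap
	 * events currently land in the default arm below, where they are
	 * only counted in error_bitmask; vendor-defined events, TRB types
	 * 48-63, go to handle_vendor_event().)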
*/ 2617 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { 2618 case TRB_TYPE(TRB_COMPLETION): 2619 handle_cmd_completion(xhci, &event->event_cmd); 2620 break; 2621 case TRB_TYPE(TRB_PORT_STATUS): 2622 handle_port_status(xhci, event); 2623 update_ptrs = 0; 2624 break; 2625 case TRB_TYPE(TRB_TRANSFER): 2626 ret = handle_tx_event(xhci, &event->trans_event); 2627 if (ret < 0) 2628 xhci->error_bitmask |= 1 << 9; 2629 else 2630 update_ptrs = 0; 2631 break; 2632 case TRB_TYPE(TRB_DEV_NOTE): 2633 handle_device_notification(xhci, event); 2634 break; 2635 default: 2636 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2637 TRB_TYPE(48)) 2638 handle_vendor_event(xhci, event); 2639 else 2640 xhci->error_bitmask |= 1 << 3; 2641 } 2642 /* Any of the above functions may drop and re-acquire the lock, so check 2643 * to make sure a watchdog timer didn't mark the host as non-responsive. 2644 */ 2645 if (xhci->xhc_state & XHCI_STATE_DYING) { 2646 xhci_dbg(xhci, "xHCI host dying, returning from " 2647 "event handler.\n"); 2648 return 0; 2649 } 2650 2651 if (update_ptrs) 2652 /* Update SW event ring dequeue pointer */ 2653 inc_deq(xhci, xhci->event_ring); 2654 2655 /* Are there more items on the event ring? Caller will call us again to 2656 * check. 2657 */ 2658 return 1; 2659 } 2660 2661 /* 2662 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2663 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2664 * indicators of an event TRB error, but we check the status *first* to be safe. 2665 */ 2666 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2667 { 2668 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2669 u32 status; 2670 u64 temp_64; 2671 union xhci_trb *event_ring_deq; 2672 dma_addr_t deq; 2673 2674 spin_lock(&xhci->lock); 2675 /* Check if the xHC generated the interrupt, or the irq is shared */ 2676 status = readl(&xhci->op_regs->status); 2677 if (status == 0xffffffff) 2678 goto hw_died; 2679 2680 if (!(status & STS_EINT)) { 2681 spin_unlock(&xhci->lock); 2682 return IRQ_NONE; 2683 } 2684 if (status & STS_FATAL) { 2685 xhci_warn(xhci, "WARNING: Host System Error\n"); 2686 xhci_halt(xhci); 2687 hw_died: 2688 spin_unlock(&xhci->lock); 2689 return IRQ_HANDLED; 2690 } 2691 2692 /* 2693 * Clear the op reg interrupt status first, 2694 * so we can receive interrupts from other MSI-X interrupters. 2695 * Write 1 to clear the interrupt status. 2696 */ 2697 status |= STS_EINT; 2698 writel(status, &xhci->op_regs->status); 2699 /* FIXME when MSI-X is supported and there are multiple vectors */ 2700 /* Clear the MSI-X event interrupt status */ 2701 2702 if (hcd->irq) { 2703 u32 irq_pending; 2704 /* Acknowledge the PCI interrupt */ 2705 irq_pending = readl(&xhci->ir_set->irq_pending); 2706 irq_pending |= IMAN_IP; 2707 writel(irq_pending, &xhci->ir_set->irq_pending); 2708 } 2709 2710 if (xhci->xhc_state & XHCI_STATE_DYING) { 2711 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2712 "Shouldn't IRQs be disabled?\n"); 2713 /* Clear the event handler busy flag (RW1C); 2714 * the event ring should be empty. 2715 */ 2716 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2717 xhci_write_64(xhci, temp_64 | ERST_EHB, 2718 &xhci->ir_set->erst_dequeue); 2719 spin_unlock(&xhci->lock); 2720 2721 return IRQ_HANDLED; 2722 } 2723 2724 event_ring_deq = xhci->event_ring->dequeue; 2725 /* FIXME this should be a delayed service routine 2726 * that clears the EHB. 
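	 * (ERST_PTR_MASK is the low-order flag portion of the ERST dequeue
	 * register, the DESI field plus the RW1C EHB bit, so the update
	 * below keeps those flags while swapping in the new dequeue
	 * address.)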
2727 */ 2728 while (xhci_handle_event(xhci) > 0) {} 2729 2730 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2731 /* If necessary, update the HW's version of the event ring deq ptr. */ 2732 if (event_ring_deq != xhci->event_ring->dequeue) { 2733 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2734 xhci->event_ring->dequeue); 2735 if (deq == 0) 2736 xhci_warn(xhci, "WARN something wrong with SW event " 2737 "ring dequeue ptr.\n"); 2738 /* Update HC event ring dequeue pointer */ 2739 temp_64 &= ERST_PTR_MASK; 2740 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2741 } 2742 2743 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2744 temp_64 |= ERST_EHB; 2745 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2746 2747 spin_unlock(&xhci->lock); 2748 2749 return IRQ_HANDLED; 2750 } 2751 2752 irqreturn_t xhci_msi_irq(int irq, void *hcd) 2753 { 2754 return xhci_irq(hcd); 2755 } 2756 2757 /**** Endpoint Ring Operations ****/ 2758 2759 /* 2760 * Generic function for queueing a TRB on a ring. 2761 * The caller must have checked to make sure there's room on the ring. 2762 * 2763 * @more_trbs_coming: Will you enqueue more TRBs before calling 2764 * prepare_transfer()? 2765 */ 2766 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2767 bool more_trbs_coming, 2768 u32 field1, u32 field2, u32 field3, u32 field4) 2769 { 2770 struct xhci_generic_trb *trb; 2771 2772 trb = &ring->enqueue->generic; 2773 trb->field[0] = cpu_to_le32(field1); 2774 trb->field[1] = cpu_to_le32(field2); 2775 trb->field[2] = cpu_to_le32(field3); 2776 trb->field[3] = cpu_to_le32(field4); 2777 inc_enq(xhci, ring, more_trbs_coming); 2778 } 2779 2780 /* 2781 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2782 * FIXME allocate segments if the ring is full. 2783 */ 2784 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2785 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2786 { 2787 unsigned int num_trbs_needed; 2788 2789 /* Make sure the endpoint has been added to xHC schedule */ 2790 switch (ep_state) { 2791 case EP_STATE_DISABLED: 2792 /* 2793 * USB core changed config/interfaces without notifying us, 2794 * or hardware is reporting the wrong state. 2795 */ 2796 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2797 return -ENOENT; 2798 case EP_STATE_ERROR: 2799 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2800 /* FIXME event handling code for error needs to clear it */ 2801 /* XXX not sure if this should be -ENOENT or not */ 2802 return -EINVAL; 2803 case EP_STATE_HALTED: 2804 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2805 case EP_STATE_STOPPED: 2806 case EP_STATE_RUNNING: 2807 break; 2808 default: 2809 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2810 /* 2811 * FIXME issue Configure Endpoint command to try to get the HC 2812 * back into a known state. 
	 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Command ring expansion is not supported\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					  (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state ^= 1;
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;
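		/*
		 * (Illustrative numbers: an sg entry with sg_dma_address
		 * 0x1F800 and sg_dma_len 0x2000 gets one TRB here for the
		 * 0x800 bytes up to the 64KB boundary, plus one more from
		 * the loop below for the remaining 0x1800 bytes.)
		 */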
2939 2940 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2941 while (running_total < sg_dma_len(sg) && running_total < temp) { 2942 num_trbs++; 2943 running_total += TRB_MAX_BUFF_SIZE; 2944 } 2945 len = min_t(int, len, temp); 2946 temp -= len; 2947 if (temp == 0) 2948 break; 2949 } 2950 return num_trbs; 2951 } 2952 2953 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2954 { 2955 if (num_trbs != 0) 2956 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2957 "TRBs, %d left\n", __func__, 2958 urb->ep->desc.bEndpointAddress, num_trbs); 2959 if (running_total != urb->transfer_buffer_length) 2960 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2961 "queued %#x (%d), asked for %#x (%d)\n", 2962 __func__, 2963 urb->ep->desc.bEndpointAddress, 2964 running_total, running_total, 2965 urb->transfer_buffer_length, 2966 urb->transfer_buffer_length); 2967 } 2968 2969 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2970 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2971 struct xhci_generic_trb *start_trb) 2972 { 2973 /* 2974 * Pass all the TRBs to the hardware at once and make sure this write 2975 * isn't reordered. 2976 */ 2977 wmb(); 2978 if (start_cycle) 2979 start_trb->field[3] |= cpu_to_le32(start_cycle); 2980 else 2981 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 2982 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2983 } 2984 2985 /* 2986 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 2987 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 2988 * (comprised of sg list entries) can take several service intervals to 2989 * transmit. 2990 */ 2991 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2992 struct urb *urb, int slot_id, unsigned int ep_index) 2993 { 2994 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 2995 xhci->devs[slot_id]->out_ctx, ep_index); 2996 int xhci_interval; 2997 int ep_interval; 2998 2999 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 3000 ep_interval = urb->interval; 3001 /* Convert to microframes */ 3002 if (urb->dev->speed == USB_SPEED_LOW || 3003 urb->dev->speed == USB_SPEED_FULL) 3004 ep_interval *= 8; 3005 /* FIXME change this to a warning and a suggestion to use the new API 3006 * to set the polling interval (once the API is added). 3007 */ 3008 if (xhci_interval != ep_interval) { 3009 dev_dbg_ratelimited(&urb->dev->dev, 3010 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", 3011 ep_interval, ep_interval == 1 ? "" : "s", 3012 xhci_interval, xhci_interval == 1 ? "" : "s"); 3013 urb->interval = xhci_interval; 3014 /* Convert back to frames for LS/FS devices */ 3015 if (urb->dev->speed == USB_SPEED_LOW || 3016 urb->dev->speed == USB_SPEED_FULL) 3017 urb->interval /= 8; 3018 } 3019 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); 3020 } 3021 3022 /* 3023 * The TD size is the number of bytes remaining in the TD (including this TRB), 3024 * right shifted by 10. 3025 * It must fit in bits 21:17, so it can't be bigger than 31. 3026 */ 3027 static u32 xhci_td_remainder(unsigned int remainder) 3028 { 3029 u32 max = (1 << (21 - 17 + 1)) - 1; 3030 3031 if ((remainder >> 10) >= max) 3032 return max << 17; 3033 else 3034 return (remainder >> 10) << 17; 3035 } 3036 3037 /* 3038 * For xHCI 1.0 host controllers, TD size is the number of max packet sized 3039 * packets remaining in the TD (*not* including this TRB). 
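 * (Contrast with the pre-1.0 xhci_td_remainder() above, which encodes the
 * remaining byte count right-shifted by 10 and clamped to 31: e.g. 20480
 * bytes remaining encode as 20 << 17, and anything at or above 31 << 10
 * bytes is clamped to 31 << 17.)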
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb,
		unsigned int num_trbs_left)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
		return 0;

	/* All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));

	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}

static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total, ret;
	unsigned int total_packet_count;
	bool zero_length_needed;
	bool first_trb;
	int last_trb_num;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
		urb_priv->length == 2;
	if (zero_length_needed) {
		num_trbs++;
		xhci_dbg(xhci, "Creating zero length td.\n");
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				ep_index, urb->stream_id,
				1, urb, 1, mem_flags);
		if (ret < 0)
			return ret;
	}

	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRB buffers can't cross 64KB boundaries.
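	 *
	 * (Worked example with made-up numbers: if the first sg entry maps
	 * to DMA address 0x12F800 with this_sg_len 0x1000, the first TRB
	 * may carry at most 0x10000 - 0xF800 = 0x800 bytes; the remaining
	 * 0x800 bytes must go in the next TRB.)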
3142 */ 3143 sg = urb->sg; 3144 addr = (u64) sg_dma_address(sg); 3145 this_sg_len = sg_dma_len(sg); 3146 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3147 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 3148 if (trb_buff_len > urb->transfer_buffer_length) 3149 trb_buff_len = urb->transfer_buffer_length; 3150 3151 first_trb = true; 3152 last_trb_num = zero_length_needed ? 2 : 1; 3153 /* Queue the first TRB, even if it's zero-length */ 3154 do { 3155 u32 field = 0; 3156 u32 length_field = 0; 3157 u32 remainder = 0; 3158 3159 /* Don't change the cycle bit of the first TRB until later */ 3160 if (first_trb) { 3161 first_trb = false; 3162 if (start_cycle == 0) 3163 field |= 0x1; 3164 } else 3165 field |= ep_ring->cycle_state; 3166 3167 /* Chain all the TRBs together; clear the chain bit in the last 3168 * TRB to indicate it's the last TRB in the chain. 3169 */ 3170 if (num_trbs > last_trb_num) { 3171 field |= TRB_CHAIN; 3172 } else if (num_trbs == last_trb_num) { 3173 td->last_trb = ep_ring->enqueue; 3174 field |= TRB_IOC; 3175 } else if (zero_length_needed && num_trbs == 1) { 3176 trb_buff_len = 0; 3177 urb_priv->td[1]->last_trb = ep_ring->enqueue; 3178 field |= TRB_IOC; 3179 } 3180 3181 /* Only set interrupt on short packet for IN endpoints */ 3182 if (usb_urb_dir_in(urb)) 3183 field |= TRB_ISP; 3184 3185 if (TRB_MAX_BUFF_SIZE - 3186 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { 3187 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 3188 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 3189 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 3190 (unsigned int) addr + trb_buff_len); 3191 } 3192 3193 /* Set the TRB length, TD size, and interrupter fields. */ 3194 if (xhci->hci_version < 0x100) { 3195 remainder = xhci_td_remainder( 3196 urb->transfer_buffer_length - 3197 running_total); 3198 } else { 3199 remainder = xhci_v1_0_td_remainder(running_total, 3200 trb_buff_len, total_packet_count, urb, 3201 num_trbs - 1); 3202 } 3203 length_field = TRB_LEN(trb_buff_len) | 3204 remainder | 3205 TRB_INTR_TARGET(0); 3206 3207 if (num_trbs > 1) 3208 more_trbs_coming = true; 3209 else 3210 more_trbs_coming = false; 3211 queue_trb(xhci, ep_ring, more_trbs_coming, 3212 lower_32_bits(addr), 3213 upper_32_bits(addr), 3214 length_field, 3215 field | TRB_TYPE(TRB_NORMAL)); 3216 --num_trbs; 3217 running_total += trb_buff_len; 3218 3219 /* Calculate length for next transfer -- 3220 * Are we done queueing all the TRBs for this sg entry? 
3221 */ 3222 this_sg_len -= trb_buff_len; 3223 if (this_sg_len == 0) { 3224 --num_sgs; 3225 if (num_sgs == 0) 3226 break; 3227 sg = sg_next(sg); 3228 addr = (u64) sg_dma_address(sg); 3229 this_sg_len = sg_dma_len(sg); 3230 } else { 3231 addr += trb_buff_len; 3232 } 3233 3234 trb_buff_len = TRB_MAX_BUFF_SIZE - 3235 (addr & (TRB_MAX_BUFF_SIZE - 1)); 3236 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 3237 if (running_total + trb_buff_len > urb->transfer_buffer_length) 3238 trb_buff_len = 3239 urb->transfer_buffer_length - running_total; 3240 } while (num_trbs > 0); 3241 3242 check_trb_math(urb, num_trbs, running_total); 3243 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3244 start_cycle, start_trb); 3245 return 0; 3246 } 3247 3248 /* This is very similar to what ehci-q.c qtd_fill() does */ 3249 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3250 struct urb *urb, int slot_id, unsigned int ep_index) 3251 { 3252 struct xhci_ring *ep_ring; 3253 struct urb_priv *urb_priv; 3254 struct xhci_td *td; 3255 int num_trbs; 3256 struct xhci_generic_trb *start_trb; 3257 bool first_trb; 3258 int last_trb_num; 3259 bool more_trbs_coming; 3260 bool zero_length_needed; 3261 int start_cycle; 3262 u32 field, length_field; 3263 3264 int running_total, trb_buff_len, ret; 3265 unsigned int total_packet_count; 3266 u64 addr; 3267 3268 if (urb->num_sgs) 3269 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 3270 3271 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3272 if (!ep_ring) 3273 return -EINVAL; 3274 3275 num_trbs = 0; 3276 /* How much data is (potentially) left before the 64KB boundary? */ 3277 running_total = TRB_MAX_BUFF_SIZE - 3278 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 3279 running_total &= TRB_MAX_BUFF_SIZE - 1; 3280 3281 /* If there's some data on this 64KB chunk, or we have to send a 3282 * zero-length transfer, we need at least one TRB 3283 */ 3284 if (running_total != 0 || urb->transfer_buffer_length == 0) 3285 num_trbs++; 3286 /* How many more 64KB chunks to transfer, how many more TRBs? */ 3287 while (running_total < urb->transfer_buffer_length) { 3288 num_trbs++; 3289 running_total += TRB_MAX_BUFF_SIZE; 3290 } 3291 3292 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3293 ep_index, urb->stream_id, 3294 num_trbs, urb, 0, mem_flags); 3295 if (ret < 0) 3296 return ret; 3297 3298 urb_priv = urb->hcpriv; 3299 3300 /* Deal with URB_ZERO_PACKET - need one more td/trb */ 3301 zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET && 3302 urb_priv->length == 2; 3303 if (zero_length_needed) { 3304 num_trbs++; 3305 xhci_dbg(xhci, "Creating zero length td.\n"); 3306 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3307 ep_index, urb->stream_id, 3308 1, urb, 1, mem_flags); 3309 if (ret < 0) 3310 return ret; 3311 } 3312 3313 td = urb_priv->td[0]; 3314 3315 /* 3316 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3317 * until we've finished creating all the other TRBs. The ring's cycle 3318 * state may change as we enqueue the other TRBs, so save it too. 3319 */ 3320 start_trb = &ep_ring->enqueue->generic; 3321 start_cycle = ep_ring->cycle_state; 3322 3323 running_total = 0; 3324 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, 3325 usb_endpoint_maxp(&urb->ep->desc)); 3326 /* How much data is in the first TRB? 
*/ 3327 addr = (u64) urb->transfer_dma; 3328 trb_buff_len = TRB_MAX_BUFF_SIZE - 3329 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 3330 if (trb_buff_len > urb->transfer_buffer_length) 3331 trb_buff_len = urb->transfer_buffer_length; 3332 3333 first_trb = true; 3334 last_trb_num = zero_length_needed ? 2 : 1; 3335 /* Queue the first TRB, even if it's zero-length */ 3336 do { 3337 u32 remainder = 0; 3338 field = 0; 3339 3340 /* Don't change the cycle bit of the first TRB until later */ 3341 if (first_trb) { 3342 first_trb = false; 3343 if (start_cycle == 0) 3344 field |= 0x1; 3345 } else 3346 field |= ep_ring->cycle_state; 3347 3348 /* Chain all the TRBs together; clear the chain bit in the last 3349 * TRB to indicate it's the last TRB in the chain. 3350 */ 3351 if (num_trbs > last_trb_num) { 3352 field |= TRB_CHAIN; 3353 } else if (num_trbs == last_trb_num) { 3354 td->last_trb = ep_ring->enqueue; 3355 field |= TRB_IOC; 3356 } else if (zero_length_needed && num_trbs == 1) { 3357 trb_buff_len = 0; 3358 urb_priv->td[1]->last_trb = ep_ring->enqueue; 3359 field |= TRB_IOC; 3360 } 3361 3362 /* Only set interrupt on short packet for IN endpoints */ 3363 if (usb_urb_dir_in(urb)) 3364 field |= TRB_ISP; 3365 3366 /* Set the TRB length, TD size, and interrupter fields. */ 3367 if (xhci->hci_version < 0x100) { 3368 remainder = xhci_td_remainder( 3369 urb->transfer_buffer_length - 3370 running_total); 3371 } else { 3372 remainder = xhci_v1_0_td_remainder(running_total, 3373 trb_buff_len, total_packet_count, urb, 3374 num_trbs - 1); 3375 } 3376 length_field = TRB_LEN(trb_buff_len) | 3377 remainder | 3378 TRB_INTR_TARGET(0); 3379 3380 if (num_trbs > 1) 3381 more_trbs_coming = true; 3382 else 3383 more_trbs_coming = false; 3384 queue_trb(xhci, ep_ring, more_trbs_coming, 3385 lower_32_bits(addr), 3386 upper_32_bits(addr), 3387 length_field, 3388 field | TRB_TYPE(TRB_NORMAL)); 3389 --num_trbs; 3390 running_total += trb_buff_len; 3391 3392 /* Calculate length for next transfer */ 3393 addr += trb_buff_len; 3394 trb_buff_len = urb->transfer_buffer_length - running_total; 3395 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 3396 trb_buff_len = TRB_MAX_BUFF_SIZE; 3397 } while (num_trbs > 0); 3398 3399 check_trb_math(urb, num_trbs, running_total); 3400 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3401 start_cycle, start_trb); 3402 return 0; 3403 } 3404 3405 /* Caller must have locked xhci->lock */ 3406 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3407 struct urb *urb, int slot_id, unsigned int ep_index) 3408 { 3409 struct xhci_ring *ep_ring; 3410 int num_trbs; 3411 int ret; 3412 struct usb_ctrlrequest *setup; 3413 struct xhci_generic_trb *start_trb; 3414 int start_cycle; 3415 u32 field, length_field; 3416 struct urb_priv *urb_priv; 3417 struct xhci_td *td; 3418 3419 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3420 if (!ep_ring) 3421 return -EINVAL; 3422 3423 /* 3424 * Need to copy setup packet into setup TRB, so we can't use the setup 3425 * DMA address. 3426 */ 3427 if (!urb->setup_packet) 3428 return -EINVAL; 3429 3430 /* 1 TRB for setup, 1 for status */ 3431 num_trbs = 2; 3432 /* 3433 * Don't need to check if we need additional event data and normal TRBs, 3434 * since data in control transfers will never get bigger than 16MB 3435 * XXX: can we get a buffer that crosses 64KB boundaries? 
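	 *
	 * (Layout sketch, summarizing the queueing below: every control TD
	 * is a Setup TRB carrying the 8 setup bytes as immediate data
	 * (TRB_IDT), an optional Data Stage TRB, and a Status TRB whose
	 * direction is opposite that of the data stage, or IN when there is
	 * no data stage.)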
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
			setup->bRequestType | setup->bRequest << 8 |
				le16_to_cpu(setup->wValue) << 16,
			le16_to_cpu(setup->wIndex) |
				le16_to_cpu(setup->wLength) << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
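/*
 * Worked example for count_isoc_trbs_needed() below (illustrative numbers):
 * a 0x300-byte frame whose buffer starts 0xFF00 bytes into a 64KB region
 * straddles the 64KB boundary, so DIV_ROUND_UP(0x300 + 0xFF00, 0x10000) = 2
 * TRBs are needed. A zero-length frame descriptor still consumes one TRB.
 */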
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. Basically, for everything but SuperSpeed devices, this field will be
 * zero. Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
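/*
 * Worked example for the two helpers above (illustrative numbers): on an
 * xHCI 1.0 host, a SuperSpeed endpoint with bMaxBurst = 2 moving 7 packets
 * needs DIV_ROUND_UP(7, 3) = 3 bursts, so the (zero-based) burst count is 2.
 * The last burst carries 7 % 3 = 1 packet, so the last burst packet count is
 * 0. For a high-speed endpoint moving 3 packets, the burst count is 0 and
 * the last burst packet count is 2.
 */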
/*
 * The Frame ID field of the isochronous TRB identifies the target frame
 * that the Interval associated with this Isochronous Transfer Descriptor
 * will start on. Refer to section 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
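	/*
	 * Worked example (illustrative register values): with MFINDEX = 288
	 * (frame 36) and ist = 8, the window below becomes
	 * start_frame_id = roundup(288 + 8 + 1, 8) = 304 -> frame 38, and
	 * end_frame_id = rounddown(288 + 895 * 8, 8) = 7448 -> frame 931.
	 * Any start_frame inside frames 38..931 (mod 2048) is accepted.
	 */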
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
			__func__, index, readl(&xhci->run_regs->microframe_index),
			start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				GET_MAX_PACKET(
					usb_endpoint_maxp(&urb->ep->desc)));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			int frame_id = 0;
			u32 remainder = 0;
			field = 0;

			if (first_trb) {
				field = TRB_TBC(burst_count) |
					TRB_TLBPC(residue);
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);

				/* Calculate Frame ID and SIA fields */
				if (!(urb->transfer_flags & URB_ISO_ASAP) &&
						HCC_CFC(xhci->hcc_params)) {
					frame_id = xhci_get_isoc_frame_id(xhci,
							urb,
							i);
					if (frame_id >= 0)
						field |= TRB_FRAME_ID(frame_id);
					else
						field |= TRB_SIA;
				} else
					field |= TRB_SIA;

				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100 &&
						!(xhci->quirks &
							XHCI_AVOID_BEI)) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;
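			/*
			 * TD size example (illustrative numbers): for a 96KB
			 * TD on a 1024-byte endpoint, total_packet_count is
			 * 96. On a 1.0 host, after the first 64KB TRB the
			 * helper below reports the packets still outstanding
			 * (capped at 31), and 0 on the final TRB; pre-1.0
			 * hosts encode (roughly) the remaining byte count
			 * instead.
			 */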
			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb,
						(trbs_per_td - j - 1));
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them. td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}
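/*
 * The endpoint context's TR Dequeue Pointer field is where the xHC reports
 * how far it has consumed the ring (its low bits carry the cycle state, so
 * they are masked off below). If that hardware dequeue pointer has not yet
 * caught up with our enqueue pointer, the endpoint is still working through
 * queued TDs.
 */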
static int ep_ring_is_processing(struct xhci_hcd *xhci,
		int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_virt_ep *xep;
	dma_addr_t hw_deq;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xep->ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
		return 0;

	hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
	return (hw_deq !=
		xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
}

/*
 * Check the transfer ring to guarantee there is enough room for the URB,
 * and update the ISO URB's start_frame and interval. The interval is
 * updated the same way xhci_queue_intr_tx does it. The xHC's frame index
 * is used to set urb->start_frame when URB_ISO_ASAP is set in
 * transfer_flags or when Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole URB.
	 * Do not insert any TDs of the URB into the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check the interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) &&
			ep_ring_is_processing(xhci, slot_id, ep_index)) {
		urb->start_frame = xep->next_frame_id;
		goto skip_start_over;
	}
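	/*
	 * Worked example for the rounding below (illustrative numbers,
	 * assuming ist = 8): for a high-speed endpoint with a 4-frame
	 * service interval (urb->interval = 32 microframes), a raw MFINDEX
	 * of 1000 first gets the ist + XHCI_CFC_DELAY scheduling margin
	 * added, is rounded up to a frame boundary, and is then rounded up
	 * to the next 32-microframe service interval.
	 */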
	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame, allowing for the time it takes before
	 * the TRB really gets scheduled by the hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/**** Command Ring Operations ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (xhci->xhc_state) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	/* if there are no other commands queued we start the timeout timer */
	if (xhci->cmd_list.next == &cmd->cmd_list &&
			!timer_pending(&xhci->cmd_timer)) {
		xhci->current_cmd = cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
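/*
 * Note: the BSR (Block Set Address Request) flag below makes the Address
 * Device command update the output context without sending a USB
 * SET_ADDRESS request on the bus; that is what SETUP_CONTEXT_ONLY asks for.
 */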
/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
		int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
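/*
 * TRBs are 16-byte aligned, so the low four bits of the new dequeue pointer
 * in the Set TR Dequeue Pointer command are reused for control information:
 * the new Dequeue Cycle State rides in bit 0 and the Stream Context Type in
 * bits 3:1, which is why they are OR'd into lower_32_bits(addr) below.
 */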
/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(
				deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
		return;
	}

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
			lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
		int slot_id, unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}
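/*
 * Illustrative usage sketch for the command helpers above (error handling
 * and locking details elided; not a verbatim sequence from this driver):
 *
 *	struct xhci_command *cmd;
 *	unsigned long flags;
 *	int ret;
 *
 *	cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	ret = xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
 *	if (!ret)
 *		xhci_ring_cmd_db(xhci);
 *	spin_unlock_irqrestore(&xhci->lock, flags);
 *	wait_for_completion(cmd->completion);
 *
 * After the completion fires, cmd->status holds the command completion code
 * reported on the event ring.
 */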