// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
 *    command and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you. If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer.
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
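
/*
 * Illustrative sketch of the consumer rules above (not driver code; a
 * hypothetical poll loop, with my_handle_trb() standing in for real event
 * processing). A TRB belongs to the consumer while its cycle bit matches
 * the consumer's ring cycle state:
 *
 *	while ((le32_to_cpu(ring->dequeue->generic.field[3]) & TRB_CYCLE) ==
 *	       ring->cycle_state) {
 *		my_handle_trb(ring->dequeue);
 *		inc_deq(xhci, ring);	// may toggle ring->cycle_state
 *	}
 */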

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
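
/*
 * Worked example for xhci_trb_virt_to_dma() above (made-up numbers): TRBs
 * are 16 bytes, so with seg->dma == 0x1000 the third TRB in the segment
 * (segment_offset == 2) maps to 0x1000 + 2 * sizeof(union xhci_trb) ==
 * 0x1020. A trb outside [seg->trbs, seg->trbs + TRBS_PER_SEGMENT) yields 0.
 */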

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}

out:
	trace_xhci_inc_deq(ring);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/*
		 * If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_xhci_inc_enq(ring);
}
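
/*
 * Sketch of how a producer drives inc_enq(); this parallels the driver's own
 * queue_trb() (defined later in this file). The caller composes field[3]
 * with TRB_CYCLE clear, and the ring cycle state is OR'd in so the xHC sees
 * a valid TRB only once all fields are written:
 *
 *	trb = &ring->enqueue->generic;
 *	trb->field[0] = cpu_to_le32(f0);
 *	trb->field[1] = cpu_to_le32(f1);
 *	trb->field[2] = cpu_to_le32(f2);
 *	trb->field[3] = cpu_to_le32(f3 | ring->cycle_state);
 *	inc_enq(xhci, ring, more_trbs_coming);
 */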

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment. See rules
 * above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs. If there are other commands waiting, then restart the ring and
 * kick the timer. This must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * The caller waiting for completion is called when the command
		 * completion event is received for these no-op commands.
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}
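
/*
 * For reference, the restart path above mirrors how a command is normally
 * submitted (sketch, assuming the caller holds xhci->lock and has already
 * enqueued a command TRB via the queue_command() helper later in this file):
 *
 *	xhci->current_cmd = cmd;
 *	xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
 *	xhci_ring_cmd_db(xhci);
 */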

/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/*
	 * Section 4.6.1.2 of the xHCI 1.0 spec says software should also time
	 * the completion of the Command Abort operation. If CRR is not negated
	 * in 5 seconds then the driver handles it as if the host died
	 * (-ENODEV). In the future we should distinguish between -ENODEV and
	 * -ETIMEDOUT and try to recover a -ETIMEDOUT with a host controller
	 * reset.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/*
	 * Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/*
	 * The CPU has better things to do at this point than wait for a
	 * write-posting flush. It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;

		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}
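
/*
 * Doorbell layout reference for the writers above: DB_VALUE() (xhci.h) packs
 * the doorbell target (endpoint index + 1) into bits 7:0 and the stream ID
 * into bits 31:16. E.g. ep_index 2, stream 0 produces doorbell target 3:
 *
 *	writel(DB_VALUE(2, 0), &xhci->dba->doorbell[slot_id]);
 */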

/*
 * Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
			  "WARN: Slot ID %u, ep index %u has streams, but URB has no stream ID.\n",
			  slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
		  "WARN: Slot ID %u, ep index %u has stream IDs 1 to %u allocated, but stream ID %u is requested.\n",
		  slot_id, ep_index,
		  ep->stream_info->num_streams - 1,
		  stream_id);
	return NULL;
}

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}
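
/*
 * Example of consuming the value returned by xhci_get_hw_deq() above: bit 0
 * carries the cycle state and, for stream contexts, bits 3:1 carry the
 * stream context type, so the 16-byte aligned TRB address must be masked
 * out (as xhci_handle_cmd_stop_ep() below does):
 *
 *	hw_dequeue = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
 *	cycle = hw_dequeue & 0x1;		// consumer cycle state
 *	deq_dma = hw_dequeue & ~0xf;		// TRB bus address
 */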

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, stream id, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD. We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine. Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state for invalid stream ID %u.\n",
			  stream_id);
		return;
	}
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "Finding endpoint context");

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;
	state->stream_id = stream_id;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after the current TD's last_trb). We know the cycle state
	 * at hw_dequeue, so walk the ring until both hw_dequeue and last_trb
	 * are found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "New dequeue segment = %p (virtual)",
		       state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "New dequeue pointer = 0x%llx (DMA)",
		       (unsigned long long) addr);
}

/*
 * flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_STOP_CMD_PENDING;
	/* Can't del_timer_sync in interrupt */
	del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context;
 * releases and re-acquires xhci->lock.
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&xhci->lock);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for IN transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_virt_device *vdev;
	u64 hw_deq;
	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
				  slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));

	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_stop_ep(ep_ctx);

	ep = &xhci->devs[slot_id]->eps[ep_index];
	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
			struct xhci_td, cancelled_td_list);

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/*
	 * Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it. We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Removing canceled TD starting at 0x%llx (dma).",
			       (unsigned long long)xhci_trb_virt_to_dma(
				       cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/*
			 * This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission. This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed. In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint. In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
				  cur_td->urb,
				  cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
					 cur_td->urb->stream_id);
		hw_deq &= ~0xf;

		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
			      cur_td->last_trb, hw_deq, false)) {
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
						    cur_td->urb->stream_id,
						    cur_td, &deq_state);
		} else {
			td_to_noop(xhci, ep_ring, cur_td, false);
		}

remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list. Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}

	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
					     &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/*
		 * Clean up the cancelled URB. It doesn't matter what we pass
		 * for status, since the core will just overwrite it (because
		 * the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/*
		 * Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
	    (ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			if (!ring)
				continue;

			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id);
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
			cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/*
 * Host controller died; register reads return 0xffffffff.
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * The lock is released and re-acquired while giving back URBs.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	int i, j;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
		usb_hc_died(xhci_to_hcd(xhci));
}

/*
 * Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead. The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called. Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back. So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
{
	struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
	struct xhci_hcd *xhci = ep->xhci;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);

	/* bail out if cmd completed but raced with stop ep watchdog timer */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
	    timer_pending(&ep->stop_cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	xhci_halt(xhci);

	/*
	 * Handle a stop endpoint cmd timeout as if the host died (-ENODEV).
	 * In the future we could distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	xhci_hc_died(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "xHCI host controller is dead.");
}
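
/*
 * Sketch of the arm side of the protocol described above, as done where the
 * stop endpoint command is queued elsewhere in the driver (URB dequeue in
 * xhci.c); the completion handler disarms via
 * xhci_stop_watchdog_timer_in_irq(), and the watchdog bails out when the
 * flag is clear or a newer timer is pending:
 *
 *	ep->ep_state |= EP_STOP_CMD_PENDING;
 *	ep->stop_cmd_timer.expires = jiffies +
 *		XHCI_STOP_EP_CMD_TIMEOUT * HZ;
 *	add_timer(&ep->stop_cmd_timer);
 */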

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/*
	 * If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop. So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again. We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
			  stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
	trace_xhci_handle_cmd_set_deq(slot_ctx);
	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
				  slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
				  cmd_comp_code);
			break;
		}
		/*
		 * OK what do we do now? The endpoint state is hosed, and we
		 * should never get to this point if the synchronization
		 * between queueing and endpoint state is correct. This might
		 * happen if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/*
			 * Update the ring's dequeue segment and dequeue
			 * pointer to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	struct xhci_virt_device *vdev;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_reset_ep(ep_ctx);

	/*
	 * This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/*
	 * HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used. Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command)
			return;

		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core configuration
	 * or alt setting changes, or because the HW needed an extra configure
	 * endpoint command after a reset endpoint command or streams were
	 * being configured. If the command was for a halted endpoint, the xHCI
	 * driver is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);

	/*
	 * A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware. Not worth
	 * worrying about, since this is prototype hardware. Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
	    ep_index != (unsigned int) -1 &&
	    add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
			  slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "NEC firmware version %2x.%02x",
		       NEC_FW_MAJOR(le32_to_cpu(event->status)),
		       NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;

	xhci->current_cmd = NULL;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}
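
/*
 * Sketch of the command lifetime xhci_complete_del_and_free_cmd() implies
 * for a submitter (hedged; the real submit paths are the xhci_queue_*
 * helpers plus wait_for_completion() in xhci.c):
 *
 *	cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);  // with completion
 *	// ... queue the command, then xhci_ring_cmd_db(xhci) ...
 *	wait_for_completion(cmd->completion);
 *	ret = cmd->status;	// set before complete() above
 *	xhci_free_command(xhci, cmd);
 *
 * Commands allocated without a completion are simply kfree()d when their
 * completion event is handled.
 */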

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/*
		 * SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11).
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
				struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/*
 * @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each
 * of the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port().
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/*
	 * port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes. Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub. If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
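
/*
 * Worked example for find_faked_portnum_from_hw_portnum() (made-up
 * topology): with port_array[] = { 0x03, 0x02, 0x03, 0x02 } and a Port
 * Status Change Event for hardware port_id 3 routed to the USB 3.0 hcd,
 * ports at indexes 0 and 1 are inspected and only index 0 (0x03) is a
 * similar speed port, so the function returns 1: the port is usb3_ports[1],
 * the second USB 3.0 root hub port.
 */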

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for unused slot %u\n",
			  slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
		 slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 portsc, cmd_reg;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/*
	 * Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in Extended Capabilities, ignoring.\n",
			  port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in Extended Capabilities, ignoring.\n",
			  port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);
	portsc = readl(port_array[faked_port_index]);

	trace_xhci_handle_port_status(faked_port_index, portsc);

	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		cmd_reg = readl(&xhci->op_regs->command);
		if (!(cmd_reg & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(portsc)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/*
			 * Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/*
			 * Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			/*
			 * Do the rest in GetPortStatus after the resume time
			 * delay. Avoid polling roothub status before that so
			 * the USB device auto-resume latency stays around
			 * ~40 ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			bogus_port_status = true;
		}
	}

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
	    DEV_SUPERSPEED_ANY(portsc)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/*
		 * We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume. If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state). If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(portsc) &&
	    test_and_clear_bit(faked_port_index,
			       &bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/*
	 * Don't make the USB core poll the roothub if we got a bad port status
	 * change event. Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set. When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}
1689 */
1690 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1691 faked_port_index + 1);
1692 if (slot_id && xhci->devs[slot_id])
1693 xhci_ring_device(xhci, slot_id);
1694 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1695 bus_state->port_remote_wakeup &=
1696 ~(1 << faked_port_index);
1697 xhci_test_and_clear_bit(xhci, port_array,
1698 faked_port_index, PORT_PLC);
1699 usb_wakeup_notification(hcd->self.root_hub,
1700 faked_port_index + 1);
1701 bogus_port_status = true;
1702 goto cleanup;
1703 }
1704 }
1705
1706 /*
1707 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1708 * RExit to a disconnect state). If so, let the driver know it's
1709 * out of the RExit state.
1710 */
1711 if (!DEV_SUPERSPEED_ANY(portsc) &&
1712 test_and_clear_bit(faked_port_index,
1713 &bus_state->rexit_ports)) {
1714 complete(&bus_state->rexit_done[faked_port_index]);
1715 bogus_port_status = true;
1716 goto cleanup;
1717 }
1718
1719 if (hcd->speed < HCD_USB3)
1720 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1721 PORT_PLC);
1722
1723 cleanup:
1724 /* Update event ring dequeue pointer before dropping the lock */
1725 inc_deq(xhci, xhci->event_ring);
1726
1727 /* Don't make the USB core poll the roothub if we got a bad port status
1728 * change event. Besides, at that point we can't tell which roothub
1729 * (USB 2.0 or USB 3.0) to kick.
1730 */
1731 if (bogus_port_status)
1732 return;
1733
1734 /*
1735 * xHCI port-status-change events occur when the "or" of all the
1736 * status-change bits in the portsc register changes from 0 to 1.
1737 * New status changes won't cause an event if any other change
1738 * bits are still set. When an event occurs, switch over to
1739 * polling to avoid losing status changes.
1740 */
1741 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1742 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1743 spin_unlock(&xhci->lock);
1744 /* Pass this up to the core */
1745 usb_hcd_poll_rh_status(hcd);
1746 spin_lock(&xhci->lock);
1747 }
1748
1749 /*
1750 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1751 * at end_trb, which may be in another segment. If the suspect DMA address is a
1752 * TRB in this TD, this function returns that TRB's segment. Otherwise it
1753 * returns NULL.
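 * For example (a hypothetical layout, for illustration only): if a TD runs
 * from TRB 60 of segment A through TRB 2 of the following segment B, a
 * suspect DMA address matching TRB 61 of A returns segment A, one matching
 * TRB 1 of B returns segment B, and an address outside both spans returns
 * NULL.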
1754 */ 1755 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, 1756 struct xhci_segment *start_seg, 1757 union xhci_trb *start_trb, 1758 union xhci_trb *end_trb, 1759 dma_addr_t suspect_dma, 1760 bool debug) 1761 { 1762 dma_addr_t start_dma; 1763 dma_addr_t end_seg_dma; 1764 dma_addr_t end_trb_dma; 1765 struct xhci_segment *cur_seg; 1766 1767 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); 1768 cur_seg = start_seg; 1769 1770 do { 1771 if (start_dma == 0) 1772 return NULL; 1773 /* We may get an event for a Link TRB in the middle of a TD */ 1774 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1775 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1776 /* If the end TRB isn't in this segment, this is set to 0 */ 1777 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 1778 1779 if (debug) 1780 xhci_warn(xhci, 1781 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", 1782 (unsigned long long)suspect_dma, 1783 (unsigned long long)start_dma, 1784 (unsigned long long)end_trb_dma, 1785 (unsigned long long)cur_seg->dma, 1786 (unsigned long long)end_seg_dma); 1787 1788 if (end_trb_dma > 0) { 1789 /* The end TRB is in this segment, so suspect should be here */ 1790 if (start_dma <= end_trb_dma) { 1791 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 1792 return cur_seg; 1793 } else { 1794 /* Case for one segment with 1795 * a TD wrapped around to the top 1796 */ 1797 if ((suspect_dma >= start_dma && 1798 suspect_dma <= end_seg_dma) || 1799 (suspect_dma >= cur_seg->dma && 1800 suspect_dma <= end_trb_dma)) 1801 return cur_seg; 1802 } 1803 return NULL; 1804 } else { 1805 /* Might still be somewhere in this segment */ 1806 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1807 return cur_seg; 1808 } 1809 cur_seg = cur_seg->next; 1810 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1811 } while (cur_seg != start_seg); 1812 1813 return NULL; 1814 } 1815 1816 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1817 unsigned int slot_id, unsigned int ep_index, 1818 unsigned int stream_id, 1819 struct xhci_td *td, union xhci_trb *ep_trb, 1820 enum xhci_ep_reset_type reset_type) 1821 { 1822 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1823 struct xhci_command *command; 1824 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); 1825 if (!command) 1826 return; 1827 1828 ep->ep_state |= EP_HALTED; 1829 1830 xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); 1831 1832 if (reset_type == EP_HARD_RESET) 1833 xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td); 1834 1835 xhci_ring_cmd_db(xhci); 1836 } 1837 1838 /* Check if an error has halted the endpoint ring. The class driver will 1839 * cleanup the halt for a non-default control endpoint if we indicate a stall. 1840 * However, a babble and other errors also halt the endpoint ring, and the class 1841 * driver won't clear the halt in that case, so we need to issue a Set Transfer 1842 * Ring Dequeue Pointer command manually. 1843 */ 1844 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, 1845 struct xhci_ep_ctx *ep_ctx, 1846 unsigned int trb_comp_code) 1847 { 1848 /* TRB completion codes that may require a manual halt cleanup */ 1849 if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || 1850 trb_comp_code == COMP_BABBLE_DETECTED_ERROR || 1851 trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) 1852 /* The 0.95 spec says a babbling control endpoint 1853 * is not halted. The 0.96 spec says it is. 
Some HW 1854 * claims to be 0.95 compliant, but it halts the control 1855 * endpoint anyway. Check if a babble halted the 1856 * endpoint. 1857 */ 1858 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) 1859 return 1; 1860 1861 return 0; 1862 } 1863 1864 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) 1865 { 1866 if (trb_comp_code >= 224 && trb_comp_code <= 255) { 1867 /* Vendor defined "informational" completion code, 1868 * treat as not-an-error. 1869 */ 1870 xhci_dbg(xhci, "Vendor defined info completion code %u\n", 1871 trb_comp_code); 1872 xhci_dbg(xhci, "Treating code as success.\n"); 1873 return 1; 1874 } 1875 return 0; 1876 } 1877 1878 static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, 1879 struct xhci_ring *ep_ring, int *status) 1880 { 1881 struct urb *urb = NULL; 1882 1883 /* Clean up the endpoint's TD list */ 1884 urb = td->urb; 1885 1886 /* if a bounce buffer was used to align this td then unmap it */ 1887 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); 1888 1889 /* Do one last check of the actual transfer length. 1890 * If the host controller said we transferred more data than the buffer 1891 * length, urb->actual_length will be a very big number (since it's 1892 * unsigned). Play it safe and say we didn't transfer anything. 1893 */ 1894 if (urb->actual_length > urb->transfer_buffer_length) { 1895 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", 1896 urb->transfer_buffer_length, urb->actual_length); 1897 urb->actual_length = 0; 1898 *status = 0; 1899 } 1900 list_del_init(&td->td_list); 1901 /* Was this TD slated to be cancelled but completed anyway? */ 1902 if (!list_empty(&td->cancelled_td_list)) 1903 list_del_init(&td->cancelled_td_list); 1904 1905 inc_td_cnt(urb); 1906 /* Giveback the urb when all the tds are completed */ 1907 if (last_td_in_urb(td)) { 1908 if ((urb->actual_length != urb->transfer_buffer_length && 1909 (urb->transfer_flags & URB_SHORT_NOT_OK)) || 1910 (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) 1911 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", 1912 urb, urb->actual_length, 1913 urb->transfer_buffer_length, *status); 1914 1915 /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ 1916 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 1917 *status = 0; 1918 xhci_giveback_urb_in_irq(xhci, td, *status); 1919 } 1920 1921 return 0; 1922 } 1923 1924 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, 1925 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 1926 struct xhci_virt_ep *ep, int *status) 1927 { 1928 struct xhci_virt_device *xdev; 1929 struct xhci_ep_ctx *ep_ctx; 1930 struct xhci_ring *ep_ring; 1931 unsigned int slot_id; 1932 u32 trb_comp_code; 1933 int ep_index; 1934 1935 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1936 xdev = xhci->devs[slot_id]; 1937 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1938 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1939 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1940 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1941 1942 if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || 1943 trb_comp_code == COMP_STOPPED || 1944 trb_comp_code == COMP_STOPPED_SHORT_PACKET) { 1945 /* The Endpoint Stop Command completion will take care of any 1946 * stopped TDs. A stopped TD may be restarted, so don't update 1947 * the ring dequeue pointer or take this TD off any lists yet. 
1948 */ 1949 return 0; 1950 } 1951 if (trb_comp_code == COMP_STALL_ERROR || 1952 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, 1953 trb_comp_code)) { 1954 /* Issue a reset endpoint command to clear the host side 1955 * halt, followed by a set dequeue command to move the 1956 * dequeue pointer past the TD. 1957 * The class driver clears the device side halt later. 1958 */ 1959 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 1960 ep_ring->stream_id, td, ep_trb, 1961 EP_HARD_RESET); 1962 } else { 1963 /* Update ring dequeue pointer */ 1964 while (ep_ring->dequeue != td->last_trb) 1965 inc_deq(xhci, ep_ring); 1966 inc_deq(xhci, ep_ring); 1967 } 1968 1969 return xhci_td_cleanup(xhci, td, ep_ring, status); 1970 } 1971 1972 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ 1973 static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, 1974 union xhci_trb *stop_trb) 1975 { 1976 u32 sum; 1977 union xhci_trb *trb = ring->dequeue; 1978 struct xhci_segment *seg = ring->deq_seg; 1979 1980 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { 1981 if (!trb_is_noop(trb) && !trb_is_link(trb)) 1982 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); 1983 } 1984 return sum; 1985 } 1986 1987 /* 1988 * Process control tds, update urb status and actual_length. 1989 */ 1990 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1991 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 1992 struct xhci_virt_ep *ep, int *status) 1993 { 1994 struct xhci_virt_device *xdev; 1995 unsigned int slot_id; 1996 int ep_index; 1997 struct xhci_ep_ctx *ep_ctx; 1998 u32 trb_comp_code; 1999 u32 remaining, requested; 2000 u32 trb_type; 2001 2002 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); 2003 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2004 xdev = xhci->devs[slot_id]; 2005 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 2006 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2007 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2008 requested = td->urb->transfer_buffer_length; 2009 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2010 2011 switch (trb_comp_code) { 2012 case COMP_SUCCESS: 2013 if (trb_type != TRB_STATUS) { 2014 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", 2015 (trb_type == TRB_DATA) ? 
"data" : "setup"); 2016 *status = -ESHUTDOWN; 2017 break; 2018 } 2019 *status = 0; 2020 break; 2021 case COMP_SHORT_PACKET: 2022 *status = 0; 2023 break; 2024 case COMP_STOPPED_SHORT_PACKET: 2025 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) 2026 td->urb->actual_length = remaining; 2027 else 2028 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); 2029 goto finish_td; 2030 case COMP_STOPPED: 2031 switch (trb_type) { 2032 case TRB_SETUP: 2033 td->urb->actual_length = 0; 2034 goto finish_td; 2035 case TRB_DATA: 2036 case TRB_NORMAL: 2037 td->urb->actual_length = requested - remaining; 2038 goto finish_td; 2039 case TRB_STATUS: 2040 td->urb->actual_length = requested; 2041 goto finish_td; 2042 default: 2043 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", 2044 trb_type); 2045 goto finish_td; 2046 } 2047 case COMP_STOPPED_LENGTH_INVALID: 2048 goto finish_td; 2049 default: 2050 if (!xhci_requires_manual_halt_cleanup(xhci, 2051 ep_ctx, trb_comp_code)) 2052 break; 2053 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", 2054 trb_comp_code, ep_index); 2055 /* else fall through */ 2056 case COMP_STALL_ERROR: 2057 /* Did we transfer part of the data (middle) phase? */ 2058 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) 2059 td->urb->actual_length = requested - remaining; 2060 else if (!td->urb_length_set) 2061 td->urb->actual_length = 0; 2062 goto finish_td; 2063 } 2064 2065 /* stopped at setup stage, no data transferred */ 2066 if (trb_type == TRB_SETUP) 2067 goto finish_td; 2068 2069 /* 2070 * if on data stage then update the actual_length of the URB and flag it 2071 * as set, so it won't be overwritten in the event for the last TRB. 2072 */ 2073 if (trb_type == TRB_DATA || 2074 trb_type == TRB_NORMAL) { 2075 td->urb_length_set = true; 2076 td->urb->actual_length = requested - remaining; 2077 xhci_dbg(xhci, "Waiting for status stage event\n"); 2078 return 0; 2079 } 2080 2081 /* at status stage */ 2082 if (!td->urb_length_set) 2083 td->urb->actual_length = requested; 2084 2085 finish_td: 2086 return finish_td(xhci, td, ep_trb, event, ep, status); 2087 } 2088 2089 /* 2090 * Process isochronous tds, update urb packet status and actual_length. 2091 */ 2092 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2093 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 2094 struct xhci_virt_ep *ep, int *status) 2095 { 2096 struct xhci_ring *ep_ring; 2097 struct urb_priv *urb_priv; 2098 int idx; 2099 struct usb_iso_packet_descriptor *frame; 2100 u32 trb_comp_code; 2101 bool sum_trbs_for_length = false; 2102 u32 remaining, requested, ep_trb_len; 2103 int short_framestatus; 2104 2105 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2106 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2107 urb_priv = td->urb->hcpriv; 2108 idx = urb_priv->num_tds_done; 2109 frame = &td->urb->iso_frame_desc[idx]; 2110 requested = frame->length; 2111 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2112 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); 2113 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
2114 -EREMOTEIO : 0;
2115
2116 /* handle completion code */
2117 switch (trb_comp_code) {
2118 case COMP_SUCCESS:
2119 if (remaining) {
2120 frame->status = short_framestatus;
2121 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2122 sum_trbs_for_length = true;
2123 break;
2124 }
2125 frame->status = 0;
2126 break;
2127 case COMP_SHORT_PACKET:
2128 frame->status = short_framestatus;
2129 sum_trbs_for_length = true;
2130 break;
2131 case COMP_BANDWIDTH_OVERRUN_ERROR:
2132 frame->status = -ECOMM;
2133 break;
2134 case COMP_ISOCH_BUFFER_OVERRUN:
2135 case COMP_BABBLE_DETECTED_ERROR:
2136 frame->status = -EOVERFLOW;
2137 break;
2138 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2139 case COMP_STALL_ERROR:
2140 frame->status = -EPROTO;
2141 break;
2142 case COMP_USB_TRANSACTION_ERROR:
2143 frame->status = -EPROTO;
2144 if (ep_trb != td->last_trb)
2145 return 0;
2146 break;
2147 case COMP_STOPPED:
2148 sum_trbs_for_length = true;
2149 break;
2150 case COMP_STOPPED_SHORT_PACKET:
2151 /* field normally containing residue now contains transferred */
2152 frame->status = short_framestatus;
2153 requested = remaining;
2154 break;
2155 case COMP_STOPPED_LENGTH_INVALID:
2156 requested = 0;
2157 remaining = 0;
2158 break;
2159 default:
2160 sum_trbs_for_length = true;
2161 frame->status = -1;
2162 break;
2163 }
2164
2165 if (sum_trbs_for_length)
2166 frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
2167 ep_trb_len - remaining;
2168 else
2169 frame->actual_length = requested;
2170
2171 td->urb->actual_length += frame->actual_length;
2172
2173 return finish_td(xhci, td, ep_trb, event, ep, status);
2174 }
2175
2176 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2177 struct xhci_transfer_event *event,
2178 struct xhci_virt_ep *ep, int *status)
2179 {
2180 struct xhci_ring *ep_ring;
2181 struct urb_priv *urb_priv;
2182 struct usb_iso_packet_descriptor *frame;
2183 int idx;
2184
2185 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2186 urb_priv = td->urb->hcpriv;
2187 idx = urb_priv->num_tds_done;
2188 frame = &td->urb->iso_frame_desc[idx];
2189
2190 /* The transfer is partly done. */
2191 frame->status = -EXDEV;
2192
2193 /* calc actual length */
2194 frame->actual_length = 0;
2195
2196 /* Update ring dequeue pointer */
2197 while (ep_ring->dequeue != td->last_trb)
2198 inc_deq(xhci, ep_ring);
2199 inc_deq(xhci, ep_ring);
2200
2201 return xhci_td_cleanup(xhci, td, ep_ring, status);
2202 }
2203
2204 /*
2205 * Process bulk and interrupt tds, update urb status and actual_length.
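 * For example (hypothetical numbers, for illustration only): a 10240-byte
 * bulk TD queued as an 8192-byte TRB plus a 2048-byte TRB that completes
 * short on the last TRB with 1024 bytes untransferred gives actual_length =
 * 10240 - 1024 = 9216; if the event instead points at an earlier TRB,
 * actual_length is computed as sum_trb_lengths() + ep_trb_len - remaining.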
2206 */ 2207 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 2208 union xhci_trb *ep_trb, struct xhci_transfer_event *event, 2209 struct xhci_virt_ep *ep, int *status) 2210 { 2211 struct xhci_ring *ep_ring; 2212 u32 trb_comp_code; 2213 u32 remaining, requested, ep_trb_len; 2214 2215 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2216 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2217 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2218 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); 2219 requested = td->urb->transfer_buffer_length; 2220 2221 switch (trb_comp_code) { 2222 case COMP_SUCCESS: 2223 /* handle success with untransferred data as short packet */ 2224 if (ep_trb != td->last_trb || remaining) { 2225 xhci_warn(xhci, "WARN Successful completion on short TX\n"); 2226 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", 2227 td->urb->ep->desc.bEndpointAddress, 2228 requested, remaining); 2229 } 2230 *status = 0; 2231 break; 2232 case COMP_SHORT_PACKET: 2233 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", 2234 td->urb->ep->desc.bEndpointAddress, 2235 requested, remaining); 2236 *status = 0; 2237 break; 2238 case COMP_STOPPED_SHORT_PACKET: 2239 td->urb->actual_length = remaining; 2240 goto finish_td; 2241 case COMP_STOPPED_LENGTH_INVALID: 2242 /* stopped on ep trb with invalid length, exclude it */ 2243 ep_trb_len = 0; 2244 remaining = 0; 2245 break; 2246 default: 2247 /* do nothing */ 2248 break; 2249 } 2250 2251 if (ep_trb == td->last_trb) 2252 td->urb->actual_length = requested - remaining; 2253 else 2254 td->urb->actual_length = 2255 sum_trb_lengths(xhci, ep_ring, ep_trb) + 2256 ep_trb_len - remaining; 2257 finish_td: 2258 if (remaining > requested) { 2259 xhci_warn(xhci, "bad transfer trb length %d in event trb\n", 2260 remaining); 2261 td->urb->actual_length = 0; 2262 } 2263 return finish_td(xhci, td, ep_trb, event, ep, status); 2264 } 2265 2266 /* 2267 * If this function returns an error condition, it means it got a Transfer 2268 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 2269 * At this point, the host controller is probably hosed and should be reset. 
2270 */ 2271 static int handle_tx_event(struct xhci_hcd *xhci, 2272 struct xhci_transfer_event *event) 2273 { 2274 struct xhci_virt_device *xdev; 2275 struct xhci_virt_ep *ep; 2276 struct xhci_ring *ep_ring; 2277 unsigned int slot_id; 2278 int ep_index; 2279 struct xhci_td *td = NULL; 2280 dma_addr_t ep_trb_dma; 2281 struct xhci_segment *ep_seg; 2282 union xhci_trb *ep_trb; 2283 int status = -EINPROGRESS; 2284 struct xhci_ep_ctx *ep_ctx; 2285 struct list_head *tmp; 2286 u32 trb_comp_code; 2287 int td_num = 0; 2288 bool handling_skipped_tds = false; 2289 2290 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2291 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 2292 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2293 ep_trb_dma = le64_to_cpu(event->buffer); 2294 2295 xdev = xhci->devs[slot_id]; 2296 if (!xdev) { 2297 xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n", 2298 slot_id); 2299 goto err_out; 2300 } 2301 2302 ep = &xdev->eps[ep_index]; 2303 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma); 2304 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2305 2306 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { 2307 xhci_err(xhci, 2308 "ERROR Transfer event for disabled endpoint slot %u ep %u\n", 2309 slot_id, ep_index); 2310 goto err_out; 2311 } 2312 2313 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ 2314 if (!ep_ring) { 2315 switch (trb_comp_code) { 2316 case COMP_STALL_ERROR: 2317 case COMP_USB_TRANSACTION_ERROR: 2318 case COMP_INVALID_STREAM_TYPE_ERROR: 2319 case COMP_INVALID_STREAM_ID_ERROR: 2320 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0, 2321 NULL, NULL, EP_SOFT_RESET); 2322 goto cleanup; 2323 case COMP_RING_UNDERRUN: 2324 case COMP_RING_OVERRUN: 2325 goto cleanup; 2326 default: 2327 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", 2328 slot_id, ep_index); 2329 goto err_out; 2330 } 2331 } 2332 2333 /* Count current td numbers if ep->skip is set */ 2334 if (ep->skip) { 2335 list_for_each(tmp, &ep_ring->td_list) 2336 td_num++; 2337 } 2338 2339 /* Look for common error cases */ 2340 switch (trb_comp_code) { 2341 /* Skip codes that require special handling depending on 2342 * transfer type 2343 */ 2344 case COMP_SUCCESS: 2345 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) 2346 break; 2347 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) 2348 trb_comp_code = COMP_SHORT_PACKET; 2349 else 2350 xhci_warn_ratelimited(xhci, 2351 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n", 2352 slot_id, ep_index); 2353 case COMP_SHORT_PACKET: 2354 break; 2355 /* Completion codes for endpoint stopped state */ 2356 case COMP_STOPPED: 2357 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", 2358 slot_id, ep_index); 2359 break; 2360 case COMP_STOPPED_LENGTH_INVALID: 2361 xhci_dbg(xhci, 2362 "Stopped on No-op or Link TRB for slot %u ep %u\n", 2363 slot_id, ep_index); 2364 break; 2365 case COMP_STOPPED_SHORT_PACKET: 2366 xhci_dbg(xhci, 2367 "Stopped with short packet transfer detected for slot %u ep %u\n", 2368 slot_id, ep_index); 2369 break; 2370 /* Completion codes for endpoint halted state */ 2371 case COMP_STALL_ERROR: 2372 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, 2373 ep_index); 2374 ep->ep_state |= EP_HALTED; 2375 status = -EPIPE; 2376 break; 2377 case COMP_SPLIT_TRANSACTION_ERROR: 2378 case COMP_USB_TRANSACTION_ERROR: 2379 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", 2380 slot_id, 
ep_index);
2381 status = -EPROTO;
2382 break;
2383 case COMP_BABBLE_DETECTED_ERROR:
2384 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2385 slot_id, ep_index);
2386 status = -EOVERFLOW;
2387 break;
2388 /* Completion codes for endpoint error state */
2389 case COMP_TRB_ERROR:
2390 xhci_warn(xhci,
2391 "WARN: TRB error for slot %u ep %u on endpoint\n",
2392 slot_id, ep_index);
2393 status = -EILSEQ;
2394 break;
2395 /* completion codes not indicating endpoint state change */
2396 case COMP_DATA_BUFFER_ERROR:
2397 xhci_warn(xhci,
2398 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2399 slot_id, ep_index);
2400 status = -ENOSR;
2401 break;
2402 case COMP_BANDWIDTH_OVERRUN_ERROR:
2403 xhci_warn(xhci,
2404 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2405 slot_id, ep_index);
2406 break;
2407 case COMP_ISOCH_BUFFER_OVERRUN:
2408 xhci_warn(xhci,
2409 "WARN: buffer overrun event for slot %u ep %u on endpoint\n",
2410 slot_id, ep_index);
2411 break;
2412 case COMP_RING_UNDERRUN:
2413 /*
2414 * When the Isoch ring is empty, the xHC will generate
2415 * a Ring Overrun Event for IN Isoch endpoint or Ring
2416 * Underrun Event for OUT Isoch endpoint.
2417 */
2418 xhci_dbg(xhci, "underrun event on endpoint\n");
2419 if (!list_empty(&ep_ring->td_list))
2420 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2421 "still with TDs queued?\n",
2422 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2423 ep_index);
2424 goto cleanup;
2425 case COMP_RING_OVERRUN:
2426 xhci_dbg(xhci, "overrun event on endpoint\n");
2427 if (!list_empty(&ep_ring->td_list))
2428 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2429 "still with TDs queued?\n",
2430 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2431 ep_index);
2432 goto cleanup;
2433 case COMP_MISSED_SERVICE_ERROR:
2434 /*
2435 * When a Missed Service Error is encountered, one or more
2436 * isoc TDs may have been missed by the xHC.
2437 * Set the skip flag of the ep_ring; complete the missed TDs
2438 * as short transfers when processing the ep_ring next time.
2439 */
2440 ep->skip = true;
2441 xhci_dbg(xhci,
2442 "Miss service interval error for slot %u ep %u, set skip flag\n",
2443 slot_id, ep_index);
2444 goto cleanup;
2445 case COMP_NO_PING_RESPONSE_ERROR:
2446 ep->skip = true;
2447 xhci_dbg(xhci,
2448 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2449 slot_id, ep_index);
2450 goto cleanup;
2451
2452 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2453 /* needs disable slot command to recover */
2454 xhci_warn(xhci,
2455 "WARN: detected an incompatible device for slot %u ep %u\n",
2456 slot_id, ep_index);
2457 status = -EPROTO;
2458 break;
2459 default:
2460 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2461 status = 0;
2462 break;
2463 }
2464 xhci_warn(xhci,
2465 "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
2466 trb_comp_code, slot_id, ep_index);
2467 goto cleanup;
2468 }
2469
2470 do {
2471 /* This TRB should be in the TD at the head of this ring's
2472 * TD list.
2473 */
2474 if (list_empty(&ep_ring->td_list)) {
2475 /*
2476 * Don't print warnings if this is due to a stopped
2477 * endpoint generating an extra completion event while
2478 * the device was suspended, or an event for the last
2479 * TRB of a short TD we already got a short event for.
2480 * The short TD is already removed from the TD list.
2481 */
2482
2483 if (!(trb_comp_code == COMP_STOPPED ||
2484 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2485 ep_ring->last_td_was_short)) {
2486 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2487 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2488 ep_index);
2489 }
2490 if (ep->skip) {
2491 ep->skip = false;
2492 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2493 slot_id, ep_index);
2494 }
2495 goto cleanup;
2496 }
2497
2498 /* We've skipped all the TDs on the ep ring when ep->skip set */
2499 if (ep->skip && td_num == 0) {
2500 ep->skip = false;
2501 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2502 slot_id, ep_index);
2503 goto cleanup;
2504 }
2505
2506 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2507 td_list);
2508 if (ep->skip)
2509 td_num--;
2510
2511 /* Is this a TRB in the currently executing TD? */
2512 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2513 td->last_trb, ep_trb_dma, false);
2514
2515 /*
2516 * Skip the Force Stopped Event. The "event TRB" (event_dma) of
2517 * an FSE is not in the current TD pointed to by ep_ring->dequeue,
2518 * because the hardware dequeue pointer is still at the previous
2519 * TRB of the current TD. That previous TRB may be a Link TRB or
2520 * the last TRB of the previous TD. The command completion handler
2521 * will take care of the rest.
2522 */
2523 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2524 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
2525 goto cleanup;
2526 }
2527
2528 if (!ep_seg) {
2529 if (!ep->skip ||
2530 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2531 /* Some host controllers give a spurious
2532 * successful event after a short transfer.
2533 * Ignore it.
2534 */
2535 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2536 ep_ring->last_td_was_short) {
2537 ep_ring->last_td_was_short = false;
2538 goto cleanup;
2539 }
2540 /* HC is busted, give up! */
2541 xhci_err(xhci,
2542 "ERROR Transfer event TRB DMA ptr not "
2543 "part of current TD ep_index %d "
2544 "comp_code %u\n", ep_index,
2545 trb_comp_code);
2546 trb_in_td(xhci, ep_ring->deq_seg,
2547 ep_ring->dequeue, td->last_trb,
2548 ep_trb_dma, true);
2549 return -ESHUTDOWN;
2550 }
2551
2552 skip_isoc_td(xhci, td, event, ep, &status);
2553 goto cleanup;
2554 }
2555 if (trb_comp_code == COMP_SHORT_PACKET)
2556 ep_ring->last_td_was_short = true;
2557 else
2558 ep_ring->last_td_was_short = false;
2559
2560 if (ep->skip) {
2561 xhci_dbg(xhci,
2562 "Found td. Clear skip flag for slot %u ep %u.\n",
2563 slot_id, ep_index);
2564 ep->skip = false;
2565 }
2566
2567 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2568 sizeof(*ep_trb)];
2569
2570 trace_xhci_handle_transfer(ep_ring,
2571 (struct xhci_generic_trb *) ep_trb);
2572
2573 /*
2574 * No-op TRB could trigger interrupts in a case where
2575 * a URB was killed and a STALL_ERROR happens right
2576 * after the endpoint ring stopped. Reset the halted
2577 * endpoint. Otherwise, the endpoint remains stalled
2578 * indefinitely.
2579 */ 2580 if (trb_is_noop(ep_trb)) { 2581 if (trb_comp_code == COMP_STALL_ERROR || 2582 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, 2583 trb_comp_code)) 2584 xhci_cleanup_halted_endpoint(xhci, slot_id, 2585 ep_index, 2586 ep_ring->stream_id, 2587 td, ep_trb, 2588 EP_HARD_RESET); 2589 goto cleanup; 2590 } 2591 2592 /* update the urb's actual_length and give back to the core */ 2593 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 2594 process_ctrl_td(xhci, td, ep_trb, event, ep, &status); 2595 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 2596 process_isoc_td(xhci, td, ep_trb, event, ep, &status); 2597 else 2598 process_bulk_intr_td(xhci, td, ep_trb, event, ep, 2599 &status); 2600 cleanup: 2601 handling_skipped_tds = ep->skip && 2602 trb_comp_code != COMP_MISSED_SERVICE_ERROR && 2603 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR; 2604 2605 /* 2606 * Do not update event ring dequeue pointer if we're in a loop 2607 * processing missed tds. 2608 */ 2609 if (!handling_skipped_tds) 2610 inc_deq(xhci, xhci->event_ring); 2611 2612 /* 2613 * If ep->skip is set, it means there are missed tds on the 2614 * endpoint ring need to take care of. 2615 * Process them as short transfer until reach the td pointed by 2616 * the event. 2617 */ 2618 } while (handling_skipped_tds); 2619 2620 return 0; 2621 2622 err_out: 2623 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2624 (unsigned long long) xhci_trb_virt_to_dma( 2625 xhci->event_ring->deq_seg, 2626 xhci->event_ring->dequeue), 2627 lower_32_bits(le64_to_cpu(event->buffer)), 2628 upper_32_bits(le64_to_cpu(event->buffer)), 2629 le32_to_cpu(event->transfer_len), 2630 le32_to_cpu(event->flags)); 2631 return -ENODEV; 2632 } 2633 2634 /* 2635 * This function handles all OS-owned events on the event ring. It may drop 2636 * xhci->lock between event processing (e.g. to pass up port status changes). 2637 * Returns >0 for "possibly more events to process" (caller should call again), 2638 * otherwise 0 if done. In future, <0 returns should indicate error code. 2639 */ 2640 static int xhci_handle_event(struct xhci_hcd *xhci) 2641 { 2642 union xhci_trb *event; 2643 int update_ptrs = 1; 2644 int ret; 2645 2646 /* Event ring hasn't been allocated yet. */ 2647 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2648 xhci_err(xhci, "ERROR event ring not ready\n"); 2649 return -ENOMEM; 2650 } 2651 2652 event = xhci->event_ring->dequeue; 2653 /* Does the HC or OS own the TRB? */ 2654 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != 2655 xhci->event_ring->cycle_state) 2656 return 0; 2657 2658 trace_xhci_handle_event(xhci->event_ring, &event->generic); 2659 2660 /* 2661 * Barrier between reading the TRB_CYCLE (valid) flag above and any 2662 * speculative reads of the event's flags/data below. 2663 */ 2664 rmb(); 2665 /* FIXME: Handle more event types. 
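 * The switch below currently dispatches command completion, port status
 * change, transfer, and device notification events; TRB types 48 and above
 * are vendor defined and go to handle_vendor_event(), and anything else is
 * logged as an unknown event type.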
*/ 2666 switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { 2667 case TRB_TYPE(TRB_COMPLETION): 2668 handle_cmd_completion(xhci, &event->event_cmd); 2669 break; 2670 case TRB_TYPE(TRB_PORT_STATUS): 2671 handle_port_status(xhci, event); 2672 update_ptrs = 0; 2673 break; 2674 case TRB_TYPE(TRB_TRANSFER): 2675 ret = handle_tx_event(xhci, &event->trans_event); 2676 if (ret >= 0) 2677 update_ptrs = 0; 2678 break; 2679 case TRB_TYPE(TRB_DEV_NOTE): 2680 handle_device_notification(xhci, event); 2681 break; 2682 default: 2683 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2684 TRB_TYPE(48)) 2685 handle_vendor_event(xhci, event); 2686 else 2687 xhci_warn(xhci, "ERROR unknown event type %d\n", 2688 TRB_FIELD_TO_TYPE( 2689 le32_to_cpu(event->event_cmd.flags))); 2690 } 2691 /* Any of the above functions may drop and re-acquire the lock, so check 2692 * to make sure a watchdog timer didn't mark the host as non-responsive. 2693 */ 2694 if (xhci->xhc_state & XHCI_STATE_DYING) { 2695 xhci_dbg(xhci, "xHCI host dying, returning from " 2696 "event handler.\n"); 2697 return 0; 2698 } 2699 2700 if (update_ptrs) 2701 /* Update SW event ring dequeue pointer */ 2702 inc_deq(xhci, xhci->event_ring); 2703 2704 /* Are there more items on the event ring? Caller will call us again to 2705 * check. 2706 */ 2707 return 1; 2708 } 2709 2710 /* 2711 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2712 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2713 * indicators of an event TRB error, but we check the status *first* to be safe. 2714 */ 2715 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2716 { 2717 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2718 union xhci_trb *event_ring_deq; 2719 irqreturn_t ret = IRQ_NONE; 2720 unsigned long flags; 2721 dma_addr_t deq; 2722 u64 temp_64; 2723 u32 status; 2724 2725 spin_lock_irqsave(&xhci->lock, flags); 2726 /* Check if the xHC generated the interrupt, or the irq is shared */ 2727 status = readl(&xhci->op_regs->status); 2728 if (status == ~(u32)0) { 2729 xhci_hc_died(xhci); 2730 ret = IRQ_HANDLED; 2731 goto out; 2732 } 2733 2734 if (!(status & STS_EINT)) 2735 goto out; 2736 2737 if (status & STS_FATAL) { 2738 xhci_warn(xhci, "WARNING: Host System Error\n"); 2739 xhci_halt(xhci); 2740 ret = IRQ_HANDLED; 2741 goto out; 2742 } 2743 2744 /* 2745 * Clear the op reg interrupt status first, 2746 * so we can receive interrupts from other MSI-X interrupters. 2747 * Write 1 to clear the interrupt status. 2748 */ 2749 status |= STS_EINT; 2750 writel(status, &xhci->op_regs->status); 2751 2752 if (!hcd->msi_enabled) { 2753 u32 irq_pending; 2754 irq_pending = readl(&xhci->ir_set->irq_pending); 2755 irq_pending |= IMAN_IP; 2756 writel(irq_pending, &xhci->ir_set->irq_pending); 2757 } 2758 2759 if (xhci->xhc_state & XHCI_STATE_DYING || 2760 xhci->xhc_state & XHCI_STATE_HALTED) { 2761 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2762 "Shouldn't IRQs be disabled?\n"); 2763 /* Clear the event handler busy flag (RW1C); 2764 * the event ring should be empty. 2765 */ 2766 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2767 xhci_write_64(xhci, temp_64 | ERST_EHB, 2768 &xhci->ir_set->erst_dequeue); 2769 ret = IRQ_HANDLED; 2770 goto out; 2771 } 2772 2773 event_ring_deq = xhci->event_ring->dequeue; 2774 /* FIXME this should be a delayed service routine 2775 * that clears the EHB. 
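 * As we read the spec, the low bits of the erst_dequeue register are not
 * part of the pointer: bits 2:0 hold the dequeue ERST segment index and
 * bit 3 is the RW1C Event Handler Busy flag. That is why the code below
 * keeps the low bits (ERST_PTR_MASK), replaces only the pointer bits with
 * the new dequeue address, and sets ERST_EHB to clear the busy flag.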
2776 */ 2777 while (xhci_handle_event(xhci) > 0) {} 2778 2779 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2780 /* If necessary, update the HW's version of the event ring deq ptr. */ 2781 if (event_ring_deq != xhci->event_ring->dequeue) { 2782 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2783 xhci->event_ring->dequeue); 2784 if (deq == 0) 2785 xhci_warn(xhci, "WARN something wrong with SW event " 2786 "ring dequeue ptr.\n"); 2787 /* Update HC event ring dequeue pointer */ 2788 temp_64 &= ERST_PTR_MASK; 2789 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2790 } 2791 2792 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2793 temp_64 |= ERST_EHB; 2794 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2795 ret = IRQ_HANDLED; 2796 2797 out: 2798 spin_unlock_irqrestore(&xhci->lock, flags); 2799 2800 return ret; 2801 } 2802 2803 irqreturn_t xhci_msi_irq(int irq, void *hcd) 2804 { 2805 return xhci_irq(hcd); 2806 } 2807 2808 /**** Endpoint Ring Operations ****/ 2809 2810 /* 2811 * Generic function for queueing a TRB on a ring. 2812 * The caller must have checked to make sure there's room on the ring. 2813 * 2814 * @more_trbs_coming: Will you enqueue more TRBs before calling 2815 * prepare_transfer()? 2816 */ 2817 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2818 bool more_trbs_coming, 2819 u32 field1, u32 field2, u32 field3, u32 field4) 2820 { 2821 struct xhci_generic_trb *trb; 2822 2823 trb = &ring->enqueue->generic; 2824 trb->field[0] = cpu_to_le32(field1); 2825 trb->field[1] = cpu_to_le32(field2); 2826 trb->field[2] = cpu_to_le32(field3); 2827 trb->field[3] = cpu_to_le32(field4); 2828 2829 trace_xhci_queue_trb(ring, trb); 2830 2831 inc_enq(xhci, ring, more_trbs_coming); 2832 } 2833 2834 /* 2835 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2836 * FIXME allocate segments if the ring is full. 2837 */ 2838 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2839 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2840 { 2841 unsigned int num_trbs_needed; 2842 2843 /* Make sure the endpoint has been added to xHC schedule */ 2844 switch (ep_state) { 2845 case EP_STATE_DISABLED: 2846 /* 2847 * USB core changed config/interfaces without notifying us, 2848 * or hardware is reporting the wrong state. 2849 */ 2850 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2851 return -ENOENT; 2852 case EP_STATE_ERROR: 2853 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2854 /* FIXME event handling code for error needs to clear it */ 2855 /* XXX not sure if this should be -ENOENT or not */ 2856 return -EINVAL; 2857 case EP_STATE_HALTED: 2858 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2859 case EP_STATE_STOPPED: 2860 case EP_STATE_RUNNING: 2861 break; 2862 default: 2863 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2864 /* 2865 * FIXME issue Configure Endpoint command to try to get the HC 2866 * back into a known state. 
2867 */ 2868 return -EINVAL; 2869 } 2870 2871 while (1) { 2872 if (room_on_ring(xhci, ep_ring, num_trbs)) 2873 break; 2874 2875 if (ep_ring == xhci->cmd_ring) { 2876 xhci_err(xhci, "Do not support expand command ring\n"); 2877 return -ENOMEM; 2878 } 2879 2880 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, 2881 "ERROR no room on ep ring, try ring expansion"); 2882 num_trbs_needed = num_trbs - ep_ring->num_trbs_free; 2883 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, 2884 mem_flags)) { 2885 xhci_err(xhci, "Ring expansion failed\n"); 2886 return -ENOMEM; 2887 } 2888 } 2889 2890 while (trb_is_link(ep_ring->enqueue)) { 2891 /* If we're not dealing with 0.95 hardware or isoc rings 2892 * on AMD 0.96 host, clear the chain bit. 2893 */ 2894 if (!xhci_link_trb_quirk(xhci) && 2895 !(ep_ring->type == TYPE_ISOC && 2896 (xhci->quirks & XHCI_AMD_0x96_HOST))) 2897 ep_ring->enqueue->link.control &= 2898 cpu_to_le32(~TRB_CHAIN); 2899 else 2900 ep_ring->enqueue->link.control |= 2901 cpu_to_le32(TRB_CHAIN); 2902 2903 wmb(); 2904 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); 2905 2906 /* Toggle the cycle bit after the last ring segment. */ 2907 if (link_trb_toggles_cycle(ep_ring->enqueue)) 2908 ep_ring->cycle_state ^= 1; 2909 2910 ep_ring->enq_seg = ep_ring->enq_seg->next; 2911 ep_ring->enqueue = ep_ring->enq_seg->trbs; 2912 } 2913 return 0; 2914 } 2915 2916 static int prepare_transfer(struct xhci_hcd *xhci, 2917 struct xhci_virt_device *xdev, 2918 unsigned int ep_index, 2919 unsigned int stream_id, 2920 unsigned int num_trbs, 2921 struct urb *urb, 2922 unsigned int td_index, 2923 gfp_t mem_flags) 2924 { 2925 int ret; 2926 struct urb_priv *urb_priv; 2927 struct xhci_td *td; 2928 struct xhci_ring *ep_ring; 2929 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2930 2931 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2932 if (!ep_ring) { 2933 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2934 stream_id); 2935 return -EINVAL; 2936 } 2937 2938 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), 2939 num_trbs, mem_flags); 2940 if (ret) 2941 return ret; 2942 2943 urb_priv = urb->hcpriv; 2944 td = &urb_priv->td[td_index]; 2945 2946 INIT_LIST_HEAD(&td->td_list); 2947 INIT_LIST_HEAD(&td->cancelled_td_list); 2948 2949 if (td_index == 0) { 2950 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2951 if (unlikely(ret)) 2952 return ret; 2953 } 2954 2955 td->urb = urb; 2956 /* Add this TD to the tail of the endpoint ring's TD list */ 2957 list_add_tail(&td->td_list, &ep_ring->td_list); 2958 td->start_seg = ep_ring->enq_seg; 2959 td->first_trb = ep_ring->enqueue; 2960 2961 return 0; 2962 } 2963 2964 unsigned int count_trbs(u64 addr, u64 len) 2965 { 2966 unsigned int num_trbs; 2967 2968 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), 2969 TRB_MAX_BUFF_SIZE); 2970 if (num_trbs == 0) 2971 num_trbs++; 2972 2973 return num_trbs; 2974 } 2975 2976 static inline unsigned int count_trbs_needed(struct urb *urb) 2977 { 2978 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); 2979 } 2980 2981 static unsigned int count_sg_trbs_needed(struct urb *urb) 2982 { 2983 struct scatterlist *sg; 2984 unsigned int i, len, full_len, num_trbs = 0; 2985 2986 full_len = urb->transfer_buffer_length; 2987 2988 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { 2989 len = sg_dma_len(sg); 2990 num_trbs += count_trbs(sg_dma_address(sg), len); 2991 len = min_t(unsigned int, len, full_len); 2992 full_len -= len; 2993 if 
(full_len == 0) 2994 break; 2995 } 2996 2997 return num_trbs; 2998 } 2999 3000 static unsigned int count_isoc_trbs_needed(struct urb *urb, int i) 3001 { 3002 u64 addr, len; 3003 3004 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3005 len = urb->iso_frame_desc[i].length; 3006 3007 return count_trbs(addr, len); 3008 } 3009 3010 static void check_trb_math(struct urb *urb, int running_total) 3011 { 3012 if (unlikely(running_total != urb->transfer_buffer_length)) 3013 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 3014 "queued %#x (%d), asked for %#x (%d)\n", 3015 __func__, 3016 urb->ep->desc.bEndpointAddress, 3017 running_total, running_total, 3018 urb->transfer_buffer_length, 3019 urb->transfer_buffer_length); 3020 } 3021 3022 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 3023 unsigned int ep_index, unsigned int stream_id, int start_cycle, 3024 struct xhci_generic_trb *start_trb) 3025 { 3026 /* 3027 * Pass all the TRBs to the hardware at once and make sure this write 3028 * isn't reordered. 3029 */ 3030 wmb(); 3031 if (start_cycle) 3032 start_trb->field[3] |= cpu_to_le32(start_cycle); 3033 else 3034 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 3035 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 3036 } 3037 3038 static void check_interval(struct xhci_hcd *xhci, struct urb *urb, 3039 struct xhci_ep_ctx *ep_ctx) 3040 { 3041 int xhci_interval; 3042 int ep_interval; 3043 3044 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 3045 ep_interval = urb->interval; 3046 3047 /* Convert to microframes */ 3048 if (urb->dev->speed == USB_SPEED_LOW || 3049 urb->dev->speed == USB_SPEED_FULL) 3050 ep_interval *= 8; 3051 3052 /* FIXME change this to a warning and a suggestion to use the new API 3053 * to set the polling interval (once the API is added). 3054 */ 3055 if (xhci_interval != ep_interval) { 3056 dev_dbg_ratelimited(&urb->dev->dev, 3057 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", 3058 ep_interval, ep_interval == 1 ? "" : "s", 3059 xhci_interval, xhci_interval == 1 ? "" : "s"); 3060 urb->interval = xhci_interval; 3061 /* Convert back to frames for LS/FS devices */ 3062 if (urb->dev->speed == USB_SPEED_LOW || 3063 urb->dev->speed == USB_SPEED_FULL) 3064 urb->interval /= 8; 3065 } 3066 } 3067 3068 /* 3069 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 3070 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 3071 * (comprised of sg list entries) can take several service intervals to 3072 * transmit. 3073 */ 3074 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3075 struct urb *urb, int slot_id, unsigned int ep_index) 3076 { 3077 struct xhci_ep_ctx *ep_ctx; 3078 3079 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); 3080 check_interval(xhci, urb, ep_ctx); 3081 3082 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); 3083 } 3084 3085 /* 3086 * For xHCI 1.0 host controllers, TD size is the number of max packet sized 3087 * packets remaining in the TD (*not* including this TRB). 
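 * For example (hypothetical numbers, for illustration only): with
 * wMaxPacketSize = 512 and td_total_len = 1536, total_packet_count is 3;
 * while queueing the second 512-byte TRB (transferred = 512,
 * trb_buff_len = 512) the TD size is 3 - (1024 / 512) = 1, and the final
 * TRB is queued with more_trbs_coming == false, so it gets a TD size of 0.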
3088 *
3089 * Total TD packet count = total_packet_count =
3090 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3091 *
3092 * Packets transferred up to and including this TRB = packets_transferred =
3093 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3094 *
3095 * TD size = total_packet_count - packets_transferred
3096 *
3097 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3098 * including this TRB, right shifted by 10
3099 *
3100 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3101 * This is taken care of in the TRB_TD_SIZE() macro
3102 *
3103 * The last TRB in a TD must have the TD size set to zero.
3104 */
3105 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3106 int trb_buff_len, unsigned int td_total_len,
3107 struct urb *urb, bool more_trbs_coming)
3108 {
3109 u32 maxp, total_packet_count;
3110
3111 /* MTK xHCI 0.96 contains some features from 1.0 */
3112 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3113 return ((td_total_len - transferred) >> 10);
3114
3115 /* One TRB with a zero-length data packet. */
3116 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3117 trb_buff_len == td_total_len)
3118 return 0;
3119
3120 /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3121 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3122 trb_buff_len = 0;
3123
3124 maxp = usb_endpoint_maxp(&urb->ep->desc);
3125 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3126
3127 /* Queueing functions don't count the current TRB into transferred */
3128 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3129 }
3130
3131
3132 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3133 u32 *trb_buff_len, struct xhci_segment *seg)
3134 {
3135 struct device *dev = xhci_to_hcd(xhci)->self.controller;
3136 unsigned int unalign;
3137 unsigned int max_pkt;
3138 u32 new_buff_len;
3139
3140 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3141 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3142
3143 /* we got lucky, last normal TRB data on segment is packet aligned */
3144 if (unalign == 0)
3145 return 0;
3146
3147 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3148 unalign, *trb_buff_len);
3149
3150 /* is the last normal TRB alignable by splitting it? */
3151 if (*trb_buff_len > unalign) {
3152 *trb_buff_len -= unalign;
3153 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3154 return 0;
3155 }
3156
3157 /*
3158 * We want enqd_len + trb_buff_len to sum up to a number which is
3159 * divisible by the endpoint's wMaxPacketSize. IOW:
3160 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3161 */
3162 new_buff_len = max_pkt - (enqd_len % max_pkt);
3163
3164 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3165 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3166
3167 /* create a bounce buffer of up to max_pkt bytes, pointed to by the last TRB */
3168 if (usb_urb_dir_out(urb)) {
3169 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3170 seg->bounce_buf, new_buff_len, enqd_len);
3171 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3172 max_pkt, DMA_TO_DEVICE);
3173 } else {
3174 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3175 max_pkt, DMA_FROM_DEVICE);
3176 }
3177
3178 if (dma_mapping_error(dev, seg->bounce_dma)) {
3179 /* try without aligning.
Some host controllers survive */ 3180 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); 3181 return 0; 3182 } 3183 *trb_buff_len = new_buff_len; 3184 seg->bounce_len = new_buff_len; 3185 seg->bounce_offs = enqd_len; 3186 3187 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); 3188 3189 return 1; 3190 } 3191 3192 /* This is very similar to what ehci-q.c qtd_fill() does */ 3193 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3194 struct urb *urb, int slot_id, unsigned int ep_index) 3195 { 3196 struct xhci_ring *ring; 3197 struct urb_priv *urb_priv; 3198 struct xhci_td *td; 3199 struct xhci_generic_trb *start_trb; 3200 struct scatterlist *sg = NULL; 3201 bool more_trbs_coming = true; 3202 bool need_zero_pkt = false; 3203 bool first_trb = true; 3204 unsigned int num_trbs; 3205 unsigned int start_cycle, num_sgs = 0; 3206 unsigned int enqd_len, block_len, trb_buff_len, full_len; 3207 int sent_len, ret; 3208 u32 field, length_field, remainder; 3209 u64 addr, send_addr; 3210 3211 ring = xhci_urb_to_transfer_ring(xhci, urb); 3212 if (!ring) 3213 return -EINVAL; 3214 3215 full_len = urb->transfer_buffer_length; 3216 /* If we have scatter/gather list, we use it. */ 3217 if (urb->num_sgs) { 3218 num_sgs = urb->num_mapped_sgs; 3219 sg = urb->sg; 3220 addr = (u64) sg_dma_address(sg); 3221 block_len = sg_dma_len(sg); 3222 num_trbs = count_sg_trbs_needed(urb); 3223 } else { 3224 num_trbs = count_trbs_needed(urb); 3225 addr = (u64) urb->transfer_dma; 3226 block_len = full_len; 3227 } 3228 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3229 ep_index, urb->stream_id, 3230 num_trbs, urb, 0, mem_flags); 3231 if (unlikely(ret < 0)) 3232 return ret; 3233 3234 urb_priv = urb->hcpriv; 3235 3236 /* Deal with URB_ZERO_PACKET - need one more td/trb */ 3237 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) 3238 need_zero_pkt = true; 3239 3240 td = &urb_priv->td[0]; 3241 3242 /* 3243 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3244 * until we've finished creating all the other TRBs. The ring's cycle 3245 * state may change as we enqueue the other TRBs, so save it too. 3246 */ 3247 start_trb = &ring->enqueue->generic; 3248 start_cycle = ring->cycle_state; 3249 send_addr = addr; 3250 3251 /* Queue the TRBs, even if they are zero-length */ 3252 for (enqd_len = 0; first_trb || enqd_len < full_len; 3253 enqd_len += trb_buff_len) { 3254 field = TRB_TYPE(TRB_NORMAL); 3255 3256 /* TRB buffer should not cross 64KB boundaries */ 3257 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); 3258 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len); 3259 3260 if (enqd_len + trb_buff_len > full_len) 3261 trb_buff_len = full_len - enqd_len; 3262 3263 /* Don't change the cycle bit of the first TRB until later */ 3264 if (first_trb) { 3265 first_trb = false; 3266 if (start_cycle == 0) 3267 field |= TRB_CYCLE; 3268 } else 3269 field |= ring->cycle_state; 3270 3271 /* Chain all the TRBs together; clear the chain bit in the last 3272 * TRB to indicate it's the last TRB in the chain. 
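 * For example (hypothetical numbers, for illustration only): a 100 KB
 * transfer whose buffer starts on a 64 KB boundary is queued as a 64 KB
 * TRB followed by a 36 KB TRB; the first is written with TRB_CHAIN set,
 * while the final TRB has TRB_CHAIN cleared and TRB_IOC set, and
 * td->last_trb points at it.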
3273 */ 3274 if (enqd_len + trb_buff_len < full_len) { 3275 field |= TRB_CHAIN; 3276 if (trb_is_link(ring->enqueue + 1)) { 3277 if (xhci_align_td(xhci, urb, enqd_len, 3278 &trb_buff_len, 3279 ring->enq_seg)) { 3280 send_addr = ring->enq_seg->bounce_dma; 3281 /* assuming TD won't span 2 segs */ 3282 td->bounce_seg = ring->enq_seg; 3283 } 3284 } 3285 } 3286 if (enqd_len + trb_buff_len >= full_len) { 3287 field &= ~TRB_CHAIN; 3288 field |= TRB_IOC; 3289 more_trbs_coming = false; 3290 td->last_trb = ring->enqueue; 3291 } 3292 3293 /* Only set interrupt on short packet for IN endpoints */ 3294 if (usb_urb_dir_in(urb)) 3295 field |= TRB_ISP; 3296 3297 /* Set the TRB length, TD size, and interrupter fields. */ 3298 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, 3299 full_len, urb, more_trbs_coming); 3300 3301 length_field = TRB_LEN(trb_buff_len) | 3302 TRB_TD_SIZE(remainder) | 3303 TRB_INTR_TARGET(0); 3304 3305 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, 3306 lower_32_bits(send_addr), 3307 upper_32_bits(send_addr), 3308 length_field, 3309 field); 3310 3311 addr += trb_buff_len; 3312 sent_len = trb_buff_len; 3313 3314 while (sg && sent_len >= block_len) { 3315 /* New sg entry */ 3316 --num_sgs; 3317 sent_len -= block_len; 3318 if (num_sgs != 0) { 3319 sg = sg_next(sg); 3320 block_len = sg_dma_len(sg); 3321 addr = (u64) sg_dma_address(sg); 3322 addr += sent_len; 3323 } 3324 } 3325 block_len -= sent_len; 3326 send_addr = addr; 3327 } 3328 3329 if (need_zero_pkt) { 3330 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3331 ep_index, urb->stream_id, 3332 1, urb, 1, mem_flags); 3333 urb_priv->td[1].last_trb = ring->enqueue; 3334 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; 3335 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); 3336 } 3337 3338 check_trb_math(urb, enqd_len); 3339 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3340 start_cycle, start_trb); 3341 return 0; 3342 } 3343 3344 /* Caller must have locked xhci->lock */ 3345 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3346 struct urb *urb, int slot_id, unsigned int ep_index) 3347 { 3348 struct xhci_ring *ep_ring; 3349 int num_trbs; 3350 int ret; 3351 struct usb_ctrlrequest *setup; 3352 struct xhci_generic_trb *start_trb; 3353 int start_cycle; 3354 u32 field; 3355 struct urb_priv *urb_priv; 3356 struct xhci_td *td; 3357 3358 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3359 if (!ep_ring) 3360 return -EINVAL; 3361 3362 /* 3363 * Need to copy setup packet into setup TRB, so we can't use the setup 3364 * DMA address. 3365 */ 3366 if (!urb->setup_packet) 3367 return -EINVAL; 3368 3369 /* 1 TRB for setup, 1 for status */ 3370 num_trbs = 2; 3371 /* 3372 * Don't need to check if we need additional event data and normal TRBs, 3373 * since data in control transfers will never get bigger than 16MB 3374 * XXX: can we get a buffer that crosses 64KB boundaries? 3375 */ 3376 if (urb->transfer_buffer_length > 0) 3377 num_trbs++; 3378 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3379 ep_index, urb->stream_id, 3380 num_trbs, urb, 0, mem_flags); 3381 if (ret < 0) 3382 return ret; 3383 3384 urb_priv = urb->hcpriv; 3385 td = &urb_priv->td[0]; 3386 3387 /* 3388 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3389 * until we've finished creating all the other TRBs. The ring's cycle 3390 * state may change as we enqueue the other TRBs, so save it too. 
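 * Note on the handoff (describing giveback_first_trb() above): if
 * start_cycle is 0, the setup TRB below is written with its cycle bit
 * deliberately inverted (set to 1), so the xHC ignores it while the data
 * and status TRBs are still being queued; giveback_first_trb() writes the
 * correct cycle bit last, handing the whole TD to the hardware in one step.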
3391 */ 3392 start_trb = &ep_ring->enqueue->generic; 3393 start_cycle = ep_ring->cycle_state; 3394 3395 /* Queue setup TRB - see section 6.4.1.2.1 */ 3396 /* FIXME better way to translate setup_packet into two u32 fields? */ 3397 setup = (struct usb_ctrlrequest *) urb->setup_packet; 3398 field = 0; 3399 field |= TRB_IDT | TRB_TYPE(TRB_SETUP); 3400 if (start_cycle == 0) 3401 field |= 0x1; 3402 3403 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ 3404 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { 3405 if (urb->transfer_buffer_length > 0) { 3406 if (setup->bRequestType & USB_DIR_IN) 3407 field |= TRB_TX_TYPE(TRB_DATA_IN); 3408 else 3409 field |= TRB_TX_TYPE(TRB_DATA_OUT); 3410 } 3411 } 3412 3413 queue_trb(xhci, ep_ring, true, 3414 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, 3415 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, 3416 TRB_LEN(8) | TRB_INTR_TARGET(0), 3417 /* Immediate data in pointer */ 3418 field); 3419 3420 /* If there's data, queue data TRBs */ 3421 /* Only set interrupt on short packet for IN endpoints */ 3422 if (usb_urb_dir_in(urb)) 3423 field = TRB_ISP | TRB_TYPE(TRB_DATA); 3424 else 3425 field = TRB_TYPE(TRB_DATA); 3426 3427 if (urb->transfer_buffer_length > 0) { 3428 u32 length_field, remainder; 3429 3430 remainder = xhci_td_remainder(xhci, 0, 3431 urb->transfer_buffer_length, 3432 urb->transfer_buffer_length, 3433 urb, 1); 3434 length_field = TRB_LEN(urb->transfer_buffer_length) | 3435 TRB_TD_SIZE(remainder) | 3436 TRB_INTR_TARGET(0); 3437 if (setup->bRequestType & USB_DIR_IN) 3438 field |= TRB_DIR_IN; 3439 queue_trb(xhci, ep_ring, true, 3440 lower_32_bits(urb->transfer_dma), 3441 upper_32_bits(urb->transfer_dma), 3442 length_field, 3443 field | ep_ring->cycle_state); 3444 } 3445 3446 /* Save the DMA address of the last TRB in the TD */ 3447 td->last_trb = ep_ring->enqueue; 3448 3449 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 3450 /* If the device sent data, the status stage is an OUT transfer */ 3451 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 3452 field = 0; 3453 else 3454 field = TRB_DIR_IN; 3455 queue_trb(xhci, ep_ring, false, 3456 0, 3457 0, 3458 TRB_INTR_TARGET(0), 3459 /* Event on completion */ 3460 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 3461 3462 giveback_first_trb(xhci, slot_id, ep_index, 0, 3463 start_cycle, start_trb); 3464 return 0; 3465 } 3466 3467 /* 3468 * The transfer burst count field of the isochronous TRB defines the number of 3469 * bursts that are required to move all packets in this TD. Only SuperSpeed 3470 * devices can burst up to bMaxBurst number of packets per service interval. 3471 * This field is zero based, meaning a value of zero in the field means one 3472 * burst. Basically, for everything but SuperSpeed devices, this field will be 3473 * zero. Only xHCI 1.0 host controllers support this field. 3474 */ 3475 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, 3476 struct urb *urb, unsigned int total_packet_count) 3477 { 3478 unsigned int max_burst; 3479 3480 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) 3481 return 0; 3482 3483 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3484 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; 3485 } 3486 3487 /* 3488 * Returns the number of packets in the last "burst" of packets. This field is 3489 * valid for all speeds of devices. 
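 * For example (hypothetical numbers, for illustration only): a SuperSpeed
 * isoc TD of 10 packets on an endpoint with bMaxBurst = 3 moves as bursts
 * of 4, 4 and 2 packets; the last burst holds 2 packets, so this function
 * returns 2 - 1 = 1 (the TLBPC field is zero-based).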
/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst",
 * so the last burst packet count is equal to the total number of packets in
 * the TD. SuperSpeed endpoints can have up to 3 bursts. All but the last
 * burst must contain (bMaxBurst + 1) packets, but the last burst can contain
 * 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/*
		 * If the residue is zero, the last burst contains
		 * (max_burst + 1) packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}

/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies the
 * target frame that the interval associated with this Isochronous Transfer
 * Descriptor will start on. Refer to section 4.11.2.5 of the xHCI 1.1 spec.
 *
 * Returns the actual frame ID on success, a negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

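	/*
	 * Worked example (illustrative value only): an HCSPARAMS2 IST field
	 * of 0b1001 has bit [3] set and IST[2:0] = 1, i.e. a threshold of
	 * one frame, so ist ends up as 1 << 3 = 8 microframes here.
	 */
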
	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

/* This is for isoc transfers */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

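		/*
		 * Worked example (illustrative values only): td_len = 3000
		 * with max_pkt = 1024 gives DIV_ROUND_UP(3000, 1024) = 3
		 * packets in this TD.
		 */
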
		/* A zero-length transfer still involves at least one packet */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];

		/* Use SIA as the default; if a frame ID is used, overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* Fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* Only the first TRB is isoc; overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* Set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

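			/*
			 * Worked example (illustrative values only): for
			 * addr = 0x2f000 the next 64KB boundary is 0x30000,
			 * so trb_buff_len is capped above at 0x1000 bytes
			 * even if td_remain_len is larger.
			 */
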
			/* Set the TRB length, TD size, & interrupter fields */
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* Store the next frame ID */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/*
	 * Use the first TD as a temporary variable to turn the TDs we've
	 * queued into No-ops with a software-owned cycle bit. That way the
	 * hardware won't accidentally start executing bogus TDs when we
	 * partially overwrite them.
	 * td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped */
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit */
	ep_ring->enqueue = urb_priv->td[0].first_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the URB.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does. Use the xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

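	/*
	 * Worked example (illustrative values only): a 6000-byte TD whose
	 * buffer starts 1024 bytes below a 64KB boundary needs two TRBs
	 * (1024 bytes up to the boundary, 4976 bytes after it), so it adds
	 * 2 to num_trbs.
	 */
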
	/*
	 * Check the ring to guarantee there is enough room for the whole URB.
	 * Do not insert any TD of the URB into the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check the interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the TRB
	 * really gets scheduled by the hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/**** Command Ring Operations ****/

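/*
 * Illustrative note (the reserve size is hypothetical): with
 * cmd_ring_reserved_trbs = 2, an ordinary command below asks prepare_ring()
 * for 3 free TRBs (its own slot plus the reserve), while a must-succeed
 * command asks only for the 2 already reserved.
 */
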
/*
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* If there are no other commands queued, we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

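/*
 * Illustrative example (the slot number is hypothetical): a Configure
 * Endpoint command for slot 5 carries the input context DMA address in
 * fields 0 and 1, leaves field 2 zero, and sets
 * TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(5) in field 3; queue_command()
 * then ORs in the current cycle bit.
 */
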
/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd)
		return;

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (deq_state->stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/*
	 * Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}
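
/*
 * Note on the soft reset case above: TRB_TSP requests Transfer State
 * Preserve, asking the controller to keep the endpoint's data toggle / SEQ
 * number across the reset instead of reinitializing it.
 */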