/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 *
 * An illustrative model of these rules follows the declarations below.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);
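
/*
 * Illustrative sketch only (not used by the driver): a minimal model of the
 * ring-empty/ring-full and cycle-bit ownership rules documented above,
 * expressed on hypothetical ring indices and a raw control word instead of
 * real TRB segments.  The example_* names are made up for illustration and
 * the cycle state is assumed to be 0 or 1, as in this driver.
 */
static inline bool example_ring_empty(unsigned int enq, unsigned int deq)
{
	/* Rule: a ring is empty when enqueue == dequeue. */
	return enq == deq;
}

static inline bool example_ring_full(unsigned int enq, unsigned int deq,
		unsigned int ring_size)
{
	/* Rule: a ring is full when enqueue + 1 (with wrap) == dequeue,
	 * so one TRB always stays unused.
	 */
	return ((enq + 1) % ring_size) == deq;
}

static inline bool example_trb_owned_by_consumer(u32 trb_control,
		u32 consumer_cycle_state)
{
	/* Consumer rule 1: a TRB belongs to the consumer when its cycle
	 * bit matches the consumer's ring cycle state (0 or 1).
	 */
	return (trb_control & TRB_CYCLE) == consumer_cycle_state;
}
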
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	unsigned long long addr;

	ring->deq_updates++;

	/*
	 * If this is not the event ring, and the dequeue pointer
	 * is not on a link TRB, there is one more usable TRB.
	 */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
		ring->num_trbs_free++;

	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs).
		 */
		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
			if (ring->type == TYPE_EVENT &&
					last_trb_on_last_seg(xhci, ring,
						ring->deq_seg, ring->dequeue)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));

	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg,
			ring->dequeue);
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 * (See the illustrative sketch after this function.)
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not the event ring, there is one less usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs).
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet.  We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST))
					&& !xhci_link_trb_quirk(xhci)) {
				next->link.control &=
					cpu_to_le32(~TRB_CHAIN);
				next->link.control |=
					cpu_to_le32(chain);
			}
			/* Give this link TRB to the hardware */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg,
			ring->enqueue);
}
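
/*
 * Illustrative sketch only (not used by the driver): a hypothetical,
 * simplified version of the link-TRB handling described above, operating on
 * a plain CPU-endian u32 control word rather than a real TRB.  It shows the
 * order of operations: carry over the chain bit of the previous TRB, hand
 * the link TRB to the hardware by flipping its cycle bit, and toggle the
 * producer cycle state when the link TRB has its toggle bit set.  The
 * memory barrier the real code needs before the cycle-bit flip is omitted
 * here because no hardware is involved.
 */
static inline u32 example_prepare_link_trb(u32 link_control, u32 prev_chain,
		unsigned int *producer_cycle_state)
{
	/* Carry the chain bit of the previous TRB over to the link TRB. */
	link_control &= ~TRB_CHAIN;
	link_control |= prev_chain;
	/* Hand the link TRB to the hardware by flipping its cycle bit. */
	link_control ^= TRB_CYCLE;
	/* A toggle bit in the link TRB flips the producer cycle state. */
	if (link_control & LINK_TOGGLE)
		*producer_cycle_state ^= 1;
	return link_control;
}
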
/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.  See rules
 * above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
			(ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}


static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where stalled TRB's address
	 * is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
The 771 * host may still be completing some other events, so we have to be careful to 772 * let the event ring handler and the URB dequeueing/enqueueing functions know 773 * through xhci->state. 774 * 775 * The timer may also fire if the host takes a very long time to respond to the 776 * command, and the stop endpoint command completion handler cannot delete the 777 * timer before the timer function is called. Another endpoint cancellation may 778 * sneak in before the timer function can grab the lock, and that may queue 779 * another stop endpoint command and add the timer back. So we cannot use a 780 * simple flag to say whether there is a pending stop endpoint command for a 781 * particular endpoint. 782 * 783 * Instead we use a combination of that flag and a counter for the number of 784 * pending stop endpoint commands. If the timer is the tail end of the last 785 * stop endpoint command, and the endpoint's command is still pending, we assume 786 * the host is dying. 787 */ 788 void xhci_stop_endpoint_command_watchdog(unsigned long arg) 789 { 790 struct xhci_hcd *xhci; 791 struct xhci_virt_ep *ep; 792 struct xhci_virt_ep *temp_ep; 793 struct xhci_ring *ring; 794 struct xhci_td *cur_td; 795 int ret, i, j; 796 unsigned long flags; 797 798 ep = (struct xhci_virt_ep *) arg; 799 xhci = ep->xhci; 800 801 spin_lock_irqsave(&xhci->lock, flags); 802 803 ep->stop_cmds_pending--; 804 if (xhci->xhc_state & XHCI_STATE_DYING) { 805 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " 806 "xHCI as DYING, exiting.\n"); 807 spin_unlock_irqrestore(&xhci->lock, flags); 808 return; 809 } 810 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 811 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " 812 "exiting.\n"); 813 spin_unlock_irqrestore(&xhci->lock, flags); 814 return; 815 } 816 817 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); 818 xhci_warn(xhci, "Assuming host is dying, halting host.\n"); 819 /* Oops, HC is dead or dying or at least not responding to the stop 820 * endpoint command. 821 */ 822 xhci->xhc_state |= XHCI_STATE_DYING; 823 /* Disable interrupts from the host controller and start halting it */ 824 xhci_quiesce(xhci); 825 spin_unlock_irqrestore(&xhci->lock, flags); 826 827 ret = xhci_halt(xhci); 828 829 spin_lock_irqsave(&xhci->lock, flags); 830 if (ret < 0) { 831 /* This is bad; the host is not responding to commands and it's 832 * not allowing itself to be halted. At least interrupts are 833 * disabled. If we call usb_hc_died(), it will attempt to 834 * disconnect all device drivers under this host. Those 835 * disconnect() methods will wait for all URBs to be unlinked, 836 * so we must complete them. 837 */ 838 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); 839 xhci_warn(xhci, "Completing active URBs anyway.\n"); 840 /* We could turn all TDs on the rings to no-ops. This won't 841 * help if the host has cached part of the ring, and is slow if 842 * we want to preserve the cycle bit. Skip it and hope the host 843 * doesn't touch the memory. 
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}


static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
				ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				le64_to_cpu(ep_ctx->deq));
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
				dev->eps[ep_index].queued_deq_ptr) ==
				(le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
		& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id]) {
			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
				/* Delete default control endpoint resources */
				xhci_free_device_endpoint_resources(xhci,
						xhci->devs[slot_id], true);
			xhci_free_virt_device(xhci, slot_id);
		}
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
				le32_to_cpu(ctrl_ctx->drop_flags)) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
			 NEC_FW_MAJOR(le32_to_cpu(event->status)),
			 NEC_FW_MINOR(le32_to_cpu(event->status)));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
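
/*
 * Worked example (hypothetical topology, for illustration only): assume
 * port_array[] describes four ports with major revisions { 0x02, 0x03, 0x02,
 * 0x03 }, i.e. USB 2.0 and USB 3.0 ports interleaved.  For hardware port ID 3
 * (the second USB 2.0 port), the loop above counts one earlier similar-speed
 * port, so the function returns faked port index 1 on the USB 2.0 roothub.
 * Likewise, hardware port ID 4 maps to index 1 on the USB 3.0 roothub.  Add
 * one to these indexes before calling xhci_find_slot_id_by_port().
 */
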
static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = xhci_readl(xhci, port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	if (hcd->speed != HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
						suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
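
/*
 * Illustrative sketch only (not used by the driver): the wrap-around range
 * check that trb_in_td() performs above, reduced to plain TRB offsets within
 * one hypothetical segment.  When a TD wraps past the end of the segment, the
 * suspect offset matches if it falls in either the tail [start, last TRB of
 * the segment] or the head [0, end] portion of the range.
 */
static inline bool example_offset_in_possibly_wrapped_td(unsigned int start,
		unsigned int end, unsigned int last_trb_in_seg,
		unsigned int suspect)
{
	if (start <= end)
		/* TD does not wrap: one contiguous range. */
		return suspect >= start && suspect <= end;
	/* TD wraps around the top of the segment. */
	return (suspect >= start && suspect <= last_trb_in_seg) ||
		suspect <= end;
}
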
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish the td processing, remove the td from td list;
 * Return 1 if the urb can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring);
			inc_deq(xhci, ep_ring);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
1647 */ 1648 if (urb->actual_length > urb->transfer_buffer_length) { 1649 xhci_warn(xhci, "URB transfer length is wrong, " 1650 "xHC issue? req. len = %u, " 1651 "act. len = %u\n", 1652 urb->transfer_buffer_length, 1653 urb->actual_length); 1654 urb->actual_length = 0; 1655 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1656 *status = -EREMOTEIO; 1657 else 1658 *status = 0; 1659 } 1660 list_del_init(&td->td_list); 1661 /* Was this TD slated to be cancelled but completed anyway? */ 1662 if (!list_empty(&td->cancelled_td_list)) 1663 list_del_init(&td->cancelled_td_list); 1664 1665 urb_priv->td_cnt++; 1666 /* Giveback the urb when all the tds are completed */ 1667 if (urb_priv->td_cnt == urb_priv->length) { 1668 ret = 1; 1669 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 1670 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 1671 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs 1672 == 0) { 1673 if (xhci->quirks & XHCI_AMD_PLL_FIX) 1674 usb_amd_quirk_pll_enable(); 1675 } 1676 } 1677 } 1678 } 1679 1680 return ret; 1681 } 1682 1683 /* 1684 * Process control tds, update urb status and actual_length. 1685 */ 1686 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1687 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1688 struct xhci_virt_ep *ep, int *status) 1689 { 1690 struct xhci_virt_device *xdev; 1691 struct xhci_ring *ep_ring; 1692 unsigned int slot_id; 1693 int ep_index; 1694 struct xhci_ep_ctx *ep_ctx; 1695 u32 trb_comp_code; 1696 1697 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1698 xdev = xhci->devs[slot_id]; 1699 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1700 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1701 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1702 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1703 1704 switch (trb_comp_code) { 1705 case COMP_SUCCESS: 1706 if (event_trb == ep_ring->dequeue) { 1707 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " 1708 "without IOC set??\n"); 1709 *status = -ESHUTDOWN; 1710 } else if (event_trb != td->last_trb) { 1711 xhci_warn(xhci, "WARN: Success on ctrl data TRB " 1712 "without IOC set??\n"); 1713 *status = -ESHUTDOWN; 1714 } else { 1715 *status = 0; 1716 } 1717 break; 1718 case COMP_SHORT_TX: 1719 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1720 *status = -EREMOTEIO; 1721 else 1722 *status = 0; 1723 break; 1724 case COMP_STOP_INVAL: 1725 case COMP_STOP: 1726 return finish_td(xhci, td, event_trb, event, ep, status, false); 1727 default: 1728 if (!xhci_requires_manual_halt_cleanup(xhci, 1729 ep_ctx, trb_comp_code)) 1730 break; 1731 xhci_dbg(xhci, "TRB error code %u, " 1732 "halted endpoint index = %u\n", 1733 trb_comp_code, ep_index); 1734 /* else fall through */ 1735 case COMP_STALL: 1736 /* Did we transfer part of the data (middle) phase? */ 1737 if (event_trb != ep_ring->dequeue && 1738 event_trb != td->last_trb) 1739 td->urb->actual_length = 1740 td->urb->transfer_buffer_length 1741 - TRB_LEN(le32_to_cpu(event->transfer_len)); 1742 else 1743 td->urb->actual_length = 0; 1744 1745 xhci_cleanup_halted_endpoint(xhci, 1746 slot_id, ep_index, 0, td, event_trb); 1747 return finish_td(xhci, td, event_trb, event, ep, status, true); 1748 } 1749 /* 1750 * Did we transfer any data, despite the errors that might have 1751 * happened? I.e. did we get past the setup stage? 
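 * (event_trb == ep_ring->dequeue corresponds to the setup TRB here, so a
 * match there means the event was for the setup stage; event_trb ==
 * td->last_trb means it was for the status stage; anything in between was
 * the data stage.)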
1752 */ 1753 if (event_trb != ep_ring->dequeue) { 1754 /* The event was for the status stage */ 1755 if (event_trb == td->last_trb) { 1756 if (td->urb->actual_length != 0) { 1757 /* Don't overwrite a previously set error code 1758 */ 1759 if ((*status == -EINPROGRESS || *status == 0) && 1760 (td->urb->transfer_flags 1761 & URB_SHORT_NOT_OK)) 1762 /* Did we already see a short data 1763 * stage? */ 1764 *status = -EREMOTEIO; 1765 } else { 1766 td->urb->actual_length = 1767 td->urb->transfer_buffer_length; 1768 } 1769 } else { 1770 /* Maybe the event was for the data stage? */ 1771 td->urb->actual_length = 1772 td->urb->transfer_buffer_length - 1773 TRB_LEN(le32_to_cpu(event->transfer_len)); 1774 xhci_dbg(xhci, "Waiting for status " 1775 "stage event\n"); 1776 return 0; 1777 } 1778 } 1779 1780 return finish_td(xhci, td, event_trb, event, ep, status, false); 1781 } 1782 1783 /* 1784 * Process isochronous tds, update urb packet status and actual_length. 1785 */ 1786 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1787 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1788 struct xhci_virt_ep *ep, int *status) 1789 { 1790 struct xhci_ring *ep_ring; 1791 struct urb_priv *urb_priv; 1792 int idx; 1793 int len = 0; 1794 union xhci_trb *cur_trb; 1795 struct xhci_segment *cur_seg; 1796 struct usb_iso_packet_descriptor *frame; 1797 u32 trb_comp_code; 1798 bool skip_td = false; 1799 1800 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1801 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1802 urb_priv = td->urb->hcpriv; 1803 idx = urb_priv->td_cnt; 1804 frame = &td->urb->iso_frame_desc[idx]; 1805 1806 /* handle completion code */ 1807 switch (trb_comp_code) { 1808 case COMP_SUCCESS: 1809 if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { 1810 frame->status = 0; 1811 break; 1812 } 1813 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) 1814 trb_comp_code = COMP_SHORT_TX; 1815 case COMP_SHORT_TX: 1816 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
1817 -EREMOTEIO : 0; 1818 break; 1819 case COMP_BW_OVER: 1820 frame->status = -ECOMM; 1821 skip_td = true; 1822 break; 1823 case COMP_BUFF_OVER: 1824 case COMP_BABBLE: 1825 frame->status = -EOVERFLOW; 1826 skip_td = true; 1827 break; 1828 case COMP_DEV_ERR: 1829 case COMP_STALL: 1830 case COMP_TX_ERR: 1831 frame->status = -EPROTO; 1832 skip_td = true; 1833 break; 1834 case COMP_STOP: 1835 case COMP_STOP_INVAL: 1836 break; 1837 default: 1838 frame->status = -1; 1839 break; 1840 } 1841 1842 if (trb_comp_code == COMP_SUCCESS || skip_td) { 1843 frame->actual_length = frame->length; 1844 td->urb->actual_length += frame->length; 1845 } else { 1846 for (cur_trb = ep_ring->dequeue, 1847 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 1848 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1849 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 1850 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 1851 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 1852 } 1853 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 1854 TRB_LEN(le32_to_cpu(event->transfer_len)); 1855 1856 if (trb_comp_code != COMP_STOP_INVAL) { 1857 frame->actual_length = len; 1858 td->urb->actual_length += len; 1859 } 1860 } 1861 1862 return finish_td(xhci, td, event_trb, event, ep, status, false); 1863 } 1864 1865 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1866 struct xhci_transfer_event *event, 1867 struct xhci_virt_ep *ep, int *status) 1868 { 1869 struct xhci_ring *ep_ring; 1870 struct urb_priv *urb_priv; 1871 struct usb_iso_packet_descriptor *frame; 1872 int idx; 1873 1874 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1875 urb_priv = td->urb->hcpriv; 1876 idx = urb_priv->td_cnt; 1877 frame = &td->urb->iso_frame_desc[idx]; 1878 1879 /* The transfer is partly done. */ 1880 frame->status = -EXDEV; 1881 1882 /* calc actual length */ 1883 frame->actual_length = 0; 1884 1885 /* Update ring dequeue pointer */ 1886 while (ep_ring->dequeue != td->last_trb) 1887 inc_deq(xhci, ep_ring); 1888 inc_deq(xhci, ep_ring); 1889 1890 return finish_td(xhci, td, NULL, event, ep, status, true); 1891 } 1892 1893 /* 1894 * Process bulk and interrupt tds, update urb status and actual_length. 1895 */ 1896 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 1897 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1898 struct xhci_virt_ep *ep, int *status) 1899 { 1900 struct xhci_ring *ep_ring; 1901 union xhci_trb *cur_trb; 1902 struct xhci_segment *cur_seg; 1903 u32 trb_comp_code; 1904 1905 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1906 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1907 1908 switch (trb_comp_code) { 1909 case COMP_SUCCESS: 1910 /* Double check that the HW transferred everything. 
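 * On a genuinely complete transfer the event points at the TD's last TRB
 * and the untransferred length in event->transfer_len is zero; anything
 * else means the controller reported success on what was really a short
 * transfer.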
*/ 1911 if (event_trb != td->last_trb || 1912 TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 1913 xhci_warn(xhci, "WARN Successful completion " 1914 "on short TX\n"); 1915 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1916 *status = -EREMOTEIO; 1917 else 1918 *status = 0; 1919 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) 1920 trb_comp_code = COMP_SHORT_TX; 1921 } else { 1922 *status = 0; 1923 } 1924 break; 1925 case COMP_SHORT_TX: 1926 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1927 *status = -EREMOTEIO; 1928 else 1929 *status = 0; 1930 break; 1931 default: 1932 /* Others already handled above */ 1933 break; 1934 } 1935 if (trb_comp_code == COMP_SHORT_TX) 1936 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 1937 "%d bytes untransferred\n", 1938 td->urb->ep->desc.bEndpointAddress, 1939 td->urb->transfer_buffer_length, 1940 TRB_LEN(le32_to_cpu(event->transfer_len))); 1941 /* Fast path - was this the last TRB in the TD for this URB? */ 1942 if (event_trb == td->last_trb) { 1943 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 1944 td->urb->actual_length = 1945 td->urb->transfer_buffer_length - 1946 TRB_LEN(le32_to_cpu(event->transfer_len)); 1947 if (td->urb->transfer_buffer_length < 1948 td->urb->actual_length) { 1949 xhci_warn(xhci, "HC gave bad length " 1950 "of %d bytes left\n", 1951 TRB_LEN(le32_to_cpu(event->transfer_len))); 1952 td->urb->actual_length = 0; 1953 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1954 *status = -EREMOTEIO; 1955 else 1956 *status = 0; 1957 } 1958 /* Don't overwrite a previously set error code */ 1959 if (*status == -EINPROGRESS) { 1960 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1961 *status = -EREMOTEIO; 1962 else 1963 *status = 0; 1964 } 1965 } else { 1966 td->urb->actual_length = 1967 td->urb->transfer_buffer_length; 1968 /* Ignore a short packet completion if the 1969 * untransferred length was zero. 1970 */ 1971 if (*status == -EREMOTEIO) 1972 *status = 0; 1973 } 1974 } else { 1975 /* Slow path - walk the list, starting from the dequeue 1976 * pointer, to get the actual length transferred. 1977 */ 1978 td->urb->actual_length = 0; 1979 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 1980 cur_trb != event_trb; 1981 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1982 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 1983 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 1984 td->urb->actual_length += 1985 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 1986 } 1987 /* If the ring didn't stop on a Link or No-op TRB, add 1988 * in the actual bytes transferred from the Normal TRB 1989 */ 1990 if (trb_comp_code != COMP_STOP_INVAL) 1991 td->urb->actual_length += 1992 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 1993 TRB_LEN(le32_to_cpu(event->transfer_len)); 1994 } 1995 1996 return finish_td(xhci, td, event_trb, event, ep, status, false); 1997 } 1998 1999 /* 2000 * If this function returns an error condition, it means it got a Transfer 2001 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 2002 * At this point, the host controller is probably hosed and should be reset. 
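 * Called with xhci->lock held; the lock is dropped and re-acquired around
 * usb_hcd_giveback_urb() when a finished URB is handed back to the core.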
2003 */ 2004 static int handle_tx_event(struct xhci_hcd *xhci, 2005 struct xhci_transfer_event *event) 2006 { 2007 struct xhci_virt_device *xdev; 2008 struct xhci_virt_ep *ep; 2009 struct xhci_ring *ep_ring; 2010 unsigned int slot_id; 2011 int ep_index; 2012 struct xhci_td *td = NULL; 2013 dma_addr_t event_dma; 2014 struct xhci_segment *event_seg; 2015 union xhci_trb *event_trb; 2016 struct urb *urb = NULL; 2017 int status = -EINPROGRESS; 2018 struct urb_priv *urb_priv; 2019 struct xhci_ep_ctx *ep_ctx; 2020 struct list_head *tmp; 2021 u32 trb_comp_code; 2022 int ret = 0; 2023 int td_num = 0; 2024 2025 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2026 xdev = xhci->devs[slot_id]; 2027 if (!xdev) { 2028 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 2029 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2030 (unsigned long long) xhci_trb_virt_to_dma( 2031 xhci->event_ring->deq_seg, 2032 xhci->event_ring->dequeue), 2033 lower_32_bits(le64_to_cpu(event->buffer)), 2034 upper_32_bits(le64_to_cpu(event->buffer)), 2035 le32_to_cpu(event->transfer_len), 2036 le32_to_cpu(event->flags)); 2037 xhci_dbg(xhci, "Event ring:\n"); 2038 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2039 return -ENODEV; 2040 } 2041 2042 /* Endpoint ID is 1 based, our index is zero based */ 2043 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 2044 ep = &xdev->eps[ep_index]; 2045 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2046 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2047 if (!ep_ring || 2048 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == 2049 EP_STATE_DISABLED) { 2050 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 2051 "or incorrect stream ring\n"); 2052 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2053 (unsigned long long) xhci_trb_virt_to_dma( 2054 xhci->event_ring->deq_seg, 2055 xhci->event_ring->dequeue), 2056 lower_32_bits(le64_to_cpu(event->buffer)), 2057 upper_32_bits(le64_to_cpu(event->buffer)), 2058 le32_to_cpu(event->transfer_len), 2059 le32_to_cpu(event->flags)); 2060 xhci_dbg(xhci, "Event ring:\n"); 2061 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2062 return -ENODEV; 2063 } 2064 2065 /* Count current td numbers if ep->skip is set */ 2066 if (ep->skip) { 2067 list_for_each(tmp, &ep_ring->td_list) 2068 td_num++; 2069 } 2070 2071 event_dma = le64_to_cpu(event->buffer); 2072 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2073 /* Look for common error cases */ 2074 switch (trb_comp_code) { 2075 /* Skip codes that require special handling depending on 2076 * transfer type 2077 */ 2078 case COMP_SUCCESS: 2079 if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) 2080 break; 2081 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) 2082 trb_comp_code = COMP_SHORT_TX; 2083 else 2084 xhci_warn_ratelimited(xhci, 2085 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); 2086 case COMP_SHORT_TX: 2087 break; 2088 case COMP_STOP: 2089 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); 2090 break; 2091 case COMP_STOP_INVAL: 2092 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); 2093 break; 2094 case COMP_STALL: 2095 xhci_dbg(xhci, "Stalled endpoint\n"); 2096 ep->ep_state |= EP_HALTED; 2097 status = -EPIPE; 2098 break; 2099 case COMP_TRB_ERR: 2100 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 2101 status = -EILSEQ; 2102 break; 2103 case COMP_SPLIT_ERR: 2104 case COMP_TX_ERR: 2105 xhci_dbg(xhci, "Transfer error on endpoint\n"); 2106 status = -EPROTO; 2107 break; 2108 case COMP_BABBLE: 2109 
xhci_dbg(xhci, "Babble error on endpoint\n"); 2110 status = -EOVERFLOW; 2111 break; 2112 case COMP_DB_ERR: 2113 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 2114 status = -ENOSR; 2115 break; 2116 case COMP_BW_OVER: 2117 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); 2118 break; 2119 case COMP_BUFF_OVER: 2120 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); 2121 break; 2122 case COMP_UNDERRUN: 2123 /* 2124 * When the Isoch ring is empty, the xHC will generate 2125 * a Ring Overrun Event for IN Isoch endpoint or Ring 2126 * Underrun Event for OUT Isoch endpoint. 2127 */ 2128 xhci_dbg(xhci, "underrun event on endpoint\n"); 2129 if (!list_empty(&ep_ring->td_list)) 2130 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 2131 "still with TDs queued?\n", 2132 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2133 ep_index); 2134 goto cleanup; 2135 case COMP_OVERRUN: 2136 xhci_dbg(xhci, "overrun event on endpoint\n"); 2137 if (!list_empty(&ep_ring->td_list)) 2138 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 2139 "still with TDs queued?\n", 2140 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2141 ep_index); 2142 goto cleanup; 2143 case COMP_DEV_ERR: 2144 xhci_warn(xhci, "WARN: detect an incompatible device"); 2145 status = -EPROTO; 2146 break; 2147 case COMP_MISSED_INT: 2148 /* 2149 * When encounter missed service error, one or more isoc tds 2150 * may be missed by xHC. 2151 * Set skip flag of the ep_ring; Complete the missed tds as 2152 * short transfer when process the ep_ring next time. 2153 */ 2154 ep->skip = true; 2155 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2156 goto cleanup; 2157 default: 2158 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2159 status = 0; 2160 break; 2161 } 2162 xhci_warn(xhci, "ERROR Unknown event condition, HC probably " 2163 "busted\n"); 2164 goto cleanup; 2165 } 2166 2167 do { 2168 /* This TRB should be in the TD at the head of this ring's 2169 * TD list. 2170 */ 2171 if (list_empty(&ep_ring->td_list)) { 2172 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " 2173 "with no TDs queued?\n", 2174 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2175 ep_index); 2176 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2177 (le32_to_cpu(event->flags) & 2178 TRB_TYPE_BITMASK)>>10); 2179 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2180 if (ep->skip) { 2181 ep->skip = false; 2182 xhci_dbg(xhci, "td_list is empty while skip " 2183 "flag set. Clear skip flag.\n"); 2184 } 2185 ret = 0; 2186 goto cleanup; 2187 } 2188 2189 /* We've skipped all the TDs on the ep ring when ep->skip set */ 2190 if (ep->skip && td_num == 0) { 2191 ep->skip = false; 2192 xhci_dbg(xhci, "All tds on the ep_ring skipped. " 2193 "Clear skip flag.\n"); 2194 ret = 0; 2195 goto cleanup; 2196 } 2197 2198 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2199 if (ep->skip) 2200 td_num--; 2201 2202 /* Is this a TRB in the currently executing TD? */ 2203 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2204 td->last_trb, event_dma); 2205 2206 /* 2207 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE 2208 * is not in the current TD pointed by ep_ring->dequeue because 2209 * that the hardware dequeue pointer still at the previous TRB 2210 * of the current TD. The previous TRB maybe a Link TD or the 2211 * last TRB of the previous TD. The command completion handle 2212 * will take care the rest. 
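 * (This is why a missing event_seg combined with COMP_STOP_INVAL is
 * treated as a benign case just below, rather than as a controller error.)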
2213 */ 2214 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { 2215 ret = 0; 2216 goto cleanup; 2217 } 2218 2219 if (!event_seg) { 2220 if (!ep->skip || 2221 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2222 /* Some host controllers give a spurious 2223 * successful event after a short transfer. 2224 * Ignore it. 2225 */ 2226 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 2227 ep_ring->last_td_was_short) { 2228 ep_ring->last_td_was_short = false; 2229 ret = 0; 2230 goto cleanup; 2231 } 2232 /* HC is busted, give up! */ 2233 xhci_err(xhci, 2234 "ERROR Transfer event TRB DMA ptr not " 2235 "part of current TD\n"); 2236 return -ESHUTDOWN; 2237 } 2238 2239 ret = skip_isoc_td(xhci, td, event, ep, &status); 2240 goto cleanup; 2241 } 2242 if (trb_comp_code == COMP_SHORT_TX) 2243 ep_ring->last_td_was_short = true; 2244 else 2245 ep_ring->last_td_was_short = false; 2246 2247 if (ep->skip) { 2248 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 2249 ep->skip = false; 2250 } 2251 2252 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / 2253 sizeof(*event_trb)]; 2254 /* 2255 * No-op TRB should not trigger interrupts. 2256 * If event_trb is a no-op TRB, it means the 2257 * corresponding TD has been cancelled. Just ignore 2258 * the TD. 2259 */ 2260 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { 2261 xhci_dbg(xhci, 2262 "event_trb is a no-op TRB. Skip it\n"); 2263 goto cleanup; 2264 } 2265 2266 /* Now update the urb's actual_length and give back to 2267 * the core 2268 */ 2269 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 2270 ret = process_ctrl_td(xhci, td, event_trb, event, ep, 2271 &status); 2272 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 2273 ret = process_isoc_td(xhci, td, event_trb, event, ep, 2274 &status); 2275 else 2276 ret = process_bulk_intr_td(xhci, td, event_trb, event, 2277 ep, &status); 2278 2279 cleanup: 2280 /* 2281 * Do not update event ring dequeue pointer if ep->skip is set. 2282 * Will roll back to continue process missed tds. 2283 */ 2284 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2285 inc_deq(xhci, xhci->event_ring); 2286 } 2287 2288 if (ret) { 2289 urb = td->urb; 2290 urb_priv = urb->hcpriv; 2291 /* Leave the TD around for the reset endpoint function 2292 * to use(but only if it's not a control endpoint, 2293 * since we already queued the Set TR dequeue pointer 2294 * command for stalled control endpoints). 2295 */ 2296 if (usb_endpoint_xfer_control(&urb->ep->desc) || 2297 (trb_comp_code != COMP_STALL && 2298 trb_comp_code != COMP_BABBLE)) 2299 xhci_urb_free_priv(xhci, urb_priv); 2300 2301 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2302 if ((urb->actual_length != urb->transfer_buffer_length && 2303 (urb->transfer_flags & 2304 URB_SHORT_NOT_OK)) || 2305 (status != 0 && 2306 !usb_endpoint_xfer_isoc(&urb->ep->desc))) 2307 xhci_dbg(xhci, "Giveback URB %p, len = %d, " 2308 "expected = %d, status = %d\n", 2309 urb, urb->actual_length, 2310 urb->transfer_buffer_length, 2311 status); 2312 spin_unlock(&xhci->lock); 2313 /* EHCI, UHCI, and OHCI always unconditionally set the 2314 * urb->status of an isochronous endpoint to 0. 2315 */ 2316 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2317 status = 0; 2318 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); 2319 spin_lock(&xhci->lock); 2320 } 2321 2322 /* 2323 * If ep->skip is set, it means there are missed tds on the 2324 * endpoint ring need to take care of. 2325 * Process them as short transfer until reach the td pointed by 2326 * the event. 
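 * The loop condition below therefore keeps consuming TDs as long as
 * ep->skip is set and the completion code is not COMP_MISSED_INT.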
2327 */ 2328 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2329 2330 return 0; 2331 } 2332 2333 /* 2334 * This function handles all OS-owned events on the event ring. It may drop 2335 * xhci->lock between event processing (e.g. to pass up port status changes). 2336 * Returns >0 for "possibly more events to process" (caller should call again), 2337 * otherwise 0 if done. In future, <0 returns should indicate error code. 2338 */ 2339 static int xhci_handle_event(struct xhci_hcd *xhci) 2340 { 2341 union xhci_trb *event; 2342 int update_ptrs = 1; 2343 int ret; 2344 2345 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2346 xhci->error_bitmask |= 1 << 1; 2347 return 0; 2348 } 2349 2350 event = xhci->event_ring->dequeue; 2351 /* Does the HC or OS own the TRB? */ 2352 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != 2353 xhci->event_ring->cycle_state) { 2354 xhci->error_bitmask |= 1 << 2; 2355 return 0; 2356 } 2357 2358 /* 2359 * Barrier between reading the TRB_CYCLE (valid) flag above and any 2360 * speculative reads of the event's flags/data below. 2361 */ 2362 rmb(); 2363 /* FIXME: Handle more event types. */ 2364 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { 2365 case TRB_TYPE(TRB_COMPLETION): 2366 handle_cmd_completion(xhci, &event->event_cmd); 2367 break; 2368 case TRB_TYPE(TRB_PORT_STATUS): 2369 handle_port_status(xhci, event); 2370 update_ptrs = 0; 2371 break; 2372 case TRB_TYPE(TRB_TRANSFER): 2373 ret = handle_tx_event(xhci, &event->trans_event); 2374 if (ret < 0) 2375 xhci->error_bitmask |= 1 << 9; 2376 else 2377 update_ptrs = 0; 2378 break; 2379 case TRB_TYPE(TRB_DEV_NOTE): 2380 handle_device_notification(xhci, event); 2381 break; 2382 default: 2383 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2384 TRB_TYPE(48)) 2385 handle_vendor_event(xhci, event); 2386 else 2387 xhci->error_bitmask |= 1 << 3; 2388 } 2389 /* Any of the above functions may drop and re-acquire the lock, so check 2390 * to make sure a watchdog timer didn't mark the host as non-responsive. 2391 */ 2392 if (xhci->xhc_state & XHCI_STATE_DYING) { 2393 xhci_dbg(xhci, "xHCI host dying, returning from " 2394 "event handler.\n"); 2395 return 0; 2396 } 2397 2398 if (update_ptrs) 2399 /* Update SW event ring dequeue pointer */ 2400 inc_deq(xhci, xhci->event_ring); 2401 2402 /* Are there more items on the event ring? Caller will call us again to 2403 * check. 2404 */ 2405 return 1; 2406 } 2407 2408 /* 2409 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2410 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2411 * indicators of an event TRB error, but we check the status *first* to be safe. 
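 * Note that xhci_irq() below also guards against two non-error cases: an
 * all-ones read of the op reg status is taken to mean the host controller
 * has died or been removed, and a clear STS_EINT on a shared interrupt
 * line simply means the interrupt was not ours.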
2412 */ 2413 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2414 { 2415 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2416 u32 status; 2417 union xhci_trb *trb; 2418 u64 temp_64; 2419 union xhci_trb *event_ring_deq; 2420 dma_addr_t deq; 2421 2422 spin_lock(&xhci->lock); 2423 trb = xhci->event_ring->dequeue; 2424 /* Check if the xHC generated the interrupt, or the irq is shared */ 2425 status = xhci_readl(xhci, &xhci->op_regs->status); 2426 if (status == 0xffffffff) 2427 goto hw_died; 2428 2429 if (!(status & STS_EINT)) { 2430 spin_unlock(&xhci->lock); 2431 return IRQ_NONE; 2432 } 2433 if (status & STS_FATAL) { 2434 xhci_warn(xhci, "WARNING: Host System Error\n"); 2435 xhci_halt(xhci); 2436 hw_died: 2437 spin_unlock(&xhci->lock); 2438 return -ESHUTDOWN; 2439 } 2440 2441 /* 2442 * Clear the op reg interrupt status first, 2443 * so we can receive interrupts from other MSI-X interrupters. 2444 * Write 1 to clear the interrupt status. 2445 */ 2446 status |= STS_EINT; 2447 xhci_writel(xhci, status, &xhci->op_regs->status); 2448 /* FIXME when MSI-X is supported and there are multiple vectors */ 2449 /* Clear the MSI-X event interrupt status */ 2450 2451 if (hcd->irq) { 2452 u32 irq_pending; 2453 /* Acknowledge the PCI interrupt */ 2454 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); 2455 irq_pending |= IMAN_IP; 2456 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending); 2457 } 2458 2459 if (xhci->xhc_state & XHCI_STATE_DYING) { 2460 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2461 "Shouldn't IRQs be disabled?\n"); 2462 /* Clear the event handler busy flag (RW1C); 2463 * the event ring should be empty. 2464 */ 2465 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2466 xhci_write_64(xhci, temp_64 | ERST_EHB, 2467 &xhci->ir_set->erst_dequeue); 2468 spin_unlock(&xhci->lock); 2469 2470 return IRQ_HANDLED; 2471 } 2472 2473 event_ring_deq = xhci->event_ring->dequeue; 2474 /* FIXME this should be a delayed service routine 2475 * that clears the EHB. 2476 */ 2477 while (xhci_handle_event(xhci) > 0) {} 2478 2479 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2480 /* If necessary, update the HW's version of the event ring deq ptr. */ 2481 if (event_ring_deq != xhci->event_ring->dequeue) { 2482 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2483 xhci->event_ring->dequeue); 2484 if (deq == 0) 2485 xhci_warn(xhci, "WARN something wrong with SW event " 2486 "ring dequeue ptr.\n"); 2487 /* Update HC event ring dequeue pointer */ 2488 temp_64 &= ERST_PTR_MASK; 2489 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2490 } 2491 2492 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2493 temp_64 |= ERST_EHB; 2494 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2495 2496 spin_unlock(&xhci->lock); 2497 2498 return IRQ_HANDLED; 2499 } 2500 2501 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd) 2502 { 2503 return xhci_irq(hcd); 2504 } 2505 2506 /**** Endpoint Ring Operations ****/ 2507 2508 /* 2509 * Generic function for queueing a TRB on a ring. 2510 * The caller must have checked to make sure there's room on the ring. 2511 * 2512 * @more_trbs_coming: Will you enqueue more TRBs before calling 2513 * prepare_transfer()? 
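 *
 * A typical call from the transfer queueing code below looks roughly like
 * this (illustrative only; addr, len and field stand in for whatever the
 * caller has already computed):
 *
 *	queue_trb(xhci, ep_ring, more_trbs_coming,
 *		  lower_32_bits(addr), upper_32_bits(addr),
 *		  TRB_LEN(len) | TRB_INTR_TARGET(0),
 *		  field | TRB_TYPE(TRB_NORMAL));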
2514 */ 2515 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2516 bool more_trbs_coming, 2517 u32 field1, u32 field2, u32 field3, u32 field4) 2518 { 2519 struct xhci_generic_trb *trb; 2520 2521 trb = &ring->enqueue->generic; 2522 trb->field[0] = cpu_to_le32(field1); 2523 trb->field[1] = cpu_to_le32(field2); 2524 trb->field[2] = cpu_to_le32(field3); 2525 trb->field[3] = cpu_to_le32(field4); 2526 inc_enq(xhci, ring, more_trbs_coming); 2527 } 2528 2529 /* 2530 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2531 * FIXME allocate segments if the ring is full. 2532 */ 2533 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2534 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2535 { 2536 unsigned int num_trbs_needed; 2537 2538 /* Make sure the endpoint has been added to xHC schedule */ 2539 switch (ep_state) { 2540 case EP_STATE_DISABLED: 2541 /* 2542 * USB core changed config/interfaces without notifying us, 2543 * or hardware is reporting the wrong state. 2544 */ 2545 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2546 return -ENOENT; 2547 case EP_STATE_ERROR: 2548 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2549 /* FIXME event handling code for error needs to clear it */ 2550 /* XXX not sure if this should be -ENOENT or not */ 2551 return -EINVAL; 2552 case EP_STATE_HALTED: 2553 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2554 case EP_STATE_STOPPED: 2555 case EP_STATE_RUNNING: 2556 break; 2557 default: 2558 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2559 /* 2560 * FIXME issue Configure Endpoint command to try to get the HC 2561 * back into a known state. 2562 */ 2563 return -EINVAL; 2564 } 2565 2566 while (1) { 2567 if (room_on_ring(xhci, ep_ring, num_trbs)) 2568 break; 2569 2570 if (ep_ring == xhci->cmd_ring) { 2571 xhci_err(xhci, "Do not support expand command ring\n"); 2572 return -ENOMEM; 2573 } 2574 2575 xhci_dbg(xhci, "ERROR no room on ep ring, " 2576 "try ring expansion\n"); 2577 num_trbs_needed = num_trbs - ep_ring->num_trbs_free; 2578 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, 2579 mem_flags)) { 2580 xhci_err(xhci, "Ring expansion failed\n"); 2581 return -ENOMEM; 2582 } 2583 }; 2584 2585 if (enqueue_is_link_trb(ep_ring)) { 2586 struct xhci_ring *ring = ep_ring; 2587 union xhci_trb *next; 2588 2589 next = ring->enqueue; 2590 2591 while (last_trb(xhci, ring, ring->enq_seg, next)) { 2592 /* If we're not dealing with 0.95 hardware or isoc rings 2593 * on AMD 0.96 host, clear the chain bit. 2594 */ 2595 if (!xhci_link_trb_quirk(xhci) && 2596 !(ring->type == TYPE_ISOC && 2597 (xhci->quirks & XHCI_AMD_0x96_HOST))) 2598 next->link.control &= cpu_to_le32(~TRB_CHAIN); 2599 else 2600 next->link.control |= cpu_to_le32(TRB_CHAIN); 2601 2602 wmb(); 2603 next->link.control ^= cpu_to_le32(TRB_CYCLE); 2604 2605 /* Toggle the cycle bit after the last ring segment. */ 2606 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 2607 ring->cycle_state = (ring->cycle_state ? 
0 : 1); 2608 } 2609 ring->enq_seg = ring->enq_seg->next; 2610 ring->enqueue = ring->enq_seg->trbs; 2611 next = ring->enqueue; 2612 } 2613 } 2614 2615 return 0; 2616 } 2617 2618 static int prepare_transfer(struct xhci_hcd *xhci, 2619 struct xhci_virt_device *xdev, 2620 unsigned int ep_index, 2621 unsigned int stream_id, 2622 unsigned int num_trbs, 2623 struct urb *urb, 2624 unsigned int td_index, 2625 gfp_t mem_flags) 2626 { 2627 int ret; 2628 struct urb_priv *urb_priv; 2629 struct xhci_td *td; 2630 struct xhci_ring *ep_ring; 2631 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2632 2633 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2634 if (!ep_ring) { 2635 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2636 stream_id); 2637 return -EINVAL; 2638 } 2639 2640 ret = prepare_ring(xhci, ep_ring, 2641 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, 2642 num_trbs, mem_flags); 2643 if (ret) 2644 return ret; 2645 2646 urb_priv = urb->hcpriv; 2647 td = urb_priv->td[td_index]; 2648 2649 INIT_LIST_HEAD(&td->td_list); 2650 INIT_LIST_HEAD(&td->cancelled_td_list); 2651 2652 if (td_index == 0) { 2653 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2654 if (unlikely(ret)) 2655 return ret; 2656 } 2657 2658 td->urb = urb; 2659 /* Add this TD to the tail of the endpoint ring's TD list */ 2660 list_add_tail(&td->td_list, &ep_ring->td_list); 2661 td->start_seg = ep_ring->enq_seg; 2662 td->first_trb = ep_ring->enqueue; 2663 2664 urb_priv->td[td_index] = td; 2665 2666 return 0; 2667 } 2668 2669 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) 2670 { 2671 int num_sgs, num_trbs, running_total, temp, i; 2672 struct scatterlist *sg; 2673 2674 sg = NULL; 2675 num_sgs = urb->num_mapped_sgs; 2676 temp = urb->transfer_buffer_length; 2677 2678 num_trbs = 0; 2679 for_each_sg(urb->sg, sg, num_sgs, i) { 2680 unsigned int len = sg_dma_len(sg); 2681 2682 /* Scatter gather list entries may cross 64KB boundaries */ 2683 running_total = TRB_MAX_BUFF_SIZE - 2684 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); 2685 running_total &= TRB_MAX_BUFF_SIZE - 1; 2686 if (running_total != 0) 2687 num_trbs++; 2688 2689 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2690 while (running_total < sg_dma_len(sg) && running_total < temp) { 2691 num_trbs++; 2692 running_total += TRB_MAX_BUFF_SIZE; 2693 } 2694 len = min_t(int, len, temp); 2695 temp -= len; 2696 if (temp == 0) 2697 break; 2698 } 2699 return num_trbs; 2700 } 2701 2702 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2703 { 2704 if (num_trbs != 0) 2705 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2706 "TRBs, %d left\n", __func__, 2707 urb->ep->desc.bEndpointAddress, num_trbs); 2708 if (running_total != urb->transfer_buffer_length) 2709 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2710 "queued %#x (%d), asked for %#x (%d)\n", 2711 __func__, 2712 urb->ep->desc.bEndpointAddress, 2713 running_total, running_total, 2714 urb->transfer_buffer_length, 2715 urb->transfer_buffer_length); 2716 } 2717 2718 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2719 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2720 struct xhci_generic_trb *start_trb) 2721 { 2722 /* 2723 * Pass all the TRBs to the hardware at once and make sure this write 2724 * isn't reordered. 
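 * Writing the saved start_cycle into the first TRB's cycle bit is what
 * actually hands the whole TD over to the hardware, so it must happen only
 * after every other TRB in the TD has been written out (hence the wmb()).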
2725 */ 2726 wmb(); 2727 if (start_cycle) 2728 start_trb->field[3] |= cpu_to_le32(start_cycle); 2729 else 2730 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 2731 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2732 } 2733 2734 /* 2735 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 2736 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 2737 * (comprised of sg list entries) can take several service intervals to 2738 * transmit. 2739 */ 2740 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2741 struct urb *urb, int slot_id, unsigned int ep_index) 2742 { 2743 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 2744 xhci->devs[slot_id]->out_ctx, ep_index); 2745 int xhci_interval; 2746 int ep_interval; 2747 2748 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 2749 ep_interval = urb->interval; 2750 /* Convert to microframes */ 2751 if (urb->dev->speed == USB_SPEED_LOW || 2752 urb->dev->speed == USB_SPEED_FULL) 2753 ep_interval *= 8; 2754 /* FIXME change this to a warning and a suggestion to use the new API 2755 * to set the polling interval (once the API is added). 2756 */ 2757 if (xhci_interval != ep_interval) { 2758 if (printk_ratelimit()) 2759 dev_dbg(&urb->dev->dev, "Driver uses different interval" 2760 " (%d microframe%s) than xHCI " 2761 "(%d microframe%s)\n", 2762 ep_interval, 2763 ep_interval == 1 ? "" : "s", 2764 xhci_interval, 2765 xhci_interval == 1 ? "" : "s"); 2766 urb->interval = xhci_interval; 2767 /* Convert back to frames for LS/FS devices */ 2768 if (urb->dev->speed == USB_SPEED_LOW || 2769 urb->dev->speed == USB_SPEED_FULL) 2770 urb->interval /= 8; 2771 } 2772 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); 2773 } 2774 2775 /* 2776 * The TD size is the number of bytes remaining in the TD (including this TRB), 2777 * right shifted by 10. 2778 * It must fit in bits 21:17, so it can't be bigger than 31. 2779 */ 2780 static u32 xhci_td_remainder(unsigned int remainder) 2781 { 2782 u32 max = (1 << (21 - 17 + 1)) - 1; 2783 2784 if ((remainder >> 10) >= max) 2785 return max << 17; 2786 else 2787 return (remainder >> 10) << 17; 2788 } 2789 2790 /* 2791 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in 2792 * the TD (*not* including this TRB). 2793 * 2794 * Total TD packet count = total_packet_count = 2795 * roundup(TD size in bytes / wMaxPacketSize) 2796 * 2797 * Packets transferred up to and including this TRB = packets_transferred = 2798 * rounddown(total bytes transferred including this TRB / wMaxPacketSize) 2799 * 2800 * TD size = total_packet_count - packets_transferred 2801 * 2802 * It must fit in bits 21:17, so it can't be bigger than 31. 2803 */ 2804 2805 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, 2806 unsigned int total_packet_count, struct urb *urb) 2807 { 2808 int packets_transferred; 2809 2810 /* One TRB with a zero-length data packet. */ 2811 if (running_total == 0 && trb_buff_len == 0) 2812 return 0; 2813 2814 /* All the TRB queueing functions don't count the current TRB in 2815 * running_total. 
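 * For example (illustrative numbers only): with a wMaxPacketSize of 512, a
 * running_total of 1024 plus a 512-byte current TRB gives
 * packets_transferred = 3, leaving total_packet_count - 3 packets still to
 * be sent after this TRB.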
2816 */ 2817 packets_transferred = (running_total + trb_buff_len) / 2818 usb_endpoint_maxp(&urb->ep->desc); 2819 2820 return xhci_td_remainder(total_packet_count - packets_transferred); 2821 } 2822 2823 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2824 struct urb *urb, int slot_id, unsigned int ep_index) 2825 { 2826 struct xhci_ring *ep_ring; 2827 unsigned int num_trbs; 2828 struct urb_priv *urb_priv; 2829 struct xhci_td *td; 2830 struct scatterlist *sg; 2831 int num_sgs; 2832 int trb_buff_len, this_sg_len, running_total; 2833 unsigned int total_packet_count; 2834 bool first_trb; 2835 u64 addr; 2836 bool more_trbs_coming; 2837 2838 struct xhci_generic_trb *start_trb; 2839 int start_cycle; 2840 2841 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2842 if (!ep_ring) 2843 return -EINVAL; 2844 2845 num_trbs = count_sg_trbs_needed(xhci, urb); 2846 num_sgs = urb->num_mapped_sgs; 2847 total_packet_count = roundup(urb->transfer_buffer_length, 2848 usb_endpoint_maxp(&urb->ep->desc)); 2849 2850 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 2851 ep_index, urb->stream_id, 2852 num_trbs, urb, 0, mem_flags); 2853 if (trb_buff_len < 0) 2854 return trb_buff_len; 2855 2856 urb_priv = urb->hcpriv; 2857 td = urb_priv->td[0]; 2858 2859 /* 2860 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2861 * until we've finished creating all the other TRBs. The ring's cycle 2862 * state may change as we enqueue the other TRBs, so save it too. 2863 */ 2864 start_trb = &ep_ring->enqueue->generic; 2865 start_cycle = ep_ring->cycle_state; 2866 2867 running_total = 0; 2868 /* 2869 * How much data is in the first TRB? 2870 * 2871 * There are three forces at work for TRB buffer pointers and lengths: 2872 * 1. We don't want to walk off the end of this sg-list entry buffer. 2873 * 2. The transfer length that the driver requested may be smaller than 2874 * the amount of memory allocated for this scatter-gather list. 2875 * 3. TRBs buffers can't cross 64KB boundaries. 2876 */ 2877 sg = urb->sg; 2878 addr = (u64) sg_dma_address(sg); 2879 this_sg_len = sg_dma_len(sg); 2880 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 2881 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2882 if (trb_buff_len > urb->transfer_buffer_length) 2883 trb_buff_len = urb->transfer_buffer_length; 2884 2885 first_trb = true; 2886 /* Queue the first TRB, even if it's zero-length */ 2887 do { 2888 u32 field = 0; 2889 u32 length_field = 0; 2890 u32 remainder = 0; 2891 2892 /* Don't change the cycle bit of the first TRB until later */ 2893 if (first_trb) { 2894 first_trb = false; 2895 if (start_cycle == 0) 2896 field |= 0x1; 2897 } else 2898 field |= ep_ring->cycle_state; 2899 2900 /* Chain all the TRBs together; clear the chain bit in the last 2901 * TRB to indicate it's the last TRB in the chain. 
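 * The last TRB in the TD also gets TRB_IOC set, so the controller raises a
 * transfer event once the whole TD completes.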
2902 */ 2903 if (num_trbs > 1) { 2904 field |= TRB_CHAIN; 2905 } else { 2906 /* FIXME - add check for ZERO_PACKET flag before this */ 2907 td->last_trb = ep_ring->enqueue; 2908 field |= TRB_IOC; 2909 } 2910 2911 /* Only set interrupt on short packet for IN endpoints */ 2912 if (usb_urb_dir_in(urb)) 2913 field |= TRB_ISP; 2914 2915 if (TRB_MAX_BUFF_SIZE - 2916 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { 2917 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2918 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2919 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2920 (unsigned int) addr + trb_buff_len); 2921 } 2922 2923 /* Set the TRB length, TD size, and interrupter fields. */ 2924 if (xhci->hci_version < 0x100) { 2925 remainder = xhci_td_remainder( 2926 urb->transfer_buffer_length - 2927 running_total); 2928 } else { 2929 remainder = xhci_v1_0_td_remainder(running_total, 2930 trb_buff_len, total_packet_count, urb); 2931 } 2932 length_field = TRB_LEN(trb_buff_len) | 2933 remainder | 2934 TRB_INTR_TARGET(0); 2935 2936 if (num_trbs > 1) 2937 more_trbs_coming = true; 2938 else 2939 more_trbs_coming = false; 2940 queue_trb(xhci, ep_ring, more_trbs_coming, 2941 lower_32_bits(addr), 2942 upper_32_bits(addr), 2943 length_field, 2944 field | TRB_TYPE(TRB_NORMAL)); 2945 --num_trbs; 2946 running_total += trb_buff_len; 2947 2948 /* Calculate length for next transfer -- 2949 * Are we done queueing all the TRBs for this sg entry? 2950 */ 2951 this_sg_len -= trb_buff_len; 2952 if (this_sg_len == 0) { 2953 --num_sgs; 2954 if (num_sgs == 0) 2955 break; 2956 sg = sg_next(sg); 2957 addr = (u64) sg_dma_address(sg); 2958 this_sg_len = sg_dma_len(sg); 2959 } else { 2960 addr += trb_buff_len; 2961 } 2962 2963 trb_buff_len = TRB_MAX_BUFF_SIZE - 2964 (addr & (TRB_MAX_BUFF_SIZE - 1)); 2965 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2966 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2967 trb_buff_len = 2968 urb->transfer_buffer_length - running_total; 2969 } while (running_total < urb->transfer_buffer_length); 2970 2971 check_trb_math(urb, num_trbs, running_total); 2972 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2973 start_cycle, start_trb); 2974 return 0; 2975 } 2976 2977 /* This is very similar to what ehci-q.c qtd_fill() does */ 2978 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2979 struct urb *urb, int slot_id, unsigned int ep_index) 2980 { 2981 struct xhci_ring *ep_ring; 2982 struct urb_priv *urb_priv; 2983 struct xhci_td *td; 2984 int num_trbs; 2985 struct xhci_generic_trb *start_trb; 2986 bool first_trb; 2987 bool more_trbs_coming; 2988 int start_cycle; 2989 u32 field, length_field; 2990 2991 int running_total, trb_buff_len, ret; 2992 unsigned int total_packet_count; 2993 u64 addr; 2994 2995 if (urb->num_sgs) 2996 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 2997 2998 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2999 if (!ep_ring) 3000 return -EINVAL; 3001 3002 num_trbs = 0; 3003 /* How much data is (potentially) left before the 64KB boundary? */ 3004 running_total = TRB_MAX_BUFF_SIZE - 3005 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 3006 running_total &= TRB_MAX_BUFF_SIZE - 1; 3007 3008 /* If there's some data on this 64KB chunk, or we have to send a 3009 * zero-length transfer, we need at least one TRB 3010 */ 3011 if (running_total != 0 || urb->transfer_buffer_length == 0) 3012 num_trbs++; 3013 /* How many more 64KB chunks to transfer, how many more TRBs? 
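 * For example (illustrative only): an 8 KB buffer that starts 1 KB below a
 * 64 KB boundary needs one TRB for that first 1 KB and one more for the
 * remaining 7 KB, so num_trbs ends up as 2.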
*/ 3014 while (running_total < urb->transfer_buffer_length) { 3015 num_trbs++; 3016 running_total += TRB_MAX_BUFF_SIZE; 3017 } 3018 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 3019 3020 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3021 ep_index, urb->stream_id, 3022 num_trbs, urb, 0, mem_flags); 3023 if (ret < 0) 3024 return ret; 3025 3026 urb_priv = urb->hcpriv; 3027 td = urb_priv->td[0]; 3028 3029 /* 3030 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3031 * until we've finished creating all the other TRBs. The ring's cycle 3032 * state may change as we enqueue the other TRBs, so save it too. 3033 */ 3034 start_trb = &ep_ring->enqueue->generic; 3035 start_cycle = ep_ring->cycle_state; 3036 3037 running_total = 0; 3038 total_packet_count = roundup(urb->transfer_buffer_length, 3039 usb_endpoint_maxp(&urb->ep->desc)); 3040 /* How much data is in the first TRB? */ 3041 addr = (u64) urb->transfer_dma; 3042 trb_buff_len = TRB_MAX_BUFF_SIZE - 3043 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 3044 if (trb_buff_len > urb->transfer_buffer_length) 3045 trb_buff_len = urb->transfer_buffer_length; 3046 3047 first_trb = true; 3048 3049 /* Queue the first TRB, even if it's zero-length */ 3050 do { 3051 u32 remainder = 0; 3052 field = 0; 3053 3054 /* Don't change the cycle bit of the first TRB until later */ 3055 if (first_trb) { 3056 first_trb = false; 3057 if (start_cycle == 0) 3058 field |= 0x1; 3059 } else 3060 field |= ep_ring->cycle_state; 3061 3062 /* Chain all the TRBs together; clear the chain bit in the last 3063 * TRB to indicate it's the last TRB in the chain. 3064 */ 3065 if (num_trbs > 1) { 3066 field |= TRB_CHAIN; 3067 } else { 3068 /* FIXME - add check for ZERO_PACKET flag before this */ 3069 td->last_trb = ep_ring->enqueue; 3070 field |= TRB_IOC; 3071 } 3072 3073 /* Only set interrupt on short packet for IN endpoints */ 3074 if (usb_urb_dir_in(urb)) 3075 field |= TRB_ISP; 3076 3077 /* Set the TRB length, TD size, and interrupter fields. 
*/ 3078 if (xhci->hci_version < 0x100) { 3079 remainder = xhci_td_remainder( 3080 urb->transfer_buffer_length - 3081 running_total); 3082 } else { 3083 remainder = xhci_v1_0_td_remainder(running_total, 3084 trb_buff_len, total_packet_count, urb); 3085 } 3086 length_field = TRB_LEN(trb_buff_len) | 3087 remainder | 3088 TRB_INTR_TARGET(0); 3089 3090 if (num_trbs > 1) 3091 more_trbs_coming = true; 3092 else 3093 more_trbs_coming = false; 3094 queue_trb(xhci, ep_ring, more_trbs_coming, 3095 lower_32_bits(addr), 3096 upper_32_bits(addr), 3097 length_field, 3098 field | TRB_TYPE(TRB_NORMAL)); 3099 --num_trbs; 3100 running_total += trb_buff_len; 3101 3102 /* Calculate length for next transfer */ 3103 addr += trb_buff_len; 3104 trb_buff_len = urb->transfer_buffer_length - running_total; 3105 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 3106 trb_buff_len = TRB_MAX_BUFF_SIZE; 3107 } while (running_total < urb->transfer_buffer_length); 3108 3109 check_trb_math(urb, num_trbs, running_total); 3110 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3111 start_cycle, start_trb); 3112 return 0; 3113 } 3114 3115 /* Caller must have locked xhci->lock */ 3116 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3117 struct urb *urb, int slot_id, unsigned int ep_index) 3118 { 3119 struct xhci_ring *ep_ring; 3120 int num_trbs; 3121 int ret; 3122 struct usb_ctrlrequest *setup; 3123 struct xhci_generic_trb *start_trb; 3124 int start_cycle; 3125 u32 field, length_field; 3126 struct urb_priv *urb_priv; 3127 struct xhci_td *td; 3128 3129 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3130 if (!ep_ring) 3131 return -EINVAL; 3132 3133 /* 3134 * Need to copy setup packet into setup TRB, so we can't use the setup 3135 * DMA address. 3136 */ 3137 if (!urb->setup_packet) 3138 return -EINVAL; 3139 3140 /* 1 TRB for setup, 1 for status */ 3141 num_trbs = 2; 3142 /* 3143 * Don't need to check if we need additional event data and normal TRBs, 3144 * since data in control transfers will never get bigger than 16MB 3145 * XXX: can we get a buffer that crosses 64KB boundaries? 3146 */ 3147 if (urb->transfer_buffer_length > 0) 3148 num_trbs++; 3149 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3150 ep_index, urb->stream_id, 3151 num_trbs, urb, 0, mem_flags); 3152 if (ret < 0) 3153 return ret; 3154 3155 urb_priv = urb->hcpriv; 3156 td = urb_priv->td[0]; 3157 3158 /* 3159 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3160 * until we've finished creating all the other TRBs. The ring's cycle 3161 * state may change as we enqueue the other TRBs, so save it too. 3162 */ 3163 start_trb = &ep_ring->enqueue->generic; 3164 start_cycle = ep_ring->cycle_state; 3165 3166 /* Queue setup TRB - see section 6.4.1.2.1 */ 3167 /* FIXME better way to translate setup_packet into two u32 fields? 
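 * (In the queue_trb() call below the 8-byte SETUP packet is passed as
 * immediate data: the first dword packs bRequestType, bRequest and wValue,
 * the second packs wIndex and wLength, and TRB_IDT marks the TRB's pointer
 * fields as immediate data.)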
*/ 3168 setup = (struct usb_ctrlrequest *) urb->setup_packet; 3169 field = 0; 3170 field |= TRB_IDT | TRB_TYPE(TRB_SETUP); 3171 if (start_cycle == 0) 3172 field |= 0x1; 3173 3174 /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ 3175 if (xhci->hci_version == 0x100) { 3176 if (urb->transfer_buffer_length > 0) { 3177 if (setup->bRequestType & USB_DIR_IN) 3178 field |= TRB_TX_TYPE(TRB_DATA_IN); 3179 else 3180 field |= TRB_TX_TYPE(TRB_DATA_OUT); 3181 } 3182 } 3183 3184 queue_trb(xhci, ep_ring, true, 3185 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, 3186 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, 3187 TRB_LEN(8) | TRB_INTR_TARGET(0), 3188 /* Immediate data in pointer */ 3189 field); 3190 3191 /* If there's data, queue data TRBs */ 3192 /* Only set interrupt on short packet for IN endpoints */ 3193 if (usb_urb_dir_in(urb)) 3194 field = TRB_ISP | TRB_TYPE(TRB_DATA); 3195 else 3196 field = TRB_TYPE(TRB_DATA); 3197 3198 length_field = TRB_LEN(urb->transfer_buffer_length) | 3199 xhci_td_remainder(urb->transfer_buffer_length) | 3200 TRB_INTR_TARGET(0); 3201 if (urb->transfer_buffer_length > 0) { 3202 if (setup->bRequestType & USB_DIR_IN) 3203 field |= TRB_DIR_IN; 3204 queue_trb(xhci, ep_ring, true, 3205 lower_32_bits(urb->transfer_dma), 3206 upper_32_bits(urb->transfer_dma), 3207 length_field, 3208 field | ep_ring->cycle_state); 3209 } 3210 3211 /* Save the DMA address of the last TRB in the TD */ 3212 td->last_trb = ep_ring->enqueue; 3213 3214 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 3215 /* If the device sent data, the status stage is an OUT transfer */ 3216 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 3217 field = 0; 3218 else 3219 field = TRB_DIR_IN; 3220 queue_trb(xhci, ep_ring, false, 3221 0, 3222 0, 3223 TRB_INTR_TARGET(0), 3224 /* Event on completion */ 3225 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 3226 3227 giveback_first_trb(xhci, slot_id, ep_index, 0, 3228 start_cycle, start_trb); 3229 return 0; 3230 } 3231 3232 static int count_isoc_trbs_needed(struct xhci_hcd *xhci, 3233 struct urb *urb, int i) 3234 { 3235 int num_trbs = 0; 3236 u64 addr, td_len; 3237 3238 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3239 td_len = urb->iso_frame_desc[i].length; 3240 3241 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)), 3242 TRB_MAX_BUFF_SIZE); 3243 if (num_trbs == 0) 3244 num_trbs++; 3245 3246 return num_trbs; 3247 } 3248 3249 /* 3250 * The transfer burst count field of the isochronous TRB defines the number of 3251 * bursts that are required to move all packets in this TD. Only SuperSpeed 3252 * devices can burst up to bMaxBurst number of packets per service interval. 3253 * This field is zero based, meaning a value of zero in the field means one 3254 * burst. Basically, for everything but SuperSpeed devices, this field will be 3255 * zero. Only xHCI 1.0 host controllers support this field. 3256 */ 3257 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, 3258 struct usb_device *udev, 3259 struct urb *urb, unsigned int total_packet_count) 3260 { 3261 unsigned int max_burst; 3262 3263 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) 3264 return 0; 3265 3266 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3267 return roundup(total_packet_count, max_burst + 1) - 1; 3268 } 3269 3270 /* 3271 * Returns the number of packets in the last "burst" of packets. This field is 3272 * valid for all speeds of devices. 
USB 2.0 devices can only do one "burst", so 3273 * the last burst packet count is equal to the total number of packets in the 3274 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst 3275 * must contain (bMaxBurst + 1) number of packets, but the last burst can 3276 * contain 1 to (bMaxBurst + 1) packets. 3277 */ 3278 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, 3279 struct usb_device *udev, 3280 struct urb *urb, unsigned int total_packet_count) 3281 { 3282 unsigned int max_burst; 3283 unsigned int residue; 3284 3285 if (xhci->hci_version < 0x100) 3286 return 0; 3287 3288 switch (udev->speed) { 3289 case USB_SPEED_SUPER: 3290 /* bMaxBurst is zero based: 0 means 1 packet per burst */ 3291 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3292 residue = total_packet_count % (max_burst + 1); 3293 /* If residue is zero, the last burst contains (max_burst + 1) 3294 * number of packets, but the TLBPC field is zero-based. 3295 */ 3296 if (residue == 0) 3297 return max_burst; 3298 return residue - 1; 3299 default: 3300 if (total_packet_count == 0) 3301 return 0; 3302 return total_packet_count - 1; 3303 } 3304 } 3305 3306 /* This is for isoc transfer */ 3307 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3308 struct urb *urb, int slot_id, unsigned int ep_index) 3309 { 3310 struct xhci_ring *ep_ring; 3311 struct urb_priv *urb_priv; 3312 struct xhci_td *td; 3313 int num_tds, trbs_per_td; 3314 struct xhci_generic_trb *start_trb; 3315 bool first_trb; 3316 int start_cycle; 3317 u32 field, length_field; 3318 int running_total, trb_buff_len, td_len, td_remain_len, ret; 3319 u64 start_addr, addr; 3320 int i, j; 3321 bool more_trbs_coming; 3322 3323 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 3324 3325 num_tds = urb->number_of_packets; 3326 if (num_tds < 1) { 3327 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); 3328 return -EINVAL; 3329 } 3330 3331 start_addr = (u64) urb->transfer_dma; 3332 start_trb = &ep_ring->enqueue->generic; 3333 start_cycle = ep_ring->cycle_state; 3334 3335 urb_priv = urb->hcpriv; 3336 /* Queue the first TRB, even if it's zero-length */ 3337 for (i = 0; i < num_tds; i++) { 3338 unsigned int total_packet_count; 3339 unsigned int burst_count; 3340 unsigned int residue; 3341 3342 first_trb = true; 3343 running_total = 0; 3344 addr = start_addr + urb->iso_frame_desc[i].offset; 3345 td_len = urb->iso_frame_desc[i].length; 3346 td_remain_len = td_len; 3347 total_packet_count = roundup(td_len, 3348 usb_endpoint_maxp(&urb->ep->desc)); 3349 /* A zero-length transfer still involves at least one packet. 
*/ 3350 if (total_packet_count == 0) 3351 total_packet_count++; 3352 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3353 total_packet_count); 3354 residue = xhci_get_last_burst_packet_count(xhci, 3355 urb->dev, urb, total_packet_count); 3356 3357 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); 3358 3359 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3360 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3361 if (ret < 0) { 3362 if (i == 0) 3363 return ret; 3364 goto cleanup; 3365 } 3366 3367 td = urb_priv->td[i]; 3368 for (j = 0; j < trbs_per_td; j++) { 3369 u32 remainder = 0; 3370 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3371 3372 if (first_trb) { 3373 /* Queue the isoc TRB */ 3374 field |= TRB_TYPE(TRB_ISOC); 3375 /* Assume URB_ISO_ASAP is set */ 3376 field |= TRB_SIA; 3377 if (i == 0) { 3378 if (start_cycle == 0) 3379 field |= 0x1; 3380 } else 3381 field |= ep_ring->cycle_state; 3382 first_trb = false; 3383 } else { 3384 /* Queue other normal TRBs */ 3385 field |= TRB_TYPE(TRB_NORMAL); 3386 field |= ep_ring->cycle_state; 3387 } 3388 3389 /* Only set interrupt on short packet for IN EPs */ 3390 if (usb_urb_dir_in(urb)) 3391 field |= TRB_ISP; 3392 3393 /* Chain all the TRBs together; clear the chain bit in 3394 * the last TRB to indicate it's the last TRB in the 3395 * chain. 3396 */ 3397 if (j < trbs_per_td - 1) { 3398 field |= TRB_CHAIN; 3399 more_trbs_coming = true; 3400 } else { 3401 td->last_trb = ep_ring->enqueue; 3402 field |= TRB_IOC; 3403 if (xhci->hci_version == 0x100) { 3404 /* Set BEI bit except for the last td */ 3405 if (i < num_tds - 1) 3406 field |= TRB_BEI; 3407 } 3408 more_trbs_coming = false; 3409 } 3410 3411 /* Calculate TRB length */ 3412 trb_buff_len = TRB_MAX_BUFF_SIZE - 3413 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 3414 if (trb_buff_len > td_remain_len) 3415 trb_buff_len = td_remain_len; 3416 3417 /* Set the TRB length, TD size, & interrupter fields. */ 3418 if (xhci->hci_version < 0x100) { 3419 remainder = xhci_td_remainder( 3420 td_len - running_total); 3421 } else { 3422 remainder = xhci_v1_0_td_remainder( 3423 running_total, trb_buff_len, 3424 total_packet_count, urb); 3425 } 3426 length_field = TRB_LEN(trb_buff_len) | 3427 remainder | 3428 TRB_INTR_TARGET(0); 3429 3430 queue_trb(xhci, ep_ring, more_trbs_coming, 3431 lower_32_bits(addr), 3432 upper_32_bits(addr), 3433 length_field, 3434 field); 3435 running_total += trb_buff_len; 3436 3437 addr += trb_buff_len; 3438 td_remain_len -= trb_buff_len; 3439 } 3440 3441 /* Check TD length */ 3442 if (running_total != td_len) { 3443 xhci_err(xhci, "ISOC TD length unmatch\n"); 3444 ret = -EINVAL; 3445 goto cleanup; 3446 } 3447 } 3448 3449 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { 3450 if (xhci->quirks & XHCI_AMD_PLL_FIX) 3451 usb_amd_quirk_pll_disable(); 3452 } 3453 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; 3454 3455 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3456 start_cycle, start_trb); 3457 return 0; 3458 cleanup: 3459 /* Clean up a partially enqueued isoc transfer. */ 3460 3461 for (i--; i >= 0; i--) 3462 list_del_init(&urb_priv->td[i]->td_list); 3463 3464 /* Use the first TD as a temporary variable to turn the TDs we've queued 3465 * into No-ops with a software-owned cycle bit. That way the hardware 3466 * won't accidentally start executing bogus TDs when we partially 3467 * overwrite them. td->first_trb and td->start_seg are already set. 
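 * td_to_noop() below walks from first_trb to last_trb turning each queued
 * TRB into a No-op, and the enqueue pointer and cycle state are then wound
 * back to where the first TD started.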
/*
 * Check the transfer ring to make sure there is enough room for the URB, then
 * update the ISO URB's start_frame and interval.  The interval is adjusted the
 * same way xhci_queue_intr_tx does it; for now the xHC frame_index register is
 * simply used to set urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole URB.
	 * Do not insert any TD of the URB into the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring,
			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
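/*
 * Illustrative sketch (example only, never compiled): the interval fixup in
 * xhci_queue_isoc_tx_prepare() above for a hypothetical full-speed device.
 * Suppose the class driver submitted urb->interval = 4 frames while the
 * endpoint context was programmed with a 64-microframe interval.  The URB
 * value is compared in microframes (4 * 8 = 32), found to disagree, replaced
 * by the xHC value, and converted back to frames (64 / 8 = 8).
 */
#if 0	/* example only */
static int xhci_isoc_interval_example(void)
{
	int urb_interval = 4;			/* frames, from a hypothetical URB */
	int xhci_interval = 64;			/* microframes, from the ep context */
	int ep_interval = urb_interval * 8;	/* FS/LS: frames -> microframes */

	if (xhci_interval != ep_interval)
		urb_interval = xhci_interval / 8;	/* back to frames: 8 */
	return urb_interval;
}
#endif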
/****  Command Ring Operations  ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail (command_must_succeed = TRUE),
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate that a "Stop Endpoint Command" is being issued to
 * stop activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
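/*
 * Illustrative sketch (example only, never compiled): the room check made by
 * queue_command() above.  With, say, two reserved command TRBs outstanding,
 * an ordinary command asks prepare_ring() for three free TRBs (its own slot
 * plus the full reservation), while a "must succeed" command only asks for
 * the two reserved slots it is allowed to dip into.
 */
#if 0	/* example only */
static int xhci_cmd_room_example(struct xhci_hcd *xhci, bool must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;	/* e.g. 2 */

	if (!must_succeed)
		reserved_trbs++;				/* ask for 3 */

	return prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
}
#endif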
/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}
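/*
 * Illustrative sketch (example only, never compiled): the command queueing
 * helpers above only place a TRB on the command ring; the caller is expected
 * to hold xhci->lock and to ring the command ring doorbell afterwards, along
 * these lines (xhci_ring_cmd_db() is defined elsewhere in this driver):
 */
#if 0	/* example only */
static int xhci_issue_reset_device_example(struct xhci_hcd *xhci, u32 slot_id)
{
	int ret;

	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret)
		return ret;
	xhci_ring_cmd_db(xhci);		/* notify the host controller */
	return 0;
}
#endif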