1 /* 2 * xHCI host controller driver 3 * 4 * Copyright (C) 2008 Intel Corp. 5 * 6 * Author: Sarah Sharp 7 * Some code borrowed from the Linux EHCI driver. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16 * for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software Foundation, 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 /* 24 * Ring initialization rules: 25 * 1. Each segment is initialized to zero, except for link TRBs. 26 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or 27 * Consumer Cycle State (CCS), depending on ring function. 28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment. 29 * 30 * Ring behavior rules: 31 * 1. A ring is empty if enqueue == dequeue. This means there will always be at 32 * least one free TRB in the ring. This is useful if you want to turn that 33 * into a link TRB and expand the ring. 34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a 35 * link TRB, then load the pointer with the address in the link TRB. If the 36 * link TRB had its toggle bit set, you may need to update the ring cycle 37 * state (see cycle bit rules). You may have to do this multiple times 38 * until you reach a non-link TRB. 39 * 3. A ring is full if enqueue++ (for the definition of increment above) 40 * equals the dequeue pointer. 41 * 42 * Cycle bit rules: 43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit 44 * in a link TRB, it must toggle the ring cycle state. 45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit 46 * in a link TRB, it must toggle the ring cycle state. 47 * 48 * Producer rules: 49 * 1. Check if ring is full before you enqueue. 50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing. 51 * Update enqueue pointer between each write (which may update the ring 52 * cycle state). 53 * 3. Notify consumer. If SW is producer, it rings the doorbell for command 54 * and endpoint rings. If HC is the producer for the event ring, 55 * and it generates an interrupt according to interrupt modulation rules. 56 * 57 * Consumer rules: 58 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state, 59 * the TRB is owned by the consumer. 60 * 2. Update dequeue pointer (which may update the ring cycle state) and 61 * continue processing TRBs until you reach a TRB which is not owned by you. 62 * 3. Notify the producer. SW is the consumer for the event ring, and it 63 * updates event ring dequeue pointer. HC is the consumer for the command and 64 * endpoint rings; it generates events on the event ring for these. 65 */ 66 67 #include <linux/scatterlist.h> 68 #include <linux/slab.h> 69 #include "xhci.h" 70 71 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, 72 struct xhci_virt_device *virt_dev, 73 struct xhci_event_cmd *event); 74 75 /* 76 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA 77 * address of the TRB. 
78 */ 79 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, 80 union xhci_trb *trb) 81 { 82 unsigned long segment_offset; 83 84 if (!seg || !trb || trb < seg->trbs) 85 return 0; 86 /* offset in TRBs */ 87 segment_offset = trb - seg->trbs; 88 if (segment_offset > TRBS_PER_SEGMENT) 89 return 0; 90 return seg->dma + (segment_offset * sizeof(*trb)); 91 } 92 93 /* Does this link TRB point to the first segment in a ring, 94 * or was the previous TRB the last TRB on the last segment in the ERST? 95 */ 96 static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, 97 struct xhci_segment *seg, union xhci_trb *trb) 98 { 99 if (ring == xhci->event_ring) 100 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) && 101 (seg->next == xhci->event_ring->first_seg); 102 else 103 return trb->link.control & LINK_TOGGLE; 104 } 105 106 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring 107 * segment? I.e. would the updated event TRB pointer step off the end of the 108 * event seg? 109 */ 110 static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 111 struct xhci_segment *seg, union xhci_trb *trb) 112 { 113 if (ring == xhci->event_ring) 114 return trb == &seg->trbs[TRBS_PER_SEGMENT]; 115 else 116 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); 117 } 118 119 static inline int enqueue_is_link_trb(struct xhci_ring *ring) 120 { 121 struct xhci_link_trb *link = &ring->enqueue->link; 122 return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); 123 } 124 125 /* Updates trb to point to the next TRB in the ring, and updates seg if the next 126 * TRB is in a new segment. This does not skip over link TRBs, and it does not 127 * effect the ring dequeue or enqueue pointers. 128 */ 129 static void next_trb(struct xhci_hcd *xhci, 130 struct xhci_ring *ring, 131 struct xhci_segment **seg, 132 union xhci_trb **trb) 133 { 134 if (last_trb(xhci, ring, *seg, *trb)) { 135 *seg = (*seg)->next; 136 *trb = ((*seg)->trbs); 137 } else { 138 (*trb)++; 139 } 140 } 141 142 /* 143 * See Cycle bit rules. SW is the consumer for the event ring only. 144 * Don't make a ring full of link TRBs. That would be dumb and this would loop. 145 */ 146 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) 147 { 148 union xhci_trb *next = ++(ring->dequeue); 149 unsigned long long addr; 150 151 ring->deq_updates++; 152 /* Update the dequeue pointer further if that was a link TRB or we're at 153 * the end of an event ring segment (which doesn't have link TRBS) 154 */ 155 while (last_trb(xhci, ring, ring->deq_seg, next)) { 156 if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) { 157 ring->cycle_state = (ring->cycle_state ? 0 : 1); 158 if (!in_interrupt()) 159 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", 160 ring, 161 (unsigned int) ring->cycle_state); 162 } 163 ring->deq_seg = ring->deq_seg->next; 164 ring->dequeue = ring->deq_seg->trbs; 165 next = ring->dequeue; 166 } 167 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); 168 if (ring == xhci->event_ring) 169 xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr); 170 else if (ring == xhci->cmd_ring) 171 xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr); 172 else 173 xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr); 174 } 175 176 /* 177 * See Cycle bit rules. SW is the consumer for the event ring only. 178 * Don't make a ring full of link TRBs. That would be dumb and this would loop. 
179 * 180 * If we've just enqueued a TRB that is in the middle of a TD (meaning the 181 * chain bit is set), then set the chain bit in all the following link TRBs. 182 * If we've enqueued the last TRB in a TD, make sure the following link TRBs 183 * have their chain bit cleared (so that each Link TRB is a separate TD). 184 * 185 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit 186 * set, but other sections talk about dealing with the chain bit set. This was 187 * fixed in the 0.96 specification errata, but we have to assume that all 0.95 188 * xHCI hardware can't handle the chain bit being cleared on a link TRB. 189 * 190 * @more_trbs_coming: Will you enqueue more TRBs before calling 191 * prepare_transfer()? 192 */ 193 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, 194 bool consumer, bool more_trbs_coming) 195 { 196 u32 chain; 197 union xhci_trb *next; 198 unsigned long long addr; 199 200 chain = ring->enqueue->generic.field[3] & TRB_CHAIN; 201 next = ++(ring->enqueue); 202 203 ring->enq_updates++; 204 /* Update the dequeue pointer further if that was a link TRB or we're at 205 * the end of an event ring segment (which doesn't have link TRBS) 206 */ 207 while (last_trb(xhci, ring, ring->enq_seg, next)) { 208 if (!consumer) { 209 if (ring != xhci->event_ring) { 210 /* 211 * If the caller doesn't plan on enqueueing more 212 * TDs before ringing the doorbell, then we 213 * don't want to give the link TRB to the 214 * hardware just yet. We'll give the link TRB 215 * back in prepare_ring() just before we enqueue 216 * the TD at the top of the ring. 217 */ 218 if (!chain && !more_trbs_coming) 219 break; 220 221 /* If we're not dealing with 0.95 hardware, 222 * carry over the chain bit of the previous TRB 223 * (which may mean the chain bit is cleared). 224 */ 225 if (!xhci_link_trb_quirk(xhci)) { 226 next->link.control &= ~TRB_CHAIN; 227 next->link.control |= chain; 228 } 229 /* Give this link TRB to the hardware */ 230 wmb(); 231 next->link.control ^= TRB_CYCLE; 232 } 233 /* Toggle the cycle bit after the last ring segment. */ 234 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 235 ring->cycle_state = (ring->cycle_state ? 0 : 1); 236 if (!in_interrupt()) 237 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", 238 ring, 239 (unsigned int) ring->cycle_state); 240 } 241 } 242 ring->enq_seg = ring->enq_seg->next; 243 ring->enqueue = ring->enq_seg->trbs; 244 next = ring->enqueue; 245 } 246 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); 247 if (ring == xhci->event_ring) 248 xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr); 249 else if (ring == xhci->cmd_ring) 250 xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr); 251 else 252 xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr); 253 } 254 255 /* 256 * Check to see if there's room to enqueue num_trbs on the ring. See rules 257 * above. 258 * FIXME: this would be simpler and faster if we just kept track of the number 259 * of free TRBs in a ring. 
260 */ 261 static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, 262 unsigned int num_trbs) 263 { 264 int i; 265 union xhci_trb *enq = ring->enqueue; 266 struct xhci_segment *enq_seg = ring->enq_seg; 267 struct xhci_segment *cur_seg; 268 unsigned int left_on_ring; 269 270 /* If we are currently pointing to a link TRB, advance the 271 * enqueue pointer before checking for space */ 272 while (last_trb(xhci, ring, enq_seg, enq)) { 273 enq_seg = enq_seg->next; 274 enq = enq_seg->trbs; 275 } 276 277 /* Check if ring is empty */ 278 if (enq == ring->dequeue) { 279 /* Can't use link trbs */ 280 left_on_ring = TRBS_PER_SEGMENT - 1; 281 for (cur_seg = enq_seg->next; cur_seg != enq_seg; 282 cur_seg = cur_seg->next) 283 left_on_ring += TRBS_PER_SEGMENT - 1; 284 285 /* Always need one TRB free in the ring. */ 286 left_on_ring -= 1; 287 if (num_trbs > left_on_ring) { 288 xhci_warn(xhci, "Not enough room on ring; " 289 "need %u TRBs, %u TRBs left\n", 290 num_trbs, left_on_ring); 291 return 0; 292 } 293 return 1; 294 } 295 /* Make sure there's an extra empty TRB available */ 296 for (i = 0; i <= num_trbs; ++i) { 297 if (enq == ring->dequeue) 298 return 0; 299 enq++; 300 while (last_trb(xhci, ring, enq_seg, enq)) { 301 enq_seg = enq_seg->next; 302 enq = enq_seg->trbs; 303 } 304 } 305 return 1; 306 } 307 308 /* Ring the host controller doorbell after placing a command on the ring */ 309 void xhci_ring_cmd_db(struct xhci_hcd *xhci) 310 { 311 xhci_dbg(xhci, "// Ding dong!\n"); 312 xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]); 313 /* Flush PCI posted writes */ 314 xhci_readl(xhci, &xhci->dba->doorbell[0]); 315 } 316 317 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, 318 unsigned int slot_id, 319 unsigned int ep_index, 320 unsigned int stream_id) 321 { 322 __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; 323 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 324 unsigned int ep_state = ep->ep_state; 325 326 /* Don't ring the doorbell for this endpoint if there are pending 327 * cancellations because we don't want to interrupt processing. 328 * We don't want to restart any stream rings if there's a set dequeue 329 * pointer command pending because the device can choose to start any 330 * stream once the endpoint is on the HW schedule. 331 * FIXME - check all the stream rings for pending cancellations. 332 */ 333 if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) || 334 (ep_state & EP_HALTED)) 335 return; 336 xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr); 337 /* The CPU has better things to do at this point than wait for a 338 * write-posting flush. It'll get there soon enough. 
339 */ 340 } 341 342 /* Ring the doorbell for any rings with pending URBs */ 343 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, 344 unsigned int slot_id, 345 unsigned int ep_index) 346 { 347 unsigned int stream_id; 348 struct xhci_virt_ep *ep; 349 350 ep = &xhci->devs[slot_id]->eps[ep_index]; 351 352 /* A ring has pending URBs if its TD list is not empty */ 353 if (!(ep->ep_state & EP_HAS_STREAMS)) { 354 if (!(list_empty(&ep->ring->td_list))) 355 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); 356 return; 357 } 358 359 for (stream_id = 1; stream_id < ep->stream_info->num_streams; 360 stream_id++) { 361 struct xhci_stream_info *stream_info = ep->stream_info; 362 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) 363 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 364 stream_id); 365 } 366 } 367 368 /* 369 * Find the segment that trb is in. Start searching in start_seg. 370 * If we must move past a segment that has a link TRB with a toggle cycle state 371 * bit set, then we will toggle the value pointed at by cycle_state. 372 */ 373 static struct xhci_segment *find_trb_seg( 374 struct xhci_segment *start_seg, 375 union xhci_trb *trb, int *cycle_state) 376 { 377 struct xhci_segment *cur_seg = start_seg; 378 struct xhci_generic_trb *generic_trb; 379 380 while (cur_seg->trbs > trb || 381 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { 382 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; 383 if ((generic_trb->field[3] & TRB_TYPE_BITMASK) == 384 TRB_TYPE(TRB_LINK) && 385 (generic_trb->field[3] & LINK_TOGGLE)) 386 *cycle_state = ~(*cycle_state) & 0x1; 387 cur_seg = cur_seg->next; 388 if (cur_seg == start_seg) 389 /* Looped over the entire list. Oops! */ 390 return NULL; 391 } 392 return cur_seg; 393 } 394 395 396 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, 397 unsigned int slot_id, unsigned int ep_index, 398 unsigned int stream_id) 399 { 400 struct xhci_virt_ep *ep; 401 402 ep = &xhci->devs[slot_id]->eps[ep_index]; 403 /* Common case: no streams */ 404 if (!(ep->ep_state & EP_HAS_STREAMS)) 405 return ep->ring; 406 407 if (stream_id == 0) { 408 xhci_warn(xhci, 409 "WARN: Slot ID %u, ep index %u has streams, " 410 "but URB has no stream ID.\n", 411 slot_id, ep_index); 412 return NULL; 413 } 414 415 if (stream_id < ep->stream_info->num_streams) 416 return ep->stream_info->stream_rings[stream_id]; 417 418 xhci_warn(xhci, 419 "WARN: Slot ID %u, ep index %u has " 420 "stream IDs 1 to %u allocated, " 421 "but stream ID %u is requested.\n", 422 slot_id, ep_index, 423 ep->stream_info->num_streams - 1, 424 stream_id); 425 return NULL; 426 } 427 428 /* Get the right ring for the given URB. 429 * If the endpoint supports streams, boundary check the URB's stream ID. 430 * If the endpoint doesn't support streams, return the singular endpoint ring. 431 */ 432 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, 433 struct urb *urb) 434 { 435 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, 436 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); 437 } 438 439 /* 440 * Move the xHC's endpoint ring dequeue pointer past cur_td. 441 * Record the new state of the xHC's endpoint ring dequeue segment, 442 * dequeue pointer, and new consumer cycle state in state. 443 * Update our internal representation of the ring's dequeue pointer. 444 * 445 * We do this in three jumps: 446 * - First we update our new ring state to be the same as when the xHC stopped. 
447 * - Then we traverse the ring to find the segment that contains 448 * the last TRB in the TD. We toggle the xHC's new cycle state when we pass 449 * any link TRBs with the toggle cycle bit set. 450 * - Finally we move the dequeue state one TRB further, toggling the cycle bit 451 * if we've moved it past a link TRB with the toggle cycle bit set. 452 */ 453 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 454 unsigned int slot_id, unsigned int ep_index, 455 unsigned int stream_id, struct xhci_td *cur_td, 456 struct xhci_dequeue_state *state) 457 { 458 struct xhci_virt_device *dev = xhci->devs[slot_id]; 459 struct xhci_ring *ep_ring; 460 struct xhci_generic_trb *trb; 461 struct xhci_ep_ctx *ep_ctx; 462 dma_addr_t addr; 463 464 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, 465 ep_index, stream_id); 466 if (!ep_ring) { 467 xhci_warn(xhci, "WARN can't find new dequeue state " 468 "for invalid stream ID %u.\n", 469 stream_id); 470 return; 471 } 472 state->new_cycle_state = 0; 473 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); 474 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 475 dev->eps[ep_index].stopped_trb, 476 &state->new_cycle_state); 477 if (!state->new_deq_seg) 478 BUG(); 479 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 480 xhci_dbg(xhci, "Finding endpoint context\n"); 481 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 482 state->new_cycle_state = 0x1 & ep_ctx->deq; 483 484 state->new_deq_ptr = cur_td->last_trb; 485 xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); 486 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 487 state->new_deq_ptr, 488 &state->new_cycle_state); 489 if (!state->new_deq_seg) 490 BUG(); 491 492 trb = &state->new_deq_ptr->generic; 493 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && 494 (trb->field[3] & LINK_TOGGLE)) 495 state->new_cycle_state = ~(state->new_cycle_state) & 0x1; 496 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 497 498 /* Don't update the ring cycle state for the producer (us). */ 499 xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", 500 state->new_deq_seg); 501 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); 502 xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", 503 (unsigned long long) addr); 504 xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); 505 ep_ring->dequeue = state->new_deq_ptr; 506 ep_ring->deq_seg = state->new_deq_seg; 507 } 508 509 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 510 struct xhci_td *cur_td) 511 { 512 struct xhci_segment *cur_seg; 513 union xhci_trb *cur_trb; 514 515 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; 516 true; 517 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 518 if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) == 519 TRB_TYPE(TRB_LINK)) { 520 /* Unchain any chained Link TRBs, but 521 * leave the pointers intact. 
522 */ 523 cur_trb->generic.field[3] &= ~TRB_CHAIN; 524 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 525 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 526 "in seg %p (0x%llx dma)\n", 527 cur_trb, 528 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), 529 cur_seg, 530 (unsigned long long)cur_seg->dma); 531 } else { 532 cur_trb->generic.field[0] = 0; 533 cur_trb->generic.field[1] = 0; 534 cur_trb->generic.field[2] = 0; 535 /* Preserve only the cycle bit of this TRB */ 536 cur_trb->generic.field[3] &= TRB_CYCLE; 537 cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP); 538 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 539 "in seg %p (0x%llx dma)\n", 540 cur_trb, 541 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), 542 cur_seg, 543 (unsigned long long)cur_seg->dma); 544 } 545 if (cur_trb == cur_td->last_trb) 546 break; 547 } 548 } 549 550 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 551 unsigned int ep_index, unsigned int stream_id, 552 struct xhci_segment *deq_seg, 553 union xhci_trb *deq_ptr, u32 cycle_state); 554 555 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 556 unsigned int slot_id, unsigned int ep_index, 557 unsigned int stream_id, 558 struct xhci_dequeue_state *deq_state) 559 { 560 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 561 562 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " 563 "new deq ptr = %p (0x%llx dma), new cycle = %u\n", 564 deq_state->new_deq_seg, 565 (unsigned long long)deq_state->new_deq_seg->dma, 566 deq_state->new_deq_ptr, 567 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), 568 deq_state->new_cycle_state); 569 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id, 570 deq_state->new_deq_seg, 571 deq_state->new_deq_ptr, 572 (u32) deq_state->new_cycle_state); 573 /* Stop the TD queueing code from ringing the doorbell until 574 * this command completes. The HC won't set the dequeue pointer 575 * if the ring is running, and ringing the doorbell starts the 576 * ring running. 577 */ 578 ep->ep_state |= SET_DEQ_PENDING; 579 } 580 581 static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, 582 struct xhci_virt_ep *ep) 583 { 584 ep->ep_state &= ~EP_HALT_PENDING; 585 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the 586 * timer is running on another CPU, we don't decrement stop_cmds_pending 587 * (since we didn't successfully stop the watchdog timer). 588 */ 589 if (del_timer(&ep->stop_cmd_timer)) 590 ep->stop_cmds_pending--; 591 } 592 593 /* Must be called with xhci->lock held in interrupt context */ 594 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, 595 struct xhci_td *cur_td, int status, char *adjective) 596 { 597 struct usb_hcd *hcd = xhci_to_hcd(xhci); 598 struct urb *urb; 599 struct urb_priv *urb_priv; 600 601 urb = cur_td->urb; 602 urb_priv = urb->hcpriv; 603 urb_priv->td_cnt++; 604 605 /* Only giveback urb when this is the last td in urb */ 606 if (urb_priv->td_cnt == urb_priv->length) { 607 usb_hcd_unlink_urb_from_ep(hcd, urb); 608 xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb); 609 610 spin_unlock(&xhci->lock); 611 usb_hcd_giveback_urb(hcd, urb, status); 612 xhci_urb_free_priv(xhci, urb_priv); 613 spin_lock(&xhci->lock); 614 xhci_dbg(xhci, "%s URB given back\n", adjective); 615 } 616 } 617 618 /* 619 * When we get a command completion for a Stop Endpoint Command, we need to 620 * unlink any cancelled TDs from the ring. There are two ways to do that: 621 * 622 * 1. 
If the HW was in the middle of processing the TD that needs to be 623 * cancelled, then we must move the ring's dequeue pointer past the last TRB 624 * in the TD with a Set Dequeue Pointer Command. 625 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain 626 * bit cleared) so that the HW will skip over them. 627 */ 628 static void handle_stopped_endpoint(struct xhci_hcd *xhci, 629 union xhci_trb *trb, struct xhci_event_cmd *event) 630 { 631 unsigned int slot_id; 632 unsigned int ep_index; 633 struct xhci_virt_device *virt_dev; 634 struct xhci_ring *ep_ring; 635 struct xhci_virt_ep *ep; 636 struct list_head *entry; 637 struct xhci_td *cur_td = NULL; 638 struct xhci_td *last_unlinked_td; 639 640 struct xhci_dequeue_state deq_state; 641 642 if (unlikely(TRB_TO_SUSPEND_PORT( 643 xhci->cmd_ring->dequeue->generic.field[3]))) { 644 slot_id = TRB_TO_SLOT_ID( 645 xhci->cmd_ring->dequeue->generic.field[3]); 646 virt_dev = xhci->devs[slot_id]; 647 if (virt_dev) 648 handle_cmd_in_cmd_wait_list(xhci, virt_dev, 649 event); 650 else 651 xhci_warn(xhci, "Stop endpoint command " 652 "completion for disabled slot %u\n", 653 slot_id); 654 return; 655 } 656 657 memset(&deq_state, 0, sizeof(deq_state)); 658 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 659 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 660 ep = &xhci->devs[slot_id]->eps[ep_index]; 661 662 if (list_empty(&ep->cancelled_td_list)) { 663 xhci_stop_watchdog_timer_in_irq(xhci, ep); 664 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 665 return; 666 } 667 668 /* Fix up the ep ring first, so HW stops executing cancelled TDs. 669 * We have the xHCI lock, so nothing can modify this list until we drop 670 * it. We're also in the event handler, so we can't get re-interrupted 671 * if another Stop Endpoint command completes 672 */ 673 list_for_each(entry, &ep->cancelled_td_list) { 674 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); 675 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", 676 cur_td->first_trb, 677 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); 678 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); 679 if (!ep_ring) { 680 /* This shouldn't happen unless a driver is mucking 681 * with the stream ID after submission. This will 682 * leave the TD on the hardware ring, and the hardware 683 * will try to execute it, and may access a buffer 684 * that has already been freed. In the best case, the 685 * hardware will execute it, and the event handler will 686 * ignore the completion event for that TD, since it was 687 * removed from the td_list for that endpoint. In 688 * short, don't muck with the stream ID after 689 * submission. 690 */ 691 xhci_warn(xhci, "WARN Cancelled URB %p " 692 "has invalid stream ID %u.\n", 693 cur_td->urb, 694 cur_td->urb->stream_id); 695 goto remove_finished_td; 696 } 697 /* 698 * If we stopped on the TD we need to cancel, then we have to 699 * move the xHC endpoint ring dequeue pointer past this TD. 700 */ 701 if (cur_td == ep->stopped_td) 702 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, 703 cur_td->urb->stream_id, 704 cur_td, &deq_state); 705 else 706 td_to_noop(xhci, ep_ring, cur_td); 707 remove_finished_td: 708 /* 709 * The event handler won't see a completion for this TD anymore, 710 * so remove it from the endpoint ring's TD list. Keep it in 711 * the cancelled TD list for URB completion later. 
712 */ 713 list_del(&cur_td->td_list); 714 } 715 last_unlinked_td = cur_td; 716 xhci_stop_watchdog_timer_in_irq(xhci, ep); 717 718 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 719 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 720 xhci_queue_new_dequeue_state(xhci, 721 slot_id, ep_index, 722 ep->stopped_td->urb->stream_id, 723 &deq_state); 724 xhci_ring_cmd_db(xhci); 725 } else { 726 /* Otherwise ring the doorbell(s) to restart queued transfers */ 727 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 728 } 729 ep->stopped_td = NULL; 730 ep->stopped_trb = NULL; 731 732 /* 733 * Drop the lock and complete the URBs in the cancelled TD list. 734 * New TDs to be cancelled might be added to the end of the list before 735 * we can complete all the URBs for the TDs we already unlinked. 736 * So stop when we've completed the URB for the last TD we unlinked. 737 */ 738 do { 739 cur_td = list_entry(ep->cancelled_td_list.next, 740 struct xhci_td, cancelled_td_list); 741 list_del(&cur_td->cancelled_td_list); 742 743 /* Clean up the cancelled URB */ 744 /* Doesn't matter what we pass for status, since the core will 745 * just overwrite it (because the URB has been unlinked). 746 */ 747 xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled"); 748 749 /* Stop processing the cancelled list if the watchdog timer is 750 * running. 751 */ 752 if (xhci->xhc_state & XHCI_STATE_DYING) 753 return; 754 } while (cur_td != last_unlinked_td); 755 756 /* Return to the event handler with xhci->lock re-acquired */ 757 } 758 759 /* Watchdog timer function for when a stop endpoint command fails to complete. 760 * In this case, we assume the host controller is broken or dying or dead. The 761 * host may still be completing some other events, so we have to be careful to 762 * let the event ring handler and the URB dequeueing/enqueueing functions know 763 * through xhci->state. 764 * 765 * The timer may also fire if the host takes a very long time to respond to the 766 * command, and the stop endpoint command completion handler cannot delete the 767 * timer before the timer function is called. Another endpoint cancellation may 768 * sneak in before the timer function can grab the lock, and that may queue 769 * another stop endpoint command and add the timer back. So we cannot use a 770 * simple flag to say whether there is a pending stop endpoint command for a 771 * particular endpoint. 772 * 773 * Instead we use a combination of that flag and a counter for the number of 774 * pending stop endpoint commands. If the timer is the tail end of the last 775 * stop endpoint command, and the endpoint's command is still pending, we assume 776 * the host is dying. 
777 */ 778 void xhci_stop_endpoint_command_watchdog(unsigned long arg) 779 { 780 struct xhci_hcd *xhci; 781 struct xhci_virt_ep *ep; 782 struct xhci_virt_ep *temp_ep; 783 struct xhci_ring *ring; 784 struct xhci_td *cur_td; 785 int ret, i, j; 786 787 ep = (struct xhci_virt_ep *) arg; 788 xhci = ep->xhci; 789 790 spin_lock(&xhci->lock); 791 792 ep->stop_cmds_pending--; 793 if (xhci->xhc_state & XHCI_STATE_DYING) { 794 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " 795 "xHCI as DYING, exiting.\n"); 796 spin_unlock(&xhci->lock); 797 return; 798 } 799 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 800 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " 801 "exiting.\n"); 802 spin_unlock(&xhci->lock); 803 return; 804 } 805 806 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); 807 xhci_warn(xhci, "Assuming host is dying, halting host.\n"); 808 /* Oops, HC is dead or dying or at least not responding to the stop 809 * endpoint command. 810 */ 811 xhci->xhc_state |= XHCI_STATE_DYING; 812 /* Disable interrupts from the host controller and start halting it */ 813 xhci_quiesce(xhci); 814 spin_unlock(&xhci->lock); 815 816 ret = xhci_halt(xhci); 817 818 spin_lock(&xhci->lock); 819 if (ret < 0) { 820 /* This is bad; the host is not responding to commands and it's 821 * not allowing itself to be halted. At least interrupts are 822 * disabled, so we can set HC_STATE_HALT and notify the 823 * USB core. But if we call usb_hc_died(), it will attempt to 824 * disconnect all device drivers under this host. Those 825 * disconnect() methods will wait for all URBs to be unlinked, 826 * so we must complete them. 827 */ 828 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); 829 xhci_warn(xhci, "Completing active URBs anyway.\n"); 830 /* We could turn all TDs on the rings to no-ops. This won't 831 * help if the host has cached part of the ring, and is slow if 832 * we want to preserve the cycle bit. Skip it and hope the host 833 * doesn't touch the memory. 834 */ 835 } 836 for (i = 0; i < MAX_HC_SLOTS; i++) { 837 if (!xhci->devs[i]) 838 continue; 839 for (j = 0; j < 31; j++) { 840 temp_ep = &xhci->devs[i]->eps[j]; 841 ring = temp_ep->ring; 842 if (!ring) 843 continue; 844 xhci_dbg(xhci, "Killing URBs for slot ID %u, " 845 "ep index %u\n", i, j); 846 while (!list_empty(&ring->td_list)) { 847 cur_td = list_first_entry(&ring->td_list, 848 struct xhci_td, 849 td_list); 850 list_del(&cur_td->td_list); 851 if (!list_empty(&cur_td->cancelled_td_list)) 852 list_del(&cur_td->cancelled_td_list); 853 xhci_giveback_urb_in_irq(xhci, cur_td, 854 -ESHUTDOWN, "killed"); 855 } 856 while (!list_empty(&temp_ep->cancelled_td_list)) { 857 cur_td = list_first_entry( 858 &temp_ep->cancelled_td_list, 859 struct xhci_td, 860 cancelled_td_list); 861 list_del(&cur_td->cancelled_td_list); 862 xhci_giveback_urb_in_irq(xhci, cur_td, 863 -ESHUTDOWN, "killed"); 864 } 865 } 866 } 867 spin_unlock(&xhci->lock); 868 xhci_to_hcd(xhci)->state = HC_STATE_HALT; 869 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 870 usb_hc_died(xhci_to_hcd(xhci)); 871 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 872 } 873 874 /* 875 * When we get a completion for a Set Transfer Ring Dequeue Pointer command, 876 * we need to clear the set deq pending flag in the endpoint ring state, so that 877 * the TD queueing code can ring the doorbell again. We also need to ring the 878 * endpoint doorbell to restart the ring, but only if there aren't more 879 * cancellations pending. 
880 */ 881 static void handle_set_deq_completion(struct xhci_hcd *xhci, 882 struct xhci_event_cmd *event, 883 union xhci_trb *trb) 884 { 885 unsigned int slot_id; 886 unsigned int ep_index; 887 unsigned int stream_id; 888 struct xhci_ring *ep_ring; 889 struct xhci_virt_device *dev; 890 struct xhci_ep_ctx *ep_ctx; 891 struct xhci_slot_ctx *slot_ctx; 892 893 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 894 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 895 stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]); 896 dev = xhci->devs[slot_id]; 897 898 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); 899 if (!ep_ring) { 900 xhci_warn(xhci, "WARN Set TR deq ptr command for " 901 "freed stream ID %u\n", 902 stream_id); 903 /* XXX: Harmless??? */ 904 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 905 return; 906 } 907 908 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 909 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 910 911 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 912 unsigned int ep_state; 913 unsigned int slot_state; 914 915 switch (GET_COMP_CODE(event->status)) { 916 case COMP_TRB_ERR: 917 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " 918 "of stream ID configuration\n"); 919 break; 920 case COMP_CTX_STATE: 921 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 922 "to incorrect slot or ep state.\n"); 923 ep_state = ep_ctx->ep_info; 924 ep_state &= EP_STATE_MASK; 925 slot_state = slot_ctx->dev_state; 926 slot_state = GET_SLOT_STATE(slot_state); 927 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 928 slot_state, ep_state); 929 break; 930 case COMP_EBADSLT: 931 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because " 932 "slot %u was not enabled.\n", slot_id); 933 break; 934 default: 935 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " 936 "completion code of %u.\n", 937 GET_COMP_CODE(event->status)); 938 break; 939 } 940 /* OK what do we do now? The endpoint state is hosed, and we 941 * should never get to this point if the synchronization between 942 * queueing, and endpoint state are correct. This might happen 943 * if the device gets disconnected after we've finished 944 * cancelling URBs, which might not be an error... 945 */ 946 } else { 947 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", 948 ep_ctx->deq); 949 } 950 951 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 952 /* Restart any rings with pending URBs */ 953 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 954 } 955 956 static void handle_reset_ep_completion(struct xhci_hcd *xhci, 957 struct xhci_event_cmd *event, 958 union xhci_trb *trb) 959 { 960 int slot_id; 961 unsigned int ep_index; 962 963 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 964 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 965 /* This command will only fail if the endpoint wasn't halted, 966 * but we don't care. 967 */ 968 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", 969 (unsigned int) GET_COMP_CODE(event->status)); 970 971 /* HW with the reset endpoint quirk needs to have a configure endpoint 972 * command complete before the endpoint can be used. Queue that here 973 * because the HW can't handle two commands being queued in a row. 
974 */ 975 if (xhci->quirks & XHCI_RESET_EP_QUIRK) { 976 xhci_dbg(xhci, "Queueing configure endpoint command\n"); 977 xhci_queue_configure_endpoint(xhci, 978 xhci->devs[slot_id]->in_ctx->dma, slot_id, 979 false); 980 xhci_ring_cmd_db(xhci); 981 } else { 982 /* Clear our internal halted state and restart the ring(s) */ 983 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 984 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 985 } 986 } 987 988 /* Check to see if a command in the device's command queue matches this one. 989 * Signal the completion or free the command, and return 1. Return 0 if the 990 * completed command isn't at the head of the command list. 991 */ 992 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, 993 struct xhci_virt_device *virt_dev, 994 struct xhci_event_cmd *event) 995 { 996 struct xhci_command *command; 997 998 if (list_empty(&virt_dev->cmd_list)) 999 return 0; 1000 1001 command = list_entry(virt_dev->cmd_list.next, 1002 struct xhci_command, cmd_list); 1003 if (xhci->cmd_ring->dequeue != command->command_trb) 1004 return 0; 1005 1006 command->status = 1007 GET_COMP_CODE(event->status); 1008 list_del(&command->cmd_list); 1009 if (command->completion) 1010 complete(command->completion); 1011 else 1012 xhci_free_command(xhci, command); 1013 return 1; 1014 } 1015 1016 static void handle_cmd_completion(struct xhci_hcd *xhci, 1017 struct xhci_event_cmd *event) 1018 { 1019 int slot_id = TRB_TO_SLOT_ID(event->flags); 1020 u64 cmd_dma; 1021 dma_addr_t cmd_dequeue_dma; 1022 struct xhci_input_control_ctx *ctrl_ctx; 1023 struct xhci_virt_device *virt_dev; 1024 unsigned int ep_index; 1025 struct xhci_ring *ep_ring; 1026 unsigned int ep_state; 1027 1028 cmd_dma = event->cmd_trb; 1029 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 1030 xhci->cmd_ring->dequeue); 1031 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 1032 if (cmd_dequeue_dma == 0) { 1033 xhci->error_bitmask |= 1 << 4; 1034 return; 1035 } 1036 /* Does the DMA address match our internal dequeue pointer address? */ 1037 if (cmd_dma != (u64) cmd_dequeue_dma) { 1038 xhci->error_bitmask |= 1 << 5; 1039 return; 1040 } 1041 switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { 1042 case TRB_TYPE(TRB_ENABLE_SLOT): 1043 if (GET_COMP_CODE(event->status) == COMP_SUCCESS) 1044 xhci->slot_id = slot_id; 1045 else 1046 xhci->slot_id = 0; 1047 complete(&xhci->addr_dev); 1048 break; 1049 case TRB_TYPE(TRB_DISABLE_SLOT): 1050 if (xhci->devs[slot_id]) 1051 xhci_free_virt_device(xhci, slot_id); 1052 break; 1053 case TRB_TYPE(TRB_CONFIG_EP): 1054 virt_dev = xhci->devs[slot_id]; 1055 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) 1056 break; 1057 /* 1058 * Configure endpoint commands can come from the USB core 1059 * configuration or alt setting changes, or because the HW 1060 * needed an extra configure endpoint command after a reset 1061 * endpoint command or streams were being configured. 1062 * If the command was for a halted endpoint, the xHCI driver 1063 * is not waiting on the configure endpoint command. 1064 */ 1065 ctrl_ctx = xhci_get_input_control_ctx(xhci, 1066 virt_dev->in_ctx); 1067 /* Input ctx add_flags are the endpoint index plus one */ 1068 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; 1069 /* A usb_set_interface() call directly after clearing a halted 1070 * condition may race on this quirky hardware. Not worth 1071 * worrying about, since this is prototype hardware. 
Not sure 1072 * if this will work for streams, but streams support was 1073 * untested on this prototype. 1074 */ 1075 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 1076 ep_index != (unsigned int) -1 && 1077 ctrl_ctx->add_flags - SLOT_FLAG == 1078 ctrl_ctx->drop_flags) { 1079 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1080 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 1081 if (!(ep_state & EP_HALTED)) 1082 goto bandwidth_change; 1083 xhci_dbg(xhci, "Completed config ep cmd - " 1084 "last ep index = %d, state = %d\n", 1085 ep_index, ep_state); 1086 /* Clear internal halted state and restart ring(s) */ 1087 xhci->devs[slot_id]->eps[ep_index].ep_state &= 1088 ~EP_HALTED; 1089 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 1090 break; 1091 } 1092 bandwidth_change: 1093 xhci_dbg(xhci, "Completed config ep cmd\n"); 1094 xhci->devs[slot_id]->cmd_status = 1095 GET_COMP_CODE(event->status); 1096 complete(&xhci->devs[slot_id]->cmd_completion); 1097 break; 1098 case TRB_TYPE(TRB_EVAL_CONTEXT): 1099 virt_dev = xhci->devs[slot_id]; 1100 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) 1101 break; 1102 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); 1103 complete(&xhci->devs[slot_id]->cmd_completion); 1104 break; 1105 case TRB_TYPE(TRB_ADDR_DEV): 1106 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); 1107 complete(&xhci->addr_dev); 1108 break; 1109 case TRB_TYPE(TRB_STOP_RING): 1110 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event); 1111 break; 1112 case TRB_TYPE(TRB_SET_DEQ): 1113 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue); 1114 break; 1115 case TRB_TYPE(TRB_CMD_NOOP): 1116 ++xhci->noops_handled; 1117 break; 1118 case TRB_TYPE(TRB_RESET_EP): 1119 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); 1120 break; 1121 case TRB_TYPE(TRB_RESET_DEV): 1122 xhci_dbg(xhci, "Completed reset device command.\n"); 1123 slot_id = TRB_TO_SLOT_ID( 1124 xhci->cmd_ring->dequeue->generic.field[3]); 1125 virt_dev = xhci->devs[slot_id]; 1126 if (virt_dev) 1127 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); 1128 else 1129 xhci_warn(xhci, "Reset device command completion " 1130 "for disabled slot %u\n", slot_id); 1131 break; 1132 case TRB_TYPE(TRB_NEC_GET_FW): 1133 if (!(xhci->quirks & XHCI_NEC_HOST)) { 1134 xhci->error_bitmask |= 1 << 6; 1135 break; 1136 } 1137 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", 1138 NEC_FW_MAJOR(event->status), 1139 NEC_FW_MINOR(event->status)); 1140 break; 1141 default: 1142 /* Skip over unknown commands on the event ring */ 1143 xhci->error_bitmask |= 1 << 6; 1144 break; 1145 } 1146 inc_deq(xhci, xhci->cmd_ring, false); 1147 } 1148 1149 static void handle_vendor_event(struct xhci_hcd *xhci, 1150 union xhci_trb *event) 1151 { 1152 u32 trb_type; 1153 1154 trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]); 1155 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); 1156 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) 1157 handle_cmd_completion(xhci, &event->event_cmd); 1158 } 1159 1160 static void handle_port_status(struct xhci_hcd *xhci, 1161 union xhci_trb *event) 1162 { 1163 struct usb_hcd *hcd = xhci_to_hcd(xhci); 1164 u32 port_id; 1165 u32 temp, temp1; 1166 u32 __iomem *addr; 1167 int ports; 1168 int slot_id; 1169 1170 /* Port status change events always have a successful completion code */ 1171 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { 1172 xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); 1173 
xhci->error_bitmask |= 1 << 8; 1174 } 1175 port_id = GET_PORT_ID(event->generic.field[0]); 1176 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); 1177 1178 ports = HCS_MAX_PORTS(xhci->hcs_params1); 1179 if ((port_id <= 0) || (port_id > ports)) { 1180 xhci_warn(xhci, "Invalid port id %d\n", port_id); 1181 goto cleanup; 1182 } 1183 1184 addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1); 1185 temp = xhci_readl(xhci, addr); 1186 if (hcd->state == HC_STATE_SUSPENDED) { 1187 xhci_dbg(xhci, "resume root hub\n"); 1188 usb_hcd_resume_root_hub(hcd); 1189 } 1190 1191 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1192 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1193 1194 temp1 = xhci_readl(xhci, &xhci->op_regs->command); 1195 if (!(temp1 & CMD_RUN)) { 1196 xhci_warn(xhci, "xHC is not running.\n"); 1197 goto cleanup; 1198 } 1199 1200 if (DEV_SUPERSPEED(temp)) { 1201 xhci_dbg(xhci, "resume SS port %d\n", port_id); 1202 temp = xhci_port_state_to_neutral(temp); 1203 temp &= ~PORT_PLS_MASK; 1204 temp |= PORT_LINK_STROBE | XDEV_U0; 1205 xhci_writel(xhci, temp, addr); 1206 slot_id = xhci_find_slot_id_by_port(xhci, port_id); 1207 if (!slot_id) { 1208 xhci_dbg(xhci, "slot_id is zero\n"); 1209 goto cleanup; 1210 } 1211 xhci_ring_device(xhci, slot_id); 1212 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); 1213 /* Clear PORT_PLC */ 1214 temp = xhci_readl(xhci, addr); 1215 temp = xhci_port_state_to_neutral(temp); 1216 temp |= PORT_PLC; 1217 xhci_writel(xhci, temp, addr); 1218 } else { 1219 xhci_dbg(xhci, "resume HS port %d\n", port_id); 1220 xhci->resume_done[port_id - 1] = jiffies + 1221 msecs_to_jiffies(20); 1222 mod_timer(&hcd->rh_timer, 1223 xhci->resume_done[port_id - 1]); 1224 /* Do the rest in GetPortStatus */ 1225 } 1226 } 1227 1228 cleanup: 1229 /* Update event ring dequeue pointer before dropping the lock */ 1230 inc_deq(xhci, xhci->event_ring, true); 1231 1232 spin_unlock(&xhci->lock); 1233 /* Pass this up to the core */ 1234 usb_hcd_poll_rh_status(xhci_to_hcd(xhci)); 1235 spin_lock(&xhci->lock); 1236 } 1237 1238 /* 1239 * This TD is defined by the TRBs starting at start_trb in start_seg and ending 1240 * at end_trb, which may be in another segment. If the suspect DMA address is a 1241 * TRB in this TD, this function returns that TRB's segment. Otherwise it 1242 * returns 0. 
1243 */ 1244 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, 1245 union xhci_trb *start_trb, 1246 union xhci_trb *end_trb, 1247 dma_addr_t suspect_dma) 1248 { 1249 dma_addr_t start_dma; 1250 dma_addr_t end_seg_dma; 1251 dma_addr_t end_trb_dma; 1252 struct xhci_segment *cur_seg; 1253 1254 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); 1255 cur_seg = start_seg; 1256 1257 do { 1258 if (start_dma == 0) 1259 return NULL; 1260 /* We may get an event for a Link TRB in the middle of a TD */ 1261 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1262 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1263 /* If the end TRB isn't in this segment, this is set to 0 */ 1264 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 1265 1266 if (end_trb_dma > 0) { 1267 /* The end TRB is in this segment, so suspect should be here */ 1268 if (start_dma <= end_trb_dma) { 1269 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 1270 return cur_seg; 1271 } else { 1272 /* Case for one segment with 1273 * a TD wrapped around to the top 1274 */ 1275 if ((suspect_dma >= start_dma && 1276 suspect_dma <= end_seg_dma) || 1277 (suspect_dma >= cur_seg->dma && 1278 suspect_dma <= end_trb_dma)) 1279 return cur_seg; 1280 } 1281 return NULL; 1282 } else { 1283 /* Might still be somewhere in this segment */ 1284 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1285 return cur_seg; 1286 } 1287 cur_seg = cur_seg->next; 1288 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1289 } while (cur_seg != start_seg); 1290 1291 return NULL; 1292 } 1293 1294 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1295 unsigned int slot_id, unsigned int ep_index, 1296 unsigned int stream_id, 1297 struct xhci_td *td, union xhci_trb *event_trb) 1298 { 1299 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1300 ep->ep_state |= EP_HALTED; 1301 ep->stopped_td = td; 1302 ep->stopped_trb = event_trb; 1303 ep->stopped_stream = stream_id; 1304 1305 xhci_queue_reset_ep(xhci, slot_id, ep_index); 1306 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); 1307 1308 ep->stopped_td = NULL; 1309 ep->stopped_trb = NULL; 1310 ep->stopped_stream = 0; 1311 1312 xhci_ring_cmd_db(xhci); 1313 } 1314 1315 /* Check if an error has halted the endpoint ring. The class driver will 1316 * cleanup the halt for a non-default control endpoint if we indicate a stall. 1317 * However, a babble and other errors also halt the endpoint ring, and the class 1318 * driver won't clear the halt in that case, so we need to issue a Set Transfer 1319 * Ring Dequeue Pointer command manually. 1320 */ 1321 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, 1322 struct xhci_ep_ctx *ep_ctx, 1323 unsigned int trb_comp_code) 1324 { 1325 /* TRB completion codes that may require a manual halt cleanup */ 1326 if (trb_comp_code == COMP_TX_ERR || 1327 trb_comp_code == COMP_BABBLE || 1328 trb_comp_code == COMP_SPLIT_ERR) 1329 /* The 0.96 spec says a babbling control endpoint 1330 * is not halted. The 0.96 spec says it is. Some HW 1331 * claims to be 0.95 compliant, but it halts the control 1332 * endpoint anyway. Check if a babble halted the 1333 * endpoint. 1334 */ 1335 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED) 1336 return 1; 1337 1338 return 0; 1339 } 1340 1341 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) 1342 { 1343 if (trb_comp_code >= 224 && trb_comp_code <= 255) { 1344 /* Vendor defined "informational" completion code, 1345 * treat as not-an-error. 
1346 */ 1347 xhci_dbg(xhci, "Vendor defined info completion code %u\n", 1348 trb_comp_code); 1349 xhci_dbg(xhci, "Treating code as success.\n"); 1350 return 1; 1351 } 1352 return 0; 1353 } 1354 1355 /* 1356 * Finish the td processing, remove the td from td list; 1357 * Return 1 if the urb can be given back. 1358 */ 1359 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, 1360 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1361 struct xhci_virt_ep *ep, int *status, bool skip) 1362 { 1363 struct xhci_virt_device *xdev; 1364 struct xhci_ring *ep_ring; 1365 unsigned int slot_id; 1366 int ep_index; 1367 struct urb *urb = NULL; 1368 struct xhci_ep_ctx *ep_ctx; 1369 int ret = 0; 1370 struct urb_priv *urb_priv; 1371 u32 trb_comp_code; 1372 1373 slot_id = TRB_TO_SLOT_ID(event->flags); 1374 xdev = xhci->devs[slot_id]; 1375 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1376 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1377 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1378 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1379 1380 if (skip) 1381 goto td_cleanup; 1382 1383 if (trb_comp_code == COMP_STOP_INVAL || 1384 trb_comp_code == COMP_STOP) { 1385 /* The Endpoint Stop Command completion will take care of any 1386 * stopped TDs. A stopped TD may be restarted, so don't update 1387 * the ring dequeue pointer or take this TD off any lists yet. 1388 */ 1389 ep->stopped_td = td; 1390 ep->stopped_trb = event_trb; 1391 return 0; 1392 } else { 1393 if (trb_comp_code == COMP_STALL) { 1394 /* The transfer is completed from the driver's 1395 * perspective, but we need to issue a set dequeue 1396 * command for this stalled endpoint to move the dequeue 1397 * pointer past the TD. We can't do that here because 1398 * the halt condition must be cleared first. Let the 1399 * USB class driver clear the stall later. 1400 */ 1401 ep->stopped_td = td; 1402 ep->stopped_trb = event_trb; 1403 ep->stopped_stream = ep_ring->stream_id; 1404 } else if (xhci_requires_manual_halt_cleanup(xhci, 1405 ep_ctx, trb_comp_code)) { 1406 /* Other types of errors halt the endpoint, but the 1407 * class driver doesn't call usb_reset_endpoint() unless 1408 * the error is -EPIPE. Clear the halted status in the 1409 * xHCI hardware manually. 1410 */ 1411 xhci_cleanup_halted_endpoint(xhci, 1412 slot_id, ep_index, ep_ring->stream_id, 1413 td, event_trb); 1414 } else { 1415 /* Update ring dequeue pointer */ 1416 while (ep_ring->dequeue != td->last_trb) 1417 inc_deq(xhci, ep_ring, false); 1418 inc_deq(xhci, ep_ring, false); 1419 } 1420 1421 td_cleanup: 1422 /* Clean up the endpoint's TD list */ 1423 urb = td->urb; 1424 urb_priv = urb->hcpriv; 1425 1426 /* Do one last check of the actual transfer length. 1427 * If the host controller said we transferred more data than 1428 * the buffer length, urb->actual_length will be a very big 1429 * number (since it's unsigned). Play it safe and say we didn't 1430 * transfer anything. 1431 */ 1432 if (urb->actual_length > urb->transfer_buffer_length) { 1433 xhci_warn(xhci, "URB transfer length is wrong, " 1434 "xHC issue? req. len = %u, " 1435 "act. len = %u\n", 1436 urb->transfer_buffer_length, 1437 urb->actual_length); 1438 urb->actual_length = 0; 1439 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1440 *status = -EREMOTEIO; 1441 else 1442 *status = 0; 1443 } 1444 list_del(&td->td_list); 1445 /* Was this TD slated to be cancelled but completed anyway? 
*/ 1446 if (!list_empty(&td->cancelled_td_list)) 1447 list_del(&td->cancelled_td_list); 1448 1449 urb_priv->td_cnt++; 1450 /* Giveback the urb when all the tds are completed */ 1451 if (urb_priv->td_cnt == urb_priv->length) 1452 ret = 1; 1453 } 1454 1455 return ret; 1456 } 1457 1458 /* 1459 * Process control tds, update urb status and actual_length. 1460 */ 1461 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1462 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1463 struct xhci_virt_ep *ep, int *status) 1464 { 1465 struct xhci_virt_device *xdev; 1466 struct xhci_ring *ep_ring; 1467 unsigned int slot_id; 1468 int ep_index; 1469 struct xhci_ep_ctx *ep_ctx; 1470 u32 trb_comp_code; 1471 1472 slot_id = TRB_TO_SLOT_ID(event->flags); 1473 xdev = xhci->devs[slot_id]; 1474 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1475 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1476 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1477 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1478 1479 xhci_debug_trb(xhci, xhci->event_ring->dequeue); 1480 switch (trb_comp_code) { 1481 case COMP_SUCCESS: 1482 if (event_trb == ep_ring->dequeue) { 1483 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " 1484 "without IOC set??\n"); 1485 *status = -ESHUTDOWN; 1486 } else if (event_trb != td->last_trb) { 1487 xhci_warn(xhci, "WARN: Success on ctrl data TRB " 1488 "without IOC set??\n"); 1489 *status = -ESHUTDOWN; 1490 } else { 1491 xhci_dbg(xhci, "Successful control transfer!\n"); 1492 *status = 0; 1493 } 1494 break; 1495 case COMP_SHORT_TX: 1496 xhci_warn(xhci, "WARN: short transfer on control ep\n"); 1497 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1498 *status = -EREMOTEIO; 1499 else 1500 *status = 0; 1501 break; 1502 default: 1503 if (!xhci_requires_manual_halt_cleanup(xhci, 1504 ep_ctx, trb_comp_code)) 1505 break; 1506 xhci_dbg(xhci, "TRB error code %u, " 1507 "halted endpoint index = %u\n", 1508 trb_comp_code, ep_index); 1509 /* else fall through */ 1510 case COMP_STALL: 1511 /* Did we transfer part of the data (middle) phase? */ 1512 if (event_trb != ep_ring->dequeue && 1513 event_trb != td->last_trb) 1514 td->urb->actual_length = 1515 td->urb->transfer_buffer_length 1516 - TRB_LEN(event->transfer_len); 1517 else 1518 td->urb->actual_length = 0; 1519 1520 xhci_cleanup_halted_endpoint(xhci, 1521 slot_id, ep_index, 0, td, event_trb); 1522 return finish_td(xhci, td, event_trb, event, ep, status, true); 1523 } 1524 /* 1525 * Did we transfer any data, despite the errors that might have 1526 * happened? I.e. did we get past the setup stage? 1527 */ 1528 if (event_trb != ep_ring->dequeue) { 1529 /* The event was for the status stage */ 1530 if (event_trb == td->last_trb) { 1531 if (td->urb->actual_length != 0) { 1532 /* Don't overwrite a previously set error code 1533 */ 1534 if ((*status == -EINPROGRESS || *status == 0) && 1535 (td->urb->transfer_flags 1536 & URB_SHORT_NOT_OK)) 1537 /* Did we already see a short data 1538 * stage? */ 1539 *status = -EREMOTEIO; 1540 } else { 1541 td->urb->actual_length = 1542 td->urb->transfer_buffer_length; 1543 } 1544 } else { 1545 /* Maybe the event was for the data stage? 
*/ 1546 if (trb_comp_code != COMP_STOP_INVAL) { 1547 /* We didn't stop on a link TRB in the middle */ 1548 td->urb->actual_length = 1549 td->urb->transfer_buffer_length - 1550 TRB_LEN(event->transfer_len); 1551 xhci_dbg(xhci, "Waiting for status " 1552 "stage event\n"); 1553 return 0; 1554 } 1555 } 1556 } 1557 1558 return finish_td(xhci, td, event_trb, event, ep, status, false); 1559 } 1560 1561 /* 1562 * Process isochronous tds, update urb packet status and actual_length. 1563 */ 1564 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1565 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1566 struct xhci_virt_ep *ep, int *status) 1567 { 1568 struct xhci_ring *ep_ring; 1569 struct urb_priv *urb_priv; 1570 int idx; 1571 int len = 0; 1572 int skip_td = 0; 1573 union xhci_trb *cur_trb; 1574 struct xhci_segment *cur_seg; 1575 u32 trb_comp_code; 1576 1577 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1578 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1579 urb_priv = td->urb->hcpriv; 1580 idx = urb_priv->td_cnt; 1581 1582 if (ep->skip) { 1583 /* The transfer is partly done */ 1584 *status = -EXDEV; 1585 td->urb->iso_frame_desc[idx].status = -EXDEV; 1586 } else { 1587 /* handle completion code */ 1588 switch (trb_comp_code) { 1589 case COMP_SUCCESS: 1590 td->urb->iso_frame_desc[idx].status = 0; 1591 xhci_dbg(xhci, "Successful isoc transfer!\n"); 1592 break; 1593 case COMP_SHORT_TX: 1594 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1595 td->urb->iso_frame_desc[idx].status = 1596 -EREMOTEIO; 1597 else 1598 td->urb->iso_frame_desc[idx].status = 0; 1599 break; 1600 case COMP_BW_OVER: 1601 td->urb->iso_frame_desc[idx].status = -ECOMM; 1602 skip_td = 1; 1603 break; 1604 case COMP_BUFF_OVER: 1605 case COMP_BABBLE: 1606 td->urb->iso_frame_desc[idx].status = -EOVERFLOW; 1607 skip_td = 1; 1608 break; 1609 case COMP_STALL: 1610 td->urb->iso_frame_desc[idx].status = -EPROTO; 1611 skip_td = 1; 1612 break; 1613 case COMP_STOP: 1614 case COMP_STOP_INVAL: 1615 break; 1616 default: 1617 td->urb->iso_frame_desc[idx].status = -1; 1618 break; 1619 } 1620 } 1621 1622 /* calc actual length */ 1623 if (ep->skip) { 1624 td->urb->iso_frame_desc[idx].actual_length = 0; 1625 /* Update ring dequeue pointer */ 1626 while (ep_ring->dequeue != td->last_trb) 1627 inc_deq(xhci, ep_ring, false); 1628 inc_deq(xhci, ep_ring, false); 1629 return finish_td(xhci, td, event_trb, event, ep, status, true); 1630 } 1631 1632 if (trb_comp_code == COMP_SUCCESS || skip_td == 1) { 1633 td->urb->iso_frame_desc[idx].actual_length = 1634 td->urb->iso_frame_desc[idx].length; 1635 td->urb->actual_length += 1636 td->urb->iso_frame_desc[idx].length; 1637 } else { 1638 for (cur_trb = ep_ring->dequeue, 1639 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 1640 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1641 if ((cur_trb->generic.field[3] & 1642 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && 1643 (cur_trb->generic.field[3] & 1644 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) 1645 len += 1646 TRB_LEN(cur_trb->generic.field[2]); 1647 } 1648 len += TRB_LEN(cur_trb->generic.field[2]) - 1649 TRB_LEN(event->transfer_len); 1650 1651 if (trb_comp_code != COMP_STOP_INVAL) { 1652 td->urb->iso_frame_desc[idx].actual_length = len; 1653 td->urb->actual_length += len; 1654 } 1655 } 1656 1657 if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS) 1658 *status = 0; 1659 1660 return finish_td(xhci, td, event_trb, event, ep, status, false); 1661 } 1662 1663 /* 1664 * Process bulk and interrupt tds, update urb 
status and actual_length. 1665 */ 1666 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 1667 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1668 struct xhci_virt_ep *ep, int *status) 1669 { 1670 struct xhci_ring *ep_ring; 1671 union xhci_trb *cur_trb; 1672 struct xhci_segment *cur_seg; 1673 u32 trb_comp_code; 1674 1675 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1676 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1677 1678 switch (trb_comp_code) { 1679 case COMP_SUCCESS: 1680 /* Double check that the HW transferred everything. */ 1681 if (event_trb != td->last_trb) { 1682 xhci_warn(xhci, "WARN Successful completion " 1683 "on short TX\n"); 1684 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1685 *status = -EREMOTEIO; 1686 else 1687 *status = 0; 1688 } else { 1689 if (usb_endpoint_xfer_bulk(&td->urb->ep->desc)) 1690 xhci_dbg(xhci, "Successful bulk " 1691 "transfer!\n"); 1692 else 1693 xhci_dbg(xhci, "Successful interrupt " 1694 "transfer!\n"); 1695 *status = 0; 1696 } 1697 break; 1698 case COMP_SHORT_TX: 1699 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1700 *status = -EREMOTEIO; 1701 else 1702 *status = 0; 1703 break; 1704 default: 1705 /* Others already handled above */ 1706 break; 1707 } 1708 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 1709 "%d bytes untransferred\n", 1710 td->urb->ep->desc.bEndpointAddress, 1711 td->urb->transfer_buffer_length, 1712 TRB_LEN(event->transfer_len)); 1713 /* Fast path - was this the last TRB in the TD for this URB? */ 1714 if (event_trb == td->last_trb) { 1715 if (TRB_LEN(event->transfer_len) != 0) { 1716 td->urb->actual_length = 1717 td->urb->transfer_buffer_length - 1718 TRB_LEN(event->transfer_len); 1719 if (td->urb->transfer_buffer_length < 1720 td->urb->actual_length) { 1721 xhci_warn(xhci, "HC gave bad length " 1722 "of %d bytes left\n", 1723 TRB_LEN(event->transfer_len)); 1724 td->urb->actual_length = 0; 1725 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1726 *status = -EREMOTEIO; 1727 else 1728 *status = 0; 1729 } 1730 /* Don't overwrite a previously set error code */ 1731 if (*status == -EINPROGRESS) { 1732 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1733 *status = -EREMOTEIO; 1734 else 1735 *status = 0; 1736 } 1737 } else { 1738 td->urb->actual_length = 1739 td->urb->transfer_buffer_length; 1740 /* Ignore a short packet completion if the 1741 * untransferred length was zero. 1742 */ 1743 if (*status == -EREMOTEIO) 1744 *status = 0; 1745 } 1746 } else { 1747 /* Slow path - walk the list, starting from the dequeue 1748 * pointer, to get the actual length transferred. 
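 * The walk below sums TRB_LEN() for each TRB between the ring's dequeue
 * pointer and the TRB that generated the event, skipping no-op and link
 * TRBs, and then, unless we stopped on a link or no-op TRB, adds in the
 * completed portion of the TRB that generated the event.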
1749 */ 1750 td->urb->actual_length = 0; 1751 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 1752 cur_trb != event_trb; 1753 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1754 if ((cur_trb->generic.field[3] & 1755 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && 1756 (cur_trb->generic.field[3] & 1757 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) 1758 td->urb->actual_length += 1759 TRB_LEN(cur_trb->generic.field[2]); 1760 } 1761 /* If the ring didn't stop on a Link or No-op TRB, add 1762 * in the actual bytes transferred from the Normal TRB 1763 */ 1764 if (trb_comp_code != COMP_STOP_INVAL) 1765 td->urb->actual_length += 1766 TRB_LEN(cur_trb->generic.field[2]) - 1767 TRB_LEN(event->transfer_len); 1768 } 1769 1770 return finish_td(xhci, td, event_trb, event, ep, status, false); 1771 } 1772 1773 /* 1774 * If this function returns an error condition, it means it got a Transfer 1775 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 1776 * At this point, the host controller is probably hosed and should be reset. 1777 */ 1778 static int handle_tx_event(struct xhci_hcd *xhci, 1779 struct xhci_transfer_event *event) 1780 { 1781 struct xhci_virt_device *xdev; 1782 struct xhci_virt_ep *ep; 1783 struct xhci_ring *ep_ring; 1784 unsigned int slot_id; 1785 int ep_index; 1786 struct xhci_td *td = NULL; 1787 dma_addr_t event_dma; 1788 struct xhci_segment *event_seg; 1789 union xhci_trb *event_trb; 1790 struct urb *urb = NULL; 1791 int status = -EINPROGRESS; 1792 struct urb_priv *urb_priv; 1793 struct xhci_ep_ctx *ep_ctx; 1794 u32 trb_comp_code; 1795 int ret = 0; 1796 1797 slot_id = TRB_TO_SLOT_ID(event->flags); 1798 xdev = xhci->devs[slot_id]; 1799 if (!xdev) { 1800 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 1801 return -ENODEV; 1802 } 1803 1804 /* Endpoint ID is 1 based, our index is zero based */ 1805 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1806 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); 1807 ep = &xdev->eps[ep_index]; 1808 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1809 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1810 if (!ep_ring || 1811 (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 1812 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1813 "or incorrect stream ring\n"); 1814 return -ENODEV; 1815 } 1816 1817 event_dma = event->buffer; 1818 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1819 /* Look for common error cases */ 1820 switch (trb_comp_code) { 1821 /* Skip codes that require special handling depending on 1822 * transfer type 1823 */ 1824 case COMP_SUCCESS: 1825 case COMP_SHORT_TX: 1826 break; 1827 case COMP_STOP: 1828 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); 1829 break; 1830 case COMP_STOP_INVAL: 1831 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); 1832 break; 1833 case COMP_STALL: 1834 xhci_warn(xhci, "WARN: Stalled endpoint\n"); 1835 ep->ep_state |= EP_HALTED; 1836 status = -EPIPE; 1837 break; 1838 case COMP_TRB_ERR: 1839 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 1840 status = -EILSEQ; 1841 break; 1842 case COMP_SPLIT_ERR: 1843 case COMP_TX_ERR: 1844 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 1845 status = -EPROTO; 1846 break; 1847 case COMP_BABBLE: 1848 xhci_warn(xhci, "WARN: babble error on endpoint\n"); 1849 status = -EOVERFLOW; 1850 break; 1851 case COMP_DB_ERR: 1852 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 1853 status = -ENOSR; 1854 break; 1855 case COMP_BW_OVER: 1856 xhci_warn(xhci, "WARN: bandwidth overrun event on 
endpoint\n"); 1857 break; 1858 case COMP_BUFF_OVER: 1859 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); 1860 break; 1861 case COMP_UNDERRUN: 1862 /* 1863 * When the Isoch ring is empty, the xHC will generate 1864 * a Ring Overrun Event for IN Isoch endpoint or Ring 1865 * Underrun Event for OUT Isoch endpoint. 1866 */ 1867 xhci_dbg(xhci, "underrun event on endpoint\n"); 1868 if (!list_empty(&ep_ring->td_list)) 1869 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 1870 "still with TDs queued?\n", 1871 TRB_TO_SLOT_ID(event->flags), ep_index); 1872 goto cleanup; 1873 case COMP_OVERRUN: 1874 xhci_dbg(xhci, "overrun event on endpoint\n"); 1875 if (!list_empty(&ep_ring->td_list)) 1876 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 1877 "still with TDs queued?\n", 1878 TRB_TO_SLOT_ID(event->flags), ep_index); 1879 goto cleanup; 1880 case COMP_MISSED_INT: 1881 /* 1882 * When a Missed Service Error is encountered, one or more isoc TDs 1883 * may have been missed by the xHC. 1884 * Set the skip flag on the endpoint; the missed TDs are completed as 1885 * short transfers the next time the ep_ring is processed. 1886 */ 1887 ep->skip = true; 1888 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 1889 goto cleanup; 1890 default: 1891 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 1892 status = 0; 1893 break; 1894 } 1895 xhci_warn(xhci, "ERROR Unknown event condition, HC probably " 1896 "busted\n"); 1897 goto cleanup; 1898 } 1899 1900 do { 1901 /* This TRB should be in the TD at the head of this ring's 1902 * TD list. 1903 */ 1904 if (list_empty(&ep_ring->td_list)) { 1905 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " 1906 "with no TDs queued?\n", 1907 TRB_TO_SLOT_ID(event->flags), ep_index); 1908 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 1909 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); 1910 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 1911 if (ep->skip) { 1912 ep->skip = false; 1913 xhci_dbg(xhci, "td_list is empty while skip " 1914 "flag set. Clear skip flag.\n"); 1915 } 1916 ret = 0; 1917 goto cleanup; 1918 } 1919 1920 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 1921 /* Is this a TRB in the currently executing TD? */ 1922 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 1923 td->last_trb, event_dma); 1924 if (event_seg && ep->skip) { 1925 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 1926 ep->skip = false; 1927 } 1928 if (!event_seg && 1929 (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) { 1930 /* HC is busted, give up! */ 1931 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " 1932 "part of current TD\n"); 1933 return -ESHUTDOWN; 1934 } 1935 1936 if (event_seg) { 1937 event_trb = &event_seg->trbs[(event_dma - 1938 event_seg->dma) / sizeof(*event_trb)]; 1939 /* 1940 * A no-op TRB should not trigger interrupts. 1941 * If event_trb is a no-op TRB, it means the 1942 * corresponding TD has been cancelled. Just ignore 1943 * the TD. 1944 */ 1945 if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) 1946 == TRB_TYPE(TRB_TR_NOOP)) { 1947 xhci_dbg(xhci, "event_trb is a no-op TRB.
" 1948 "Skip it\n"); 1949 goto cleanup; 1950 } 1951 } 1952 1953 /* Now update the urb's actual_length and give back to 1954 * the core 1955 */ 1956 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 1957 ret = process_ctrl_td(xhci, td, event_trb, event, ep, 1958 &status); 1959 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 1960 ret = process_isoc_td(xhci, td, event_trb, event, ep, 1961 &status); 1962 else 1963 ret = process_bulk_intr_td(xhci, td, event_trb, event, 1964 ep, &status); 1965 1966 cleanup: 1967 /* 1968 * Do not update the event ring dequeue pointer if ep->skip is set; 1969 * we will roll back to continue processing the missed TDs. 1970 */ 1971 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 1972 inc_deq(xhci, xhci->event_ring, true); 1973 } 1974 1975 if (ret) { 1976 urb = td->urb; 1977 urb_priv = urb->hcpriv; 1978 /* Leave the TD around for the reset endpoint function 1979 * to use (but only if it's not a control endpoint, 1980 * since we already queued the Set TR dequeue pointer 1981 * command for stalled control endpoints). 1982 */ 1983 if (usb_endpoint_xfer_control(&urb->ep->desc) || 1984 (trb_comp_code != COMP_STALL && 1985 trb_comp_code != COMP_BABBLE)) 1986 xhci_urb_free_priv(xhci, urb_priv); 1987 1988 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); 1989 xhci_dbg(xhci, "Giveback URB %p, len = %d, " 1990 "status = %d\n", 1991 urb, urb->actual_length, status); 1992 spin_unlock(&xhci->lock); 1993 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); 1994 spin_lock(&xhci->lock); 1995 } 1996 1997 /* 1998 * If ep->skip is set, there are missed TDs on the 1999 * endpoint ring that need to be taken care of. 2000 * Process them as short transfers until we reach the TD pointed to 2001 * by the event. 2002 */ 2003 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2004 2005 return 0; 2006 } 2007 2008 /* 2009 * This function handles all OS-owned events on the event ring. It may drop 2010 * xhci->lock between event processing (e.g. to pass up port status changes). 2011 */ 2012 static void xhci_handle_event(struct xhci_hcd *xhci) 2013 { 2014 union xhci_trb *event; 2015 int update_ptrs = 1; 2016 int ret; 2017 2018 xhci_dbg(xhci, "In %s\n", __func__); 2019 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2020 xhci->error_bitmask |= 1 << 1; 2021 return; 2022 } 2023 2024 event = xhci->event_ring->dequeue; 2025 /* Does the HC or OS own the TRB? */ 2026 if ((event->event_cmd.flags & TRB_CYCLE) != 2027 xhci->event_ring->cycle_state) { 2028 xhci->error_bitmask |= 1 << 2; 2029 return; 2030 } 2031 xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); 2032 2033 /* FIXME: Handle more event types.
*/ 2034 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { 2035 case TRB_TYPE(TRB_COMPLETION): 2036 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); 2037 handle_cmd_completion(xhci, &event->event_cmd); 2038 xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__); 2039 break; 2040 case TRB_TYPE(TRB_PORT_STATUS): 2041 xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__); 2042 handle_port_status(xhci, event); 2043 xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__); 2044 update_ptrs = 0; 2045 break; 2046 case TRB_TYPE(TRB_TRANSFER): 2047 xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__); 2048 ret = handle_tx_event(xhci, &event->trans_event); 2049 xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__); 2050 if (ret < 0) 2051 xhci->error_bitmask |= 1 << 9; 2052 else 2053 update_ptrs = 0; 2054 break; 2055 default: 2056 if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48)) 2057 handle_vendor_event(xhci, event); 2058 else 2059 xhci->error_bitmask |= 1 << 3; 2060 } 2061 /* Any of the above functions may drop and re-acquire the lock, so check 2062 * to make sure a watchdog timer didn't mark the host as non-responsive. 2063 */ 2064 if (xhci->xhc_state & XHCI_STATE_DYING) { 2065 xhci_dbg(xhci, "xHCI host dying, returning from " 2066 "event handler.\n"); 2067 return; 2068 } 2069 2070 if (update_ptrs) 2071 /* Update SW event ring dequeue pointer */ 2072 inc_deq(xhci, xhci->event_ring, true); 2073 2074 /* Are there more items on the event ring? */ 2075 xhci_handle_event(xhci); 2076 } 2077 2078 /* 2079 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2080 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2081 * indicators of an event TRB error, but we check the status *first* to be safe. 2082 */ 2083 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2084 { 2085 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2086 u32 status; 2087 union xhci_trb *trb; 2088 u64 temp_64; 2089 union xhci_trb *event_ring_deq; 2090 dma_addr_t deq; 2091 2092 spin_lock(&xhci->lock); 2093 trb = xhci->event_ring->dequeue; 2094 /* Check if the xHC generated the interrupt, or the irq is shared */ 2095 status = xhci_readl(xhci, &xhci->op_regs->status); 2096 if (status == 0xffffffff) 2097 goto hw_died; 2098 2099 if (!(status & STS_EINT)) { 2100 spin_unlock(&xhci->lock); 2101 return IRQ_NONE; 2102 } 2103 xhci_dbg(xhci, "op reg status = %08x\n", status); 2104 xhci_dbg(xhci, "Event ring dequeue ptr:\n"); 2105 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", 2106 (unsigned long long) 2107 xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), 2108 lower_32_bits(trb->link.segment_ptr), 2109 upper_32_bits(trb->link.segment_ptr), 2110 (unsigned int) trb->link.intr_target, 2111 (unsigned int) trb->link.control); 2112 2113 if (status & STS_FATAL) { 2114 xhci_warn(xhci, "WARNING: Host System Error\n"); 2115 xhci_halt(xhci); 2116 hw_died: 2117 xhci_to_hcd(xhci)->state = HC_STATE_HALT; 2118 spin_unlock(&xhci->lock); 2119 return -ESHUTDOWN; 2120 } 2121 2122 /* 2123 * Clear the op reg interrupt status first, 2124 * so we can receive interrupts from other MSI-X interrupters. 2125 * Write 1 to clear the interrupt status. 
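 * Both USBSTS.EINT here and the IMAN interrupt pending bit further down
 * are write-1-to-clear, which is why the code ORs the bits into the value
 * it just read and writes that back rather than trying to mask them off.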
2126 */ 2127 status |= STS_EINT; 2128 xhci_writel(xhci, status, &xhci->op_regs->status); 2129 /* FIXME when MSI-X is supported and there are multiple vectors */ 2130 /* Clear the MSI-X event interrupt status */ 2131 2132 if (hcd->irq != -1) { 2133 u32 irq_pending; 2134 /* Acknowledge the PCI interrupt */ 2135 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); 2136 irq_pending |= 0x3; 2137 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending); 2138 } 2139 2140 if (xhci->xhc_state & XHCI_STATE_DYING) { 2141 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2142 "Shouldn't IRQs be disabled?\n"); 2143 /* Clear the event handler busy flag (RW1C); 2144 * the event ring should be empty. 2145 */ 2146 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2147 xhci_write_64(xhci, temp_64 | ERST_EHB, 2148 &xhci->ir_set->erst_dequeue); 2149 spin_unlock(&xhci->lock); 2150 2151 return IRQ_HANDLED; 2152 } 2153 2154 event_ring_deq = xhci->event_ring->dequeue; 2155 /* FIXME this should be a delayed service routine 2156 * that clears the EHB. 2157 */ 2158 xhci_handle_event(xhci); 2159 2160 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2161 /* If necessary, update the HW's version of the event ring deq ptr. */ 2162 if (event_ring_deq != xhci->event_ring->dequeue) { 2163 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2164 xhci->event_ring->dequeue); 2165 if (deq == 0) 2166 xhci_warn(xhci, "WARN something wrong with SW event " 2167 "ring dequeue ptr.\n"); 2168 /* Update HC event ring dequeue pointer */ 2169 temp_64 &= ERST_PTR_MASK; 2170 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2171 } 2172 2173 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2174 temp_64 |= ERST_EHB; 2175 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2176 2177 spin_unlock(&xhci->lock); 2178 2179 return IRQ_HANDLED; 2180 } 2181 2182 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd) 2183 { 2184 irqreturn_t ret; 2185 2186 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); 2187 2188 ret = xhci_irq(hcd); 2189 2190 return ret; 2191 } 2192 2193 /**** Endpoint Ring Operations ****/ 2194 2195 /* 2196 * Generic function for queueing a TRB on a ring. 2197 * The caller must have checked to make sure there's room on the ring. 2198 * 2199 * @more_trbs_coming: Will you enqueue more TRBs before calling 2200 * prepare_transfer()? 2201 */ 2202 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2203 bool consumer, bool more_trbs_coming, 2204 u32 field1, u32 field2, u32 field3, u32 field4) 2205 { 2206 struct xhci_generic_trb *trb; 2207 2208 trb = &ring->enqueue->generic; 2209 trb->field[0] = field1; 2210 trb->field[1] = field2; 2211 trb->field[2] = field3; 2212 trb->field[3] = field4; 2213 inc_enq(xhci, ring, consumer, more_trbs_coming); 2214 } 2215 2216 /* 2217 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2218 * FIXME allocate segments if the ring is full. 2219 */ 2220 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2221 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2222 { 2223 /* Make sure the endpoint has been added to xHC schedule */ 2224 xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state); 2225 switch (ep_state) { 2226 case EP_STATE_DISABLED: 2227 /* 2228 * USB core changed config/interfaces without notifying us, 2229 * or hardware is reporting the wrong state. 
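 * Either way the URB can't be queued on this endpoint, so the
 * submission is rejected with -ENOENT below.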
2230 */ 2231 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2232 return -ENOENT; 2233 case EP_STATE_ERROR: 2234 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2235 /* FIXME event handling code for error needs to clear it */ 2236 /* XXX not sure if this should be -ENOENT or not */ 2237 return -EINVAL; 2238 case EP_STATE_HALTED: 2239 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2240 case EP_STATE_STOPPED: 2241 case EP_STATE_RUNNING: 2242 break; 2243 default: 2244 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2245 /* 2246 * FIXME issue Configure Endpoint command to try to get the HC 2247 * back into a known state. 2248 */ 2249 return -EINVAL; 2250 } 2251 if (!room_on_ring(xhci, ep_ring, num_trbs)) { 2252 /* FIXME allocate more room */ 2253 xhci_err(xhci, "ERROR no room on ep ring\n"); 2254 return -ENOMEM; 2255 } 2256 2257 if (enqueue_is_link_trb(ep_ring)) { 2258 struct xhci_ring *ring = ep_ring; 2259 union xhci_trb *next; 2260 2261 xhci_dbg(xhci, "prepare_ring: pointing to link trb\n"); 2262 next = ring->enqueue; 2263 2264 while (last_trb(xhci, ring, ring->enq_seg, next)) { 2265 2266 /* If we're not dealing with 0.95 hardware, 2267 * clear the chain bit. 2268 */ 2269 if (!xhci_link_trb_quirk(xhci)) 2270 next->link.control &= ~TRB_CHAIN; 2271 else 2272 next->link.control |= TRB_CHAIN; 2273 2274 wmb(); 2275 next->link.control ^= (u32) TRB_CYCLE; 2276 2277 /* Toggle the cycle bit after the last ring segment. */ 2278 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 2279 ring->cycle_state = (ring->cycle_state ? 0 : 1); 2280 if (!in_interrupt()) { 2281 xhci_dbg(xhci, "queue_trb: Toggle cycle " 2282 "state for ring %p = %i\n", 2283 ring, (unsigned int)ring->cycle_state); 2284 } 2285 } 2286 ring->enq_seg = ring->enq_seg->next; 2287 ring->enqueue = ring->enq_seg->trbs; 2288 next = ring->enqueue; 2289 } 2290 } 2291 2292 return 0; 2293 } 2294 2295 static int prepare_transfer(struct xhci_hcd *xhci, 2296 struct xhci_virt_device *xdev, 2297 unsigned int ep_index, 2298 unsigned int stream_id, 2299 unsigned int num_trbs, 2300 struct urb *urb, 2301 unsigned int td_index, 2302 gfp_t mem_flags) 2303 { 2304 int ret; 2305 struct urb_priv *urb_priv; 2306 struct xhci_td *td; 2307 struct xhci_ring *ep_ring; 2308 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2309 2310 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2311 if (!ep_ring) { 2312 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2313 stream_id); 2314 return -EINVAL; 2315 } 2316 2317 ret = prepare_ring(xhci, ep_ring, 2318 ep_ctx->ep_info & EP_STATE_MASK, 2319 num_trbs, mem_flags); 2320 if (ret) 2321 return ret; 2322 2323 urb_priv = urb->hcpriv; 2324 td = urb_priv->td[td_index]; 2325 2326 INIT_LIST_HEAD(&td->td_list); 2327 INIT_LIST_HEAD(&td->cancelled_td_list); 2328 2329 if (td_index == 0) { 2330 ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb); 2331 if (unlikely(ret)) { 2332 xhci_urb_free_priv(xhci, urb_priv); 2333 urb->hcpriv = NULL; 2334 return ret; 2335 } 2336 } 2337 2338 td->urb = urb; 2339 /* Add this TD to the tail of the endpoint ring's TD list */ 2340 list_add_tail(&td->td_list, &ep_ring->td_list); 2341 td->start_seg = ep_ring->enq_seg; 2342 td->first_trb = ep_ring->enqueue; 2343 2344 urb_priv->td[td_index] = td; 2345 2346 return 0; 2347 } 2348 2349 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) 2350 { 2351 int num_sgs, num_trbs, running_total, temp, i; 2352 struct scatterlist *sg; 2353 
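/* A TRB buffer may not cross a 64KB boundary, so each sg entry needs one
 * TRB for the chunk up to the next boundary (if the entry doesn't start on
 * one) plus one more per additional 64KB.  Purely illustrative example: an
 * 8KB entry whose DMA address sits 16 bytes below a 64KB boundary counts
 * as two TRBs, a 16-byte one followed by an 8176-byte one.
 */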
2354 sg = NULL; 2355 num_sgs = urb->num_sgs; 2356 temp = urb->transfer_buffer_length; 2357 2358 xhci_dbg(xhci, "count sg list trbs: \n"); 2359 num_trbs = 0; 2360 for_each_sg(urb->sg, sg, num_sgs, i) { 2361 unsigned int previous_total_trbs = num_trbs; 2362 unsigned int len = sg_dma_len(sg); 2363 2364 /* Scatter gather list entries may cross 64KB boundaries */ 2365 running_total = TRB_MAX_BUFF_SIZE - 2366 (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2367 if (running_total != 0) 2368 num_trbs++; 2369 2370 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2371 while (running_total < sg_dma_len(sg)) { 2372 num_trbs++; 2373 running_total += TRB_MAX_BUFF_SIZE; 2374 } 2375 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n", 2376 i, (unsigned long long)sg_dma_address(sg), 2377 len, len, num_trbs - previous_total_trbs); 2378 2379 len = min_t(int, len, temp); 2380 temp -= len; 2381 if (temp == 0) 2382 break; 2383 } 2384 xhci_dbg(xhci, "\n"); 2385 if (!in_interrupt()) 2386 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, " 2387 "num_trbs = %d\n", 2388 urb->ep->desc.bEndpointAddress, 2389 urb->transfer_buffer_length, 2390 num_trbs); 2391 return num_trbs; 2392 } 2393 2394 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2395 { 2396 if (num_trbs != 0) 2397 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2398 "TRBs, %d left\n", __func__, 2399 urb->ep->desc.bEndpointAddress, num_trbs); 2400 if (running_total != urb->transfer_buffer_length) 2401 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2402 "queued %#x (%d), asked for %#x (%d)\n", 2403 __func__, 2404 urb->ep->desc.bEndpointAddress, 2405 running_total, running_total, 2406 urb->transfer_buffer_length, 2407 urb->transfer_buffer_length); 2408 } 2409 2410 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2411 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2412 struct xhci_generic_trb *start_trb) 2413 { 2414 /* 2415 * Pass all the TRBs to the hardware at once and make sure this write 2416 * isn't reordered. 2417 */ 2418 wmb(); 2419 if (start_cycle) 2420 start_trb->field[3] |= start_cycle; 2421 else 2422 start_trb->field[3] &= ~0x1; 2423 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2424 } 2425 2426 /* 2427 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 2428 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 2429 * (comprised of sg list entries) can take several service intervals to 2430 * transmit. 2431 */ 2432 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2433 struct urb *urb, int slot_id, unsigned int ep_index) 2434 { 2435 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 2436 xhci->devs[slot_id]->out_ctx, ep_index); 2437 int xhci_interval; 2438 int ep_interval; 2439 2440 xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); 2441 ep_interval = urb->interval; 2442 /* Convert to microframes */ 2443 if (urb->dev->speed == USB_SPEED_LOW || 2444 urb->dev->speed == USB_SPEED_FULL) 2445 ep_interval *= 8; 2446 /* FIXME change this to a warning and a suggestion to use the new API 2447 * to set the polling interval (once the API is added). 2448 */ 2449 if (xhci_interval != ep_interval) { 2450 if (printk_ratelimit()) 2451 dev_dbg(&urb->dev->dev, "Driver uses different interval" 2452 " (%d microframe%s) than xHCI " 2453 "(%d microframe%s)\n", 2454 ep_interval, 2455 ep_interval == 1 ? "" : "s", 2456 xhci_interval, 2457 xhci_interval == 1 ? 
"" : "s"); 2458 urb->interval = xhci_interval; 2459 /* Convert back to frames for LS/FS devices */ 2460 if (urb->dev->speed == USB_SPEED_LOW || 2461 urb->dev->speed == USB_SPEED_FULL) 2462 urb->interval /= 8; 2463 } 2464 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 2465 } 2466 2467 /* 2468 * The TD size is the number of bytes remaining in the TD (including this TRB), 2469 * right shifted by 10. 2470 * It must fit in bits 21:17, so it can't be bigger than 31. 2471 */ 2472 static u32 xhci_td_remainder(unsigned int remainder) 2473 { 2474 u32 max = (1 << (21 - 17 + 1)) - 1; 2475 2476 if ((remainder >> 10) >= max) 2477 return max << 17; 2478 else 2479 return (remainder >> 10) << 17; 2480 } 2481 2482 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2483 struct urb *urb, int slot_id, unsigned int ep_index) 2484 { 2485 struct xhci_ring *ep_ring; 2486 unsigned int num_trbs; 2487 struct urb_priv *urb_priv; 2488 struct xhci_td *td; 2489 struct scatterlist *sg; 2490 int num_sgs; 2491 int trb_buff_len, this_sg_len, running_total; 2492 bool first_trb; 2493 u64 addr; 2494 bool more_trbs_coming; 2495 2496 struct xhci_generic_trb *start_trb; 2497 int start_cycle; 2498 2499 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2500 if (!ep_ring) 2501 return -EINVAL; 2502 2503 num_trbs = count_sg_trbs_needed(xhci, urb); 2504 num_sgs = urb->num_sgs; 2505 2506 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 2507 ep_index, urb->stream_id, 2508 num_trbs, urb, 0, mem_flags); 2509 if (trb_buff_len < 0) 2510 return trb_buff_len; 2511 2512 urb_priv = urb->hcpriv; 2513 td = urb_priv->td[0]; 2514 2515 /* 2516 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2517 * until we've finished creating all the other TRBs. The ring's cycle 2518 * state may change as we enqueue the other TRBs, so save it too. 2519 */ 2520 start_trb = &ep_ring->enqueue->generic; 2521 start_cycle = ep_ring->cycle_state; 2522 2523 running_total = 0; 2524 /* 2525 * How much data is in the first TRB? 2526 * 2527 * There are three forces at work for TRB buffer pointers and lengths: 2528 * 1. We don't want to walk off the end of this sg-list entry buffer. 2529 * 2. The transfer length that the driver requested may be smaller than 2530 * the amount of memory allocated for this scatter-gather list. 2531 * 3. TRBs buffers can't cross 64KB boundaries. 2532 */ 2533 sg = urb->sg; 2534 addr = (u64) sg_dma_address(sg); 2535 this_sg_len = sg_dma_len(sg); 2536 trb_buff_len = TRB_MAX_BUFF_SIZE - 2537 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2538 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2539 if (trb_buff_len > urb->transfer_buffer_length) 2540 trb_buff_len = urb->transfer_buffer_length; 2541 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n", 2542 trb_buff_len); 2543 2544 first_trb = true; 2545 /* Queue the first TRB, even if it's zero-length */ 2546 do { 2547 u32 field = 0; 2548 u32 length_field = 0; 2549 u32 remainder = 0; 2550 2551 /* Don't change the cycle bit of the first TRB until later */ 2552 if (first_trb) { 2553 first_trb = false; 2554 if (start_cycle == 0) 2555 field |= 0x1; 2556 } else 2557 field |= ep_ring->cycle_state; 2558 2559 /* Chain all the TRBs together; clear the chain bit in the last 2560 * TRB to indicate it's the last TRB in the chain. 
2561 */ 2562 if (num_trbs > 1) { 2563 field |= TRB_CHAIN; 2564 } else { 2565 /* FIXME - add check for ZERO_PACKET flag before this */ 2566 td->last_trb = ep_ring->enqueue; 2567 field |= TRB_IOC; 2568 } 2569 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " 2570 "64KB boundary at %#x, end dma = %#x\n", 2571 (unsigned int) addr, trb_buff_len, trb_buff_len, 2572 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2573 (unsigned int) addr + trb_buff_len); 2574 if (TRB_MAX_BUFF_SIZE - 2575 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { 2576 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2577 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2578 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2579 (unsigned int) addr + trb_buff_len); 2580 } 2581 remainder = xhci_td_remainder(urb->transfer_buffer_length - 2582 running_total) ; 2583 length_field = TRB_LEN(trb_buff_len) | 2584 remainder | 2585 TRB_INTR_TARGET(0); 2586 if (num_trbs > 1) 2587 more_trbs_coming = true; 2588 else 2589 more_trbs_coming = false; 2590 queue_trb(xhci, ep_ring, false, more_trbs_coming, 2591 lower_32_bits(addr), 2592 upper_32_bits(addr), 2593 length_field, 2594 /* We always want to know if the TRB was short, 2595 * or we won't get an event when it completes. 2596 * (Unless we use event data TRBs, which are a 2597 * waste of space and HC resources.) 2598 */ 2599 field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); 2600 --num_trbs; 2601 running_total += trb_buff_len; 2602 2603 /* Calculate length for next transfer -- 2604 * Are we done queueing all the TRBs for this sg entry? 2605 */ 2606 this_sg_len -= trb_buff_len; 2607 if (this_sg_len == 0) { 2608 --num_sgs; 2609 if (num_sgs == 0) 2610 break; 2611 sg = sg_next(sg); 2612 addr = (u64) sg_dma_address(sg); 2613 this_sg_len = sg_dma_len(sg); 2614 } else { 2615 addr += trb_buff_len; 2616 } 2617 2618 trb_buff_len = TRB_MAX_BUFF_SIZE - 2619 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2620 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2621 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2622 trb_buff_len = 2623 urb->transfer_buffer_length - running_total; 2624 } while (running_total < urb->transfer_buffer_length); 2625 2626 check_trb_math(urb, num_trbs, running_total); 2627 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2628 start_cycle, start_trb); 2629 return 0; 2630 } 2631 2632 /* This is very similar to what ehci-q.c qtd_fill() does */ 2633 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2634 struct urb *urb, int slot_id, unsigned int ep_index) 2635 { 2636 struct xhci_ring *ep_ring; 2637 struct urb_priv *urb_priv; 2638 struct xhci_td *td; 2639 int num_trbs; 2640 struct xhci_generic_trb *start_trb; 2641 bool first_trb; 2642 bool more_trbs_coming; 2643 int start_cycle; 2644 u32 field, length_field; 2645 2646 int running_total, trb_buff_len, ret; 2647 u64 addr; 2648 2649 if (urb->num_sgs) 2650 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 2651 2652 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2653 if (!ep_ring) 2654 return -EINVAL; 2655 2656 num_trbs = 0; 2657 /* How much data is (potentially) left before the 64KB boundary? 
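 * A TRB buffer may not cross a 64KB boundary, so that first chunk (if any)
 * gets its own TRB and every further 64KB needs another; e.g. (illustrative
 * numbers) a 4KB transfer starting 1KB below a boundary is counted, and
 * later queued, as a 1KB TRB followed by a 3KB TRB.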
*/ 2658 running_total = TRB_MAX_BUFF_SIZE - 2659 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2660 2661 /* If there's some data on this 64KB chunk, or we have to send a 2662 * zero-length transfer, we need at least one TRB 2663 */ 2664 if (running_total != 0 || urb->transfer_buffer_length == 0) 2665 num_trbs++; 2666 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2667 while (running_total < urb->transfer_buffer_length) { 2668 num_trbs++; 2669 running_total += TRB_MAX_BUFF_SIZE; 2670 } 2671 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 2672 2673 if (!in_interrupt()) 2674 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), " 2675 "addr = %#llx, num_trbs = %d\n", 2676 urb->ep->desc.bEndpointAddress, 2677 urb->transfer_buffer_length, 2678 urb->transfer_buffer_length, 2679 (unsigned long long)urb->transfer_dma, 2680 num_trbs); 2681 2682 ret = prepare_transfer(xhci, xhci->devs[slot_id], 2683 ep_index, urb->stream_id, 2684 num_trbs, urb, 0, mem_flags); 2685 if (ret < 0) 2686 return ret; 2687 2688 urb_priv = urb->hcpriv; 2689 td = urb_priv->td[0]; 2690 2691 /* 2692 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2693 * until we've finished creating all the other TRBs. The ring's cycle 2694 * state may change as we enqueue the other TRBs, so save it too. 2695 */ 2696 start_trb = &ep_ring->enqueue->generic; 2697 start_cycle = ep_ring->cycle_state; 2698 2699 running_total = 0; 2700 /* How much data is in the first TRB? */ 2701 addr = (u64) urb->transfer_dma; 2702 trb_buff_len = TRB_MAX_BUFF_SIZE - 2703 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2704 if (urb->transfer_buffer_length < trb_buff_len) 2705 trb_buff_len = urb->transfer_buffer_length; 2706 2707 first_trb = true; 2708 2709 /* Queue the first TRB, even if it's zero-length */ 2710 do { 2711 u32 remainder = 0; 2712 field = 0; 2713 2714 /* Don't change the cycle bit of the first TRB until later */ 2715 if (first_trb) { 2716 first_trb = false; 2717 if (start_cycle == 0) 2718 field |= 0x1; 2719 } else 2720 field |= ep_ring->cycle_state; 2721 2722 /* Chain all the TRBs together; clear the chain bit in the last 2723 * TRB to indicate it's the last TRB in the chain. 2724 */ 2725 if (num_trbs > 1) { 2726 field |= TRB_CHAIN; 2727 } else { 2728 /* FIXME - add check for ZERO_PACKET flag before this */ 2729 td->last_trb = ep_ring->enqueue; 2730 field |= TRB_IOC; 2731 } 2732 remainder = xhci_td_remainder(urb->transfer_buffer_length - 2733 running_total); 2734 length_field = TRB_LEN(trb_buff_len) | 2735 remainder | 2736 TRB_INTR_TARGET(0); 2737 if (num_trbs > 1) 2738 more_trbs_coming = true; 2739 else 2740 more_trbs_coming = false; 2741 queue_trb(xhci, ep_ring, false, more_trbs_coming, 2742 lower_32_bits(addr), 2743 upper_32_bits(addr), 2744 length_field, 2745 /* We always want to know if the TRB was short, 2746 * or we won't get an event when it completes. 2747 * (Unless we use event data TRBs, which are a 2748 * waste of space and HC resources.) 
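 * TRB_ISP ("interrupt on short packet") only forces an event for short
 * completions; the normal completion event for this TD still comes from
 * TRB_IOC on the last TRB queued above.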
2749 */ 2750 field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); 2751 --num_trbs; 2752 running_total += trb_buff_len; 2753 2754 /* Calculate length for next transfer */ 2755 addr += trb_buff_len; 2756 trb_buff_len = urb->transfer_buffer_length - running_total; 2757 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 2758 trb_buff_len = TRB_MAX_BUFF_SIZE; 2759 } while (running_total < urb->transfer_buffer_length); 2760 2761 check_trb_math(urb, num_trbs, running_total); 2762 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2763 start_cycle, start_trb); 2764 return 0; 2765 } 2766 2767 /* Caller must have locked xhci->lock */ 2768 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2769 struct urb *urb, int slot_id, unsigned int ep_index) 2770 { 2771 struct xhci_ring *ep_ring; 2772 int num_trbs; 2773 int ret; 2774 struct usb_ctrlrequest *setup; 2775 struct xhci_generic_trb *start_trb; 2776 int start_cycle; 2777 u32 field, length_field; 2778 struct urb_priv *urb_priv; 2779 struct xhci_td *td; 2780 2781 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2782 if (!ep_ring) 2783 return -EINVAL; 2784 2785 /* 2786 * Need to copy setup packet into setup TRB, so we can't use the setup 2787 * DMA address. 2788 */ 2789 if (!urb->setup_packet) 2790 return -EINVAL; 2791 2792 if (!in_interrupt()) 2793 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n", 2794 slot_id, ep_index); 2795 /* 1 TRB for setup, 1 for status */ 2796 num_trbs = 2; 2797 /* 2798 * Don't need to check if we need additional event data and normal TRBs, 2799 * since data in control transfers will never get bigger than 16MB 2800 * XXX: can we get a buffer that crosses 64KB boundaries? 2801 */ 2802 if (urb->transfer_buffer_length > 0) 2803 num_trbs++; 2804 ret = prepare_transfer(xhci, xhci->devs[slot_id], 2805 ep_index, urb->stream_id, 2806 num_trbs, urb, 0, mem_flags); 2807 if (ret < 0) 2808 return ret; 2809 2810 urb_priv = urb->hcpriv; 2811 td = urb_priv->td[0]; 2812 2813 /* 2814 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2815 * until we've finished creating all the other TRBs. The ring's cycle 2816 * state may change as we enqueue the other TRBs, so save it too. 2817 */ 2818 start_trb = &ep_ring->enqueue->generic; 2819 start_cycle = ep_ring->cycle_state; 2820 2821 /* Queue setup TRB - see section 6.4.1.2.1 */ 2822 /* FIXME better way to translate setup_packet into two u32 fields? */ 2823 setup = (struct usb_ctrlrequest *) urb->setup_packet; 2824 field = 0; 2825 field |= TRB_IDT | TRB_TYPE(TRB_SETUP); 2826 if (start_cycle == 0) 2827 field |= 0x1; 2828 queue_trb(xhci, ep_ring, false, true, 2829 /* FIXME endianness is probably going to bite my ass here. 
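 * (The wValue, wIndex and wLength fields of struct usb_ctrlrequest are
 * __le16, so a big-endian host would presumably need le16_to_cpu()
 * conversions before these fields are packed into the TRB words below.)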
*/ 2830 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, 2831 setup->wIndex | setup->wLength << 16, 2832 TRB_LEN(8) | TRB_INTR_TARGET(0), 2833 /* Immediate data in pointer */ 2834 field); 2835 2836 /* If there's data, queue data TRBs */ 2837 field = 0; 2838 length_field = TRB_LEN(urb->transfer_buffer_length) | 2839 xhci_td_remainder(urb->transfer_buffer_length) | 2840 TRB_INTR_TARGET(0); 2841 if (urb->transfer_buffer_length > 0) { 2842 if (setup->bRequestType & USB_DIR_IN) 2843 field |= TRB_DIR_IN; 2844 queue_trb(xhci, ep_ring, false, true, 2845 lower_32_bits(urb->transfer_dma), 2846 upper_32_bits(urb->transfer_dma), 2847 length_field, 2848 /* Event on short tx */ 2849 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); 2850 } 2851 2852 /* Save the DMA address of the last TRB in the TD */ 2853 td->last_trb = ep_ring->enqueue; 2854 2855 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 2856 /* If the device sent data, the status stage is an OUT transfer */ 2857 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 2858 field = 0; 2859 else 2860 field = TRB_DIR_IN; 2861 queue_trb(xhci, ep_ring, false, false, 2862 0, 2863 0, 2864 TRB_INTR_TARGET(0), 2865 /* Event on completion */ 2866 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 2867 2868 giveback_first_trb(xhci, slot_id, ep_index, 0, 2869 start_cycle, start_trb); 2870 return 0; 2871 } 2872 2873 static int count_isoc_trbs_needed(struct xhci_hcd *xhci, 2874 struct urb *urb, int i) 2875 { 2876 int num_trbs = 0; 2877 u64 addr, td_len, running_total; 2878 2879 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 2880 td_len = urb->iso_frame_desc[i].length; 2881 2882 running_total = TRB_MAX_BUFF_SIZE - 2883 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2884 if (running_total != 0) 2885 num_trbs++; 2886 2887 while (running_total < td_len) { 2888 num_trbs++; 2889 running_total += TRB_MAX_BUFF_SIZE; 2890 } 2891 2892 return num_trbs; 2893 } 2894 2895 /* This is for isoc transfer */ 2896 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2897 struct urb *urb, int slot_id, unsigned int ep_index) 2898 { 2899 struct xhci_ring *ep_ring; 2900 struct urb_priv *urb_priv; 2901 struct xhci_td *td; 2902 int num_tds, trbs_per_td; 2903 struct xhci_generic_trb *start_trb; 2904 bool first_trb; 2905 int start_cycle; 2906 u32 field, length_field; 2907 int running_total, trb_buff_len, td_len, td_remain_len, ret; 2908 u64 start_addr, addr; 2909 int i, j; 2910 bool more_trbs_coming; 2911 2912 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 2913 2914 num_tds = urb->number_of_packets; 2915 if (num_tds < 1) { 2916 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); 2917 return -EINVAL; 2918 } 2919 2920 if (!in_interrupt()) 2921 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d)," 2922 " addr = %#llx, num_tds = %d\n", 2923 urb->ep->desc.bEndpointAddress, 2924 urb->transfer_buffer_length, 2925 urb->transfer_buffer_length, 2926 (unsigned long long)urb->transfer_dma, 2927 num_tds); 2928 2929 start_addr = (u64) urb->transfer_dma; 2930 start_trb = &ep_ring->enqueue->generic; 2931 start_cycle = ep_ring->cycle_state; 2932 2933 /* Queue the first TRB, even if it's zero-length */ 2934 for (i = 0; i < num_tds; i++) { 2935 first_trb = true; 2936 2937 running_total = 0; 2938 addr = start_addr + urb->iso_frame_desc[i].offset; 2939 td_len = urb->iso_frame_desc[i].length; 2940 td_remain_len = td_len; 2941 2942 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); 2943 2944 ret = 
prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 2945 urb->stream_id, trbs_per_td, urb, i, mem_flags); 2946 if (ret < 0) 2947 return ret; 2948 2949 urb_priv = urb->hcpriv; 2950 td = urb_priv->td[i]; 2951 2952 for (j = 0; j < trbs_per_td; j++) { 2953 u32 remainder = 0; 2954 field = 0; 2955 2956 if (first_trb) { 2957 /* Queue the isoc TRB */ 2958 field |= TRB_TYPE(TRB_ISOC); 2959 /* Assume URB_ISO_ASAP is set */ 2960 field |= TRB_SIA; 2961 if (i == 0) { 2962 if (start_cycle == 0) 2963 field |= 0x1; 2964 } else 2965 field |= ep_ring->cycle_state; 2966 first_trb = false; 2967 } else { 2968 /* Queue other normal TRBs */ 2969 field |= TRB_TYPE(TRB_NORMAL); 2970 field |= ep_ring->cycle_state; 2971 } 2972 2973 /* Chain all the TRBs together; clear the chain bit in 2974 * the last TRB to indicate it's the last TRB in the 2975 * chain. 2976 */ 2977 if (j < trbs_per_td - 1) { 2978 field |= TRB_CHAIN; 2979 more_trbs_coming = true; 2980 } else { 2981 td->last_trb = ep_ring->enqueue; 2982 field |= TRB_IOC; 2983 more_trbs_coming = false; 2984 } 2985 2986 /* Calculate TRB length */ 2987 trb_buff_len = TRB_MAX_BUFF_SIZE - 2988 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2989 if (trb_buff_len > td_remain_len) 2990 trb_buff_len = td_remain_len; 2991 2992 remainder = xhci_td_remainder(td_len - running_total); 2993 length_field = TRB_LEN(trb_buff_len) | 2994 remainder | 2995 TRB_INTR_TARGET(0); 2996 queue_trb(xhci, ep_ring, false, more_trbs_coming, 2997 lower_32_bits(addr), 2998 upper_32_bits(addr), 2999 length_field, 3000 /* We always want to know if the TRB was short, 3001 * or we won't get an event when it completes. 3002 * (Unless we use event data TRBs, which are a 3003 * waste of space and HC resources.) 3004 */ 3005 field | TRB_ISP); 3006 running_total += trb_buff_len; 3007 3008 addr += trb_buff_len; 3009 td_remain_len -= trb_buff_len; 3010 } 3011 3012 /* Check TD length */ 3013 if (running_total != td_len) { 3014 xhci_err(xhci, "ISOC TD length unmatch\n"); 3015 return -EINVAL; 3016 } 3017 } 3018 3019 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3020 start_cycle, start_trb); 3021 return 0; 3022 } 3023 3024 /* 3025 * Check transfer ring to guarantee there is enough room for the urb. 3026 * Update ISO URB start_frame and interval. 3027 * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to 3028 * update the urb->start_frame by now. 3029 * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input. 3030 */ 3031 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, 3032 struct urb *urb, int slot_id, unsigned int ep_index) 3033 { 3034 struct xhci_virt_device *xdev; 3035 struct xhci_ring *ep_ring; 3036 struct xhci_ep_ctx *ep_ctx; 3037 int start_frame; 3038 int xhci_interval; 3039 int ep_interval; 3040 int num_tds, num_trbs, i; 3041 int ret; 3042 3043 xdev = xhci->devs[slot_id]; 3044 ep_ring = xdev->eps[ep_index].ring; 3045 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 3046 3047 num_trbs = 0; 3048 num_tds = urb->number_of_packets; 3049 for (i = 0; i < num_tds; i++) 3050 num_trbs += count_isoc_trbs_needed(xhci, urb, i); 3051 3052 /* Check the ring to guarantee there is enough room for the whole urb. 3053 * Do not insert any td of the urb to the ring if the check failed. 
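 * (The loop above already counted the TRBs for every isoc packet via
 * count_isoc_trbs_needed(), so prepare_ring() can accept or reject the
 * URB as a whole.)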
3054 */ 3055 ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK, 3056 num_trbs, mem_flags); 3057 if (ret) 3058 return ret; 3059 3060 start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index); 3061 start_frame &= 0x3fff; 3062 3063 urb->start_frame = start_frame; 3064 if (urb->dev->speed == USB_SPEED_LOW || 3065 urb->dev->speed == USB_SPEED_FULL) 3066 urb->start_frame >>= 3; 3067 3068 xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); 3069 ep_interval = urb->interval; 3070 /* Convert to microframes */ 3071 if (urb->dev->speed == USB_SPEED_LOW || 3072 urb->dev->speed == USB_SPEED_FULL) 3073 ep_interval *= 8; 3074 /* FIXME change this to a warning and a suggestion to use the new API 3075 * to set the polling interval (once the API is added). 3076 */ 3077 if (xhci_interval != ep_interval) { 3078 if (printk_ratelimit()) 3079 dev_dbg(&urb->dev->dev, "Driver uses different interval" 3080 " (%d microframe%s) than xHCI " 3081 "(%d microframe%s)\n", 3082 ep_interval, 3083 ep_interval == 1 ? "" : "s", 3084 xhci_interval, 3085 xhci_interval == 1 ? "" : "s"); 3086 urb->interval = xhci_interval; 3087 /* Convert back to frames for LS/FS devices */ 3088 if (urb->dev->speed == USB_SPEED_LOW || 3089 urb->dev->speed == USB_SPEED_FULL) 3090 urb->interval /= 8; 3091 } 3092 return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 3093 } 3094 3095 /**** Command Ring Operations ****/ 3096 3097 /* Generic function for queueing a command TRB on the command ring. 3098 * Check to make sure there's room on the command ring for one command TRB. 3099 * Also check that there's room reserved for commands that must not fail. 3100 * If this is a command that must not fail, meaning command_must_succeed = TRUE, 3101 * then only check for the number of reserved spots. 3102 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB 3103 * because the command event handler may want to resubmit a failed command. 3104 */ 3105 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, 3106 u32 field3, u32 field4, bool command_must_succeed) 3107 { 3108 int reserved_trbs = xhci->cmd_ring_reserved_trbs; 3109 int ret; 3110 3111 if (!command_must_succeed) 3112 reserved_trbs++; 3113 3114 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, 3115 reserved_trbs, GFP_ATOMIC); 3116 if (ret < 0) { 3117 xhci_err(xhci, "ERR: No room for command on command ring\n"); 3118 if (command_must_succeed) 3119 xhci_err(xhci, "ERR: Reserved TRB counting for " 3120 "unfailable commands failed.\n"); 3121 return ret; 3122 } 3123 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, 3124 field4 | xhci->cmd_ring->cycle_state); 3125 return 0; 3126 } 3127 3128 /* Queue a no-op command on the command ring */ 3129 static int queue_cmd_noop(struct xhci_hcd *xhci) 3130 { 3131 return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false); 3132 } 3133 3134 /* 3135 * Place a no-op command on the command ring to test the command and 3136 * event ring. 
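 * On success this returns the address of xhci_ring_cmd_db so the caller
 * can ring the command ring doorbell when it is ready; a NULL return means
 * the no-op command could not be queued.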
3137 */ 3138 void *xhci_setup_one_noop(struct xhci_hcd *xhci) 3139 { 3140 if (queue_cmd_noop(xhci) < 0) 3141 return NULL; 3142 xhci->noops_submitted++; 3143 return xhci_ring_cmd_db; 3144 } 3145 3146 /* Queue a slot enable or disable request on the command ring */ 3147 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) 3148 { 3149 return queue_command(xhci, 0, 0, 0, 3150 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); 3151 } 3152 3153 /* Queue an address device command TRB */ 3154 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3155 u32 slot_id) 3156 { 3157 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3158 upper_32_bits(in_ctx_ptr), 0, 3159 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id), 3160 false); 3161 } 3162 3163 int xhci_queue_vendor_command(struct xhci_hcd *xhci, 3164 u32 field1, u32 field2, u32 field3, u32 field4) 3165 { 3166 return queue_command(xhci, field1, field2, field3, field4, false); 3167 } 3168 3169 /* Queue a reset device command TRB */ 3170 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) 3171 { 3172 return queue_command(xhci, 0, 0, 0, 3173 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), 3174 false); 3175 } 3176 3177 /* Queue a configure endpoint command TRB */ 3178 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3179 u32 slot_id, bool command_must_succeed) 3180 { 3181 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3182 upper_32_bits(in_ctx_ptr), 0, 3183 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), 3184 command_must_succeed); 3185 } 3186 3187 /* Queue an evaluate context command TRB */ 3188 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3189 u32 slot_id) 3190 { 3191 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3192 upper_32_bits(in_ctx_ptr), 0, 3193 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), 3194 false); 3195 } 3196 3197 /* 3198 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop 3199 * activity on an endpoint that is about to be suspended. 3200 */ 3201 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, 3202 unsigned int ep_index, int suspend) 3203 { 3204 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3205 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3206 u32 type = TRB_TYPE(TRB_STOP_RING); 3207 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); 3208 3209 return queue_command(xhci, 0, 0, 0, 3210 trb_slot_id | trb_ep_index | type | trb_suspend, false); 3211 } 3212 3213 /* Set Transfer Ring Dequeue Pointer command. 3214 * This should not be used for endpoints that have streams enabled. 
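 * The new dequeue pointer's cycle state is OR'd into bit 0 of its DMA
 * address below (the Dequeue Cycle State bit of the command); TRBs are
 * 16-byte aligned, so the low bits of the pointer are otherwise zero.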
3215 */ 3216 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 3217 unsigned int ep_index, unsigned int stream_id, 3218 struct xhci_segment *deq_seg, 3219 union xhci_trb *deq_ptr, u32 cycle_state) 3220 { 3221 dma_addr_t addr; 3222 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3223 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3224 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); 3225 u32 type = TRB_TYPE(TRB_SET_DEQ); 3226 3227 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 3228 if (addr == 0) { 3229 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 3230 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 3231 deq_seg, deq_ptr); 3232 return 0; 3233 } 3234 return queue_command(xhci, lower_32_bits(addr) | cycle_state, 3235 upper_32_bits(addr), trb_stream_id, 3236 trb_slot_id | trb_ep_index | type, false); 3237 } 3238 3239 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, 3240 unsigned int ep_index) 3241 { 3242 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3243 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3244 u32 type = TRB_TYPE(TRB_RESET_EP); 3245 3246 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type, 3247 false); 3248 } 3249