1 /* 2 * Copyright (c) 2001-2004 by David Brownell 3 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License as published by the 7 * Free Software Foundation; either version 2 of the License, or (at your 8 * option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, but 11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software Foundation, 17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 */ 19 20 /* this file is part of ehci-hcd.c */ 21 22 /*-------------------------------------------------------------------------*/ 23 24 /* 25 * EHCI scheduled transaction support: interrupt, iso, split iso 26 * These are called "periodic" transactions in the EHCI spec. 27 * 28 * Note that for interrupt transfers, the QH/QTD manipulation is shared 29 * with the "asynchronous" transaction support (control/bulk transfers). 30 * The only real difference is in how interrupt transfers are scheduled. 31 * 32 * For ISO, we make an "iso_stream" head to serve the same role as a QH. 33 * It keeps track of every ITD (or SITD) that's linked, and holds enough 34 * pre-calculated schedule data to make appending to the queue be quick. 35 */ 36 37 static int ehci_get_frame (struct usb_hcd *hcd); 38 39 /* 40 * periodic_next_shadow - return "next" pointer on shadow list 41 * @periodic: host pointer to qh/itd/sitd 42 * @tag: hardware tag for type of this record 43 */ 44 static union ehci_shadow * 45 periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic, 46 __hc32 tag) 47 { 48 switch (hc32_to_cpu(ehci, tag)) { 49 case Q_TYPE_QH: 50 return &periodic->qh->qh_next; 51 case Q_TYPE_FSTN: 52 return &periodic->fstn->fstn_next; 53 case Q_TYPE_ITD: 54 return &periodic->itd->itd_next; 55 // case Q_TYPE_SITD: 56 default: 57 return &periodic->sitd->sitd_next; 58 } 59 } 60 61 static __hc32 * 62 shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic, 63 __hc32 tag) 64 { 65 switch (hc32_to_cpu(ehci, tag)) { 66 /* our ehci_shadow.qh is actually software part */ 67 case Q_TYPE_QH: 68 return &periodic->qh->hw->hw_next; 69 /* others are hw parts */ 70 default: 71 return periodic->hw_next; 72 } 73 } 74 75 /* caller must hold ehci->lock */ 76 static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) 77 { 78 union ehci_shadow *prev_p = &ehci->pshadow[frame]; 79 __hc32 *hw_p = &ehci->periodic[frame]; 80 union ehci_shadow here = *prev_p; 81 82 /* find predecessor of "ptr"; hw and shadow lists are in sync */ 83 while (here.ptr && here.ptr != ptr) { 84 prev_p = periodic_next_shadow(ehci, prev_p, 85 Q_NEXT_TYPE(ehci, *hw_p)); 86 hw_p = shadow_next_periodic(ehci, &here, 87 Q_NEXT_TYPE(ehci, *hw_p)); 88 here = *prev_p; 89 } 90 /* an interrupt entry (at list end) could have been shared */ 91 if (!here.ptr) 92 return; 93 94 /* update shadow and hardware lists ... the old "next" pointers 95 * from ptr may still be in use, the caller updates them. 
96 */ 97 *prev_p = *periodic_next_shadow(ehci, &here, 98 Q_NEXT_TYPE(ehci, *hw_p)); 99 100 if (!ehci->use_dummy_qh || 101 *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) 102 != EHCI_LIST_END(ehci)) 103 *hw_p = *shadow_next_periodic(ehci, &here, 104 Q_NEXT_TYPE(ehci, *hw_p)); 105 else 106 *hw_p = ehci->dummy->qh_dma; 107 } 108 109 /* how many of the uframe's 125 usecs are allocated? */ 110 static unsigned short 111 periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) 112 { 113 __hc32 *hw_p = &ehci->periodic [frame]; 114 union ehci_shadow *q = &ehci->pshadow [frame]; 115 unsigned usecs = 0; 116 struct ehci_qh_hw *hw; 117 118 while (q->ptr) { 119 switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { 120 case Q_TYPE_QH: 121 hw = q->qh->hw; 122 /* is it in the S-mask? */ 123 if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) 124 usecs += q->qh->usecs; 125 /* ... or C-mask? */ 126 if (hw->hw_info2 & cpu_to_hc32(ehci, 127 1 << (8 + uframe))) 128 usecs += q->qh->c_usecs; 129 hw_p = &hw->hw_next; 130 q = &q->qh->qh_next; 131 break; 132 // case Q_TYPE_FSTN: 133 default: 134 /* for "save place" FSTNs, count the relevant INTR 135 * bandwidth from the previous frame 136 */ 137 if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) { 138 ehci_dbg (ehci, "ignoring FSTN cost ...\n"); 139 } 140 hw_p = &q->fstn->hw_next; 141 q = &q->fstn->fstn_next; 142 break; 143 case Q_TYPE_ITD: 144 if (q->itd->hw_transaction[uframe]) 145 usecs += q->itd->stream->usecs; 146 hw_p = &q->itd->hw_next; 147 q = &q->itd->itd_next; 148 break; 149 case Q_TYPE_SITD: 150 /* is it in the S-mask? (count SPLIT, DATA) */ 151 if (q->sitd->hw_uframe & cpu_to_hc32(ehci, 152 1 << uframe)) { 153 if (q->sitd->hw_fullspeed_ep & 154 cpu_to_hc32(ehci, 1<<31)) 155 usecs += q->sitd->stream->usecs; 156 else /* worst case for OUT start-split */ 157 usecs += HS_USECS_ISO (188); 158 } 159 160 /* ... C-mask? (count CSPLIT, DATA) */ 161 if (q->sitd->hw_uframe & 162 cpu_to_hc32(ehci, 1 << (8 + uframe))) { 163 /* worst case for IN complete-split */ 164 usecs += q->sitd->stream->c_usecs; 165 } 166 167 hw_p = &q->sitd->hw_next; 168 q = &q->sitd->sitd_next; 169 break; 170 } 171 } 172 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) 173 if (usecs > ehci->uframe_periodic_max) 174 ehci_err (ehci, "uframe %d sched overrun: %d usecs\n", 175 frame * 8 + uframe, usecs); 176 #endif 177 return usecs; 178 } 179 180 /*-------------------------------------------------------------------------*/ 181 182 static int same_tt (struct usb_device *dev1, struct usb_device *dev2) 183 { 184 if (!dev1->tt || !dev2->tt) 185 return 0; 186 if (dev1->tt != dev2->tt) 187 return 0; 188 if (dev1->tt->multi) 189 return dev1->ttport == dev2->ttport; 190 else 191 return 1; 192 } 193 194 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED 195 196 /* Which uframe does the low/fullspeed transfer start in? 197 * 198 * The parameter is the mask of ssplits in "H-frame" terms 199 * and this returns the transfer start uframe in "B-frame" terms, 200 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0 201 * will cause a transfer in "B-frame" uframe 0. "B-frames" lag 202 * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7. 
203 */ 204 static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) 205 { 206 unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask); 207 if (!smask) { 208 ehci_err(ehci, "invalid empty smask!\n"); 209 /* uframe 7 can't have bw so this will indicate failure */ 210 return 7; 211 } 212 return ffs(smask) - 1; 213 } 214 215 static const unsigned char 216 max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; 217 218 /* carryover low/fullspeed bandwidth that crosses uframe boundries */ 219 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) 220 { 221 int i; 222 for (i=0; i<7; i++) { 223 if (max_tt_usecs[i] < tt_usecs[i]) { 224 tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i]; 225 tt_usecs[i] = max_tt_usecs[i]; 226 } 227 } 228 } 229 230 /* How many of the tt's periodic downstream 1000 usecs are allocated? 231 * 232 * While this measures the bandwidth in terms of usecs/uframe, 233 * the low/fullspeed bus has no notion of uframes, so any particular 234 * low/fullspeed transfer can "carry over" from one uframe to the next, 235 * since the TT just performs downstream transfers in sequence. 236 * 237 * For example two separate 100 usec transfers can start in the same uframe, 238 * and the second one would "carry over" 75 usecs into the next uframe. 239 */ 240 static void 241 periodic_tt_usecs ( 242 struct ehci_hcd *ehci, 243 struct usb_device *dev, 244 unsigned frame, 245 unsigned short tt_usecs[8] 246 ) 247 { 248 __hc32 *hw_p = &ehci->periodic [frame]; 249 union ehci_shadow *q = &ehci->pshadow [frame]; 250 unsigned char uf; 251 252 memset(tt_usecs, 0, 16); 253 254 while (q->ptr) { 255 switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { 256 case Q_TYPE_ITD: 257 hw_p = &q->itd->hw_next; 258 q = &q->itd->itd_next; 259 continue; 260 case Q_TYPE_QH: 261 if (same_tt(dev, q->qh->dev)) { 262 uf = tt_start_uframe(ehci, q->qh->hw->hw_info2); 263 tt_usecs[uf] += q->qh->tt_usecs; 264 } 265 hw_p = &q->qh->hw->hw_next; 266 q = &q->qh->qh_next; 267 continue; 268 case Q_TYPE_SITD: 269 if (same_tt(dev, q->sitd->urb->dev)) { 270 uf = tt_start_uframe(ehci, q->sitd->hw_uframe); 271 tt_usecs[uf] += q->sitd->stream->tt_usecs; 272 } 273 hw_p = &q->sitd->hw_next; 274 q = &q->sitd->sitd_next; 275 continue; 276 // case Q_TYPE_FSTN: 277 default: 278 ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n", 279 frame); 280 hw_p = &q->fstn->hw_next; 281 q = &q->fstn->fstn_next; 282 } 283 } 284 285 carryover_tt_bandwidth(tt_usecs); 286 287 if (max_tt_usecs[7] < tt_usecs[7]) 288 ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n", 289 frame, tt_usecs[7] - max_tt_usecs[7]); 290 } 291 292 /* 293 * Return true if the device's tt's downstream bus is available for a 294 * periodic transfer of the specified length (usecs), starting at the 295 * specified frame/uframe. Note that (as summarized in section 11.19 296 * of the usb 2.0 spec) TTs can buffer multiple transactions for each 297 * uframe. 298 * 299 * The uframe parameter is when the fullspeed/lowspeed transfer 300 * should be executed in "B-frame" terms, which is the same as the 301 * highspeed ssplit's uframe (which is in "H-frame" terms). For example 302 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0. 303 * See the EHCI spec sec 4.5 and fig 4.7. 304 * 305 * This checks if the full/lowspeed bus, at the specified starting uframe, 306 * has the specified bandwidth available, according to rules listed 307 * in USB 2.0 spec section 11.18.1 fig 11-60. 
308 * 309 * This does not check if the transfer would exceed the max ssplit 310 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4, 311 * since proper scheduling limits ssplits to less than 16 per uframe. 312 */ 313 static int tt_available ( 314 struct ehci_hcd *ehci, 315 unsigned period, 316 struct usb_device *dev, 317 unsigned frame, 318 unsigned uframe, 319 u16 usecs 320 ) 321 { 322 if ((period == 0) || (uframe >= 7)) /* error */ 323 return 0; 324 325 for (; frame < ehci->periodic_size; frame += period) { 326 unsigned short tt_usecs[8]; 327 328 periodic_tt_usecs (ehci, dev, frame, tt_usecs); 329 330 if (max_tt_usecs[uframe] <= tt_usecs[uframe]) 331 return 0; 332 333 /* special case for isoc transfers larger than 125us: 334 * the first and each subsequent fully used uframe 335 * must be empty, so as to not illegally delay 336 * already scheduled transactions 337 */ 338 if (125 < usecs) { 339 int ufs = (usecs / 125); 340 int i; 341 for (i = uframe; i < (uframe + ufs) && i < 8; i++) 342 if (0 < tt_usecs[i]) 343 return 0; 344 } 345 346 tt_usecs[uframe] += usecs; 347 348 carryover_tt_bandwidth(tt_usecs); 349 350 /* fail if the carryover pushed bw past the last uframe's limit */ 351 if (max_tt_usecs[7] < tt_usecs[7]) 352 return 0; 353 } 354 355 return 1; 356 } 357 358 #else 359 360 /* return true iff the device's transaction translator is available 361 * for a periodic transfer starting at the specified frame, using 362 * all the uframes in the mask. 363 */ 364 static int tt_no_collision ( 365 struct ehci_hcd *ehci, 366 unsigned period, 367 struct usb_device *dev, 368 unsigned frame, 369 u32 uf_mask 370 ) 371 { 372 if (period == 0) /* error */ 373 return 0; 374 375 /* note bandwidth wastage: split never follows csplit 376 * (different dev or endpoint) until the next uframe. 377 * calling convention doesn't make that distinction. 378 */ 379 for (; frame < ehci->periodic_size; frame += period) { 380 union ehci_shadow here; 381 __hc32 type; 382 struct ehci_qh_hw *hw; 383 384 here = ehci->pshadow [frame]; 385 type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); 386 while (here.ptr) { 387 switch (hc32_to_cpu(ehci, type)) { 388 case Q_TYPE_ITD: 389 type = Q_NEXT_TYPE(ehci, here.itd->hw_next); 390 here = here.itd->itd_next; 391 continue; 392 case Q_TYPE_QH: 393 hw = here.qh->hw; 394 if (same_tt (dev, here.qh->dev)) { 395 u32 mask; 396 397 mask = hc32_to_cpu(ehci, 398 hw->hw_info2); 399 /* "knows" no gap is needed */ 400 mask |= mask >> 8; 401 if (mask & uf_mask) 402 break; 403 } 404 type = Q_NEXT_TYPE(ehci, hw->hw_next); 405 here = here.qh->qh_next; 406 continue; 407 case Q_TYPE_SITD: 408 if (same_tt (dev, here.sitd->urb->dev)) { 409 u16 mask; 410 411 mask = hc32_to_cpu(ehci, here.sitd 412 ->hw_uframe); 413 /* FIXME assumes no gap for IN! 
*/ 414 mask |= mask >> 8; 415 if (mask & uf_mask) 416 break; 417 } 418 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); 419 here = here.sitd->sitd_next; 420 continue; 421 // case Q_TYPE_FSTN: 422 default: 423 ehci_dbg (ehci, 424 "periodic frame %d bogus type %d\n", 425 frame, type); 426 } 427 428 /* collision or error */ 429 return 0; 430 } 431 } 432 433 /* no collision */ 434 return 1; 435 } 436 437 #endif /* CONFIG_USB_EHCI_TT_NEWSCHED */ 438 439 /*-------------------------------------------------------------------------*/ 440 441 static void enable_periodic(struct ehci_hcd *ehci) 442 { 443 if (ehci->periodic_count++) 444 return; 445 446 /* Stop waiting to turn off the periodic schedule */ 447 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); 448 449 /* Don't start the schedule until PSS is 0 */ 450 ehci_poll_PSS(ehci); 451 turn_on_io_watchdog(ehci); 452 } 453 454 static void disable_periodic(struct ehci_hcd *ehci) 455 { 456 if (--ehci->periodic_count) 457 return; 458 459 /* Don't turn off the schedule until PSS is 1 */ 460 ehci_poll_PSS(ehci); 461 } 462 463 /*-------------------------------------------------------------------------*/ 464 465 /* periodic schedule slots have iso tds (normal or split) first, then a 466 * sparse tree for active interrupt transfers. 467 * 468 * this just links in a qh; caller guarantees uframe masks are set right. 469 * no FSTN support (yet; ehci 0.96+) 470 */ 471 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) 472 { 473 unsigned i; 474 unsigned period = qh->period; 475 476 dev_dbg (&qh->dev->dev, 477 "link qh%d-%04x/%p start %d [%d/%d us]\n", 478 period, hc32_to_cpup(ehci, &qh->hw->hw_info2) 479 & (QH_CMASK | QH_SMASK), 480 qh, qh->start, qh->usecs, qh->c_usecs); 481 482 /* high bandwidth, or otherwise every microframe */ 483 if (period == 0) 484 period = 1; 485 486 for (i = qh->start; i < ehci->periodic_size; i += period) { 487 union ehci_shadow *prev = &ehci->pshadow[i]; 488 __hc32 *hw_p = &ehci->periodic[i]; 489 union ehci_shadow here = *prev; 490 __hc32 type = 0; 491 492 /* skip the iso nodes at list head */ 493 while (here.ptr) { 494 type = Q_NEXT_TYPE(ehci, *hw_p); 495 if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) 496 break; 497 prev = periodic_next_shadow(ehci, prev, type); 498 hw_p = shadow_next_periodic(ehci, &here, type); 499 here = *prev; 500 } 501 502 /* sorting each branch by period (slow-->fast) 503 * enables sharing interior tree nodes 504 */ 505 while (here.ptr && qh != here.qh) { 506 if (qh->period > here.qh->period) 507 break; 508 prev = &here.qh->qh_next; 509 hw_p = &here.qh->hw->hw_next; 510 here = *prev; 511 } 512 /* link in this qh, unless some earlier pass did that */ 513 if (qh != here.qh) { 514 qh->qh_next = here; 515 if (here.qh) 516 qh->hw->hw_next = *hw_p; 517 wmb (); 518 prev->qh = qh; 519 *hw_p = QH_NEXT (ehci, qh->qh_dma); 520 } 521 } 522 qh->qh_state = QH_STATE_LINKED; 523 qh->xacterrs = 0; 524 qh->exception = 0; 525 526 /* update per-qh bandwidth for usbfs */ 527 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period 528 ? 
((qh->usecs + qh->c_usecs) / qh->period) 529 : (qh->usecs * 8); 530 531 list_add(&qh->intr_node, &ehci->intr_qh_list); 532 533 /* maybe enable periodic schedule processing */ 534 ++ehci->intr_count; 535 enable_periodic(ehci); 536 } 537 538 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) 539 { 540 unsigned i; 541 unsigned period; 542 543 /* 544 * If qh is for a low/full-speed device, simply unlinking it 545 * could interfere with an ongoing split transaction. To unlink 546 * it safely would require setting the QH_INACTIVATE bit and 547 * waiting at least one frame, as described in EHCI 4.12.2.5. 548 * 549 * We won't bother with any of this. Instead, we assume that the 550 * only reason for unlinking an interrupt QH while the current URB 551 * is still active is to dequeue all the URBs (flush the whole 552 * endpoint queue). 553 * 554 * If rebalancing the periodic schedule is ever implemented, this 555 * approach will no longer be valid. 556 */ 557 558 /* high bandwidth, or otherwise part of every microframe */ 559 if ((period = qh->period) == 0) 560 period = 1; 561 562 for (i = qh->start; i < ehci->periodic_size; i += period) 563 periodic_unlink (ehci, i, qh); 564 565 /* update per-qh bandwidth for usbfs */ 566 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period 567 ? ((qh->usecs + qh->c_usecs) / qh->period) 568 : (qh->usecs * 8); 569 570 dev_dbg (&qh->dev->dev, 571 "unlink qh%d-%04x/%p start %d [%d/%d us]\n", 572 qh->period, 573 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), 574 qh, qh->start, qh->usecs, qh->c_usecs); 575 576 /* qh->qh_next still "live" to HC */ 577 qh->qh_state = QH_STATE_UNLINK; 578 qh->qh_next.ptr = NULL; 579 580 if (ehci->qh_scan_next == qh) 581 ehci->qh_scan_next = list_entry(qh->intr_node.next, 582 struct ehci_qh, intr_node); 583 list_del(&qh->intr_node); 584 } 585 586 static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) 587 { 588 if (qh->qh_state != QH_STATE_LINKED || 589 list_empty(&qh->unlink_node)) 590 return; 591 592 list_del_init(&qh->unlink_node); 593 594 /* 595 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for 596 * avoiding unnecessary CPU wakeup 597 */ 598 } 599 600 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) 601 { 602 /* If the QH isn't linked then there's nothing we can do. */ 603 if (qh->qh_state != QH_STATE_LINKED) 604 return; 605 606 /* if the qh is waiting for unlink, cancel it now */ 607 cancel_unlink_wait_intr(ehci, qh); 608 609 qh_unlink_periodic (ehci, qh); 610 611 /* Make sure the unlinks are visible before starting the timer */ 612 wmb(); 613 614 /* 615 * The EHCI spec doesn't say how long it takes the controller to 616 * stop accessing an unlinked interrupt QH. The timer delay is 617 * 9 uframes; presumably that will be long enough. 618 */ 619 qh->unlink_cycle = ehci->intr_unlink_cycle; 620 621 /* New entries go at the end of the intr_unlink list */ 622 list_add_tail(&qh->unlink_node, &ehci->intr_unlink); 623 624 if (ehci->intr_unlinking) 625 ; /* Avoid recursive calls */ 626 else if (ehci->rh_state < EHCI_RH_RUNNING) 627 ehci_handle_intr_unlinks(ehci); 628 else if (ehci->intr_unlink.next == &qh->unlink_node) { 629 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true); 630 ++ehci->intr_unlink_cycle; 631 } 632 } 633 634 /* 635 * It is common only one intr URB is scheduled on one qh, and 636 * given complete() is run in tasklet context, introduce a bit 637 * delay to avoid unlink qh too early. 
638 */ 639 static void start_unlink_intr_wait(struct ehci_hcd *ehci, 640 struct ehci_qh *qh) 641 { 642 qh->unlink_cycle = ehci->intr_unlink_wait_cycle; 643 644 /* New entries go at the end of the intr_unlink_wait list */ 645 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait); 646 647 if (ehci->rh_state < EHCI_RH_RUNNING) 648 ehci_handle_start_intr_unlinks(ehci); 649 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) { 650 ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true); 651 ++ehci->intr_unlink_wait_cycle; 652 } 653 } 654 655 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) 656 { 657 struct ehci_qh_hw *hw = qh->hw; 658 int rc; 659 660 qh->qh_state = QH_STATE_IDLE; 661 hw->hw_next = EHCI_LIST_END(ehci); 662 663 if (!list_empty(&qh->qtd_list)) 664 qh_completions(ehci, qh); 665 666 /* reschedule QH iff another request is queued */ 667 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { 668 rc = qh_schedule(ehci, qh); 669 if (rc == 0) { 670 qh_refresh(ehci, qh); 671 qh_link_periodic(ehci, qh); 672 } 673 674 /* An error here likely indicates handshake failure 675 * or no space left in the schedule. Neither fault 676 * should happen often ... 677 * 678 * FIXME kill the now-dysfunctional queued urbs 679 */ 680 else { 681 ehci_err(ehci, "can't reschedule qh %p, err %d\n", 682 qh, rc); 683 } 684 } 685 686 /* maybe turn off periodic schedule */ 687 --ehci->intr_count; 688 disable_periodic(ehci); 689 } 690 691 /*-------------------------------------------------------------------------*/ 692 693 static int check_period ( 694 struct ehci_hcd *ehci, 695 unsigned frame, 696 unsigned uframe, 697 unsigned period, 698 unsigned usecs 699 ) { 700 int claimed; 701 702 /* complete split running into next frame? 703 * given FSTN support, we could sometimes check... 704 */ 705 if (uframe >= 8) 706 return 0; 707 708 /* convert "usecs we need" to "max already claimed" */ 709 usecs = ehci->uframe_periodic_max - usecs; 710 711 /* we "know" 2 and 4 uframe intervals were rejected; so 712 * for period 0, check _every_ microframe in the schedule. 713 */ 714 if (unlikely (period == 0)) { 715 do { 716 for (uframe = 0; uframe < 7; uframe++) { 717 claimed = periodic_usecs (ehci, frame, uframe); 718 if (claimed > usecs) 719 return 0; 720 } 721 } while ((frame += 1) < ehci->periodic_size); 722 723 /* just check the specified uframe, at that period */ 724 } else { 725 do { 726 claimed = periodic_usecs (ehci, frame, uframe); 727 if (claimed > usecs) 728 return 0; 729 } while ((frame += period) < ehci->periodic_size); 730 } 731 732 // success! 733 return 1; 734 } 735 736 static int check_intr_schedule ( 737 struct ehci_hcd *ehci, 738 unsigned frame, 739 unsigned uframe, 740 const struct ehci_qh *qh, 741 __hc32 *c_maskp 742 ) 743 { 744 int retval = -ENOSPC; 745 u8 mask = 0; 746 747 if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ 748 goto done; 749 750 if (!check_period (ehci, frame, uframe, qh->period, qh->usecs)) 751 goto done; 752 if (!qh->c_usecs) { 753 retval = 0; 754 *c_maskp = 0; 755 goto done; 756 } 757 758 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED 759 if (tt_available (ehci, qh->period, qh->dev, frame, uframe, 760 qh->tt_usecs)) { 761 unsigned i; 762 763 /* TODO : this may need FSTN for SSPLIT in uframe 5. 
*/ 764 for (i=uframe+1; i<8 && i<uframe+4; i++) 765 if (!check_period (ehci, frame, i, 766 qh->period, qh->c_usecs)) 767 goto done; 768 else 769 mask |= 1 << i; 770 771 retval = 0; 772 773 *c_maskp = cpu_to_hc32(ehci, mask << 8); 774 } 775 #else 776 /* Make sure this tt's buffer is also available for CSPLITs. 777 * We pessimize a bit; probably the typical full speed case 778 * doesn't need the second CSPLIT. 779 * 780 * NOTE: both SPLIT and CSPLIT could be checked in just 781 * one smart pass... 782 */ 783 mask = 0x03 << (uframe + qh->gap_uf); 784 *c_maskp = cpu_to_hc32(ehci, mask << 8); 785 786 mask |= 1 << uframe; 787 if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) { 788 if (!check_period (ehci, frame, uframe + qh->gap_uf + 1, 789 qh->period, qh->c_usecs)) 790 goto done; 791 if (!check_period (ehci, frame, uframe + qh->gap_uf, 792 qh->period, qh->c_usecs)) 793 goto done; 794 retval = 0; 795 } 796 #endif 797 done: 798 return retval; 799 } 800 801 /* "first fit" scheduling policy used the first time through, 802 * or when the previous schedule slot can't be re-used. 803 */ 804 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh) 805 { 806 int status; 807 unsigned uframe; 808 __hc32 c_mask; 809 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ 810 struct ehci_qh_hw *hw = qh->hw; 811 812 hw->hw_next = EHCI_LIST_END(ehci); 813 frame = qh->start; 814 815 /* reuse the previous schedule slots, if we can */ 816 if (frame < qh->period) { 817 uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK); 818 status = check_intr_schedule (ehci, frame, --uframe, 819 qh, &c_mask); 820 } else { 821 uframe = 0; 822 c_mask = 0; 823 status = -ENOSPC; 824 } 825 826 /* else scan the schedule to find a group of slots such that all 827 * uframes have enough periodic bandwidth available. 828 */ 829 if (status) { 830 /* "normal" case, uframing flexible except with splits */ 831 if (qh->period) { 832 int i; 833 834 for (i = qh->period; status && i > 0; --i) { 835 frame = ++ehci->random_frame % qh->period; 836 for (uframe = 0; uframe < 8; uframe++) { 837 status = check_intr_schedule (ehci, 838 frame, uframe, qh, 839 &c_mask); 840 if (status == 0) 841 break; 842 } 843 } 844 845 /* qh->period == 0 means every uframe */ 846 } else { 847 frame = 0; 848 status = check_intr_schedule (ehci, 0, 0, qh, &c_mask); 849 } 850 if (status) 851 goto done; 852 qh->start = frame; 853 854 /* reset S-frame and (maybe) C-frame masks */ 855 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); 856 hw->hw_info2 |= qh->period 857 ? 
cpu_to_hc32(ehci, 1 << uframe) 858 : cpu_to_hc32(ehci, QH_SMASK); 859 hw->hw_info2 |= c_mask; 860 } else 861 ehci_dbg (ehci, "reused qh %p schedule\n", qh); 862 863 done: 864 return status; 865 } 866 867 static int intr_submit ( 868 struct ehci_hcd *ehci, 869 struct urb *urb, 870 struct list_head *qtd_list, 871 gfp_t mem_flags 872 ) { 873 unsigned epnum; 874 unsigned long flags; 875 struct ehci_qh *qh; 876 int status; 877 struct list_head empty; 878 879 /* get endpoint and transfer/schedule data */ 880 epnum = urb->ep->desc.bEndpointAddress; 881 882 spin_lock_irqsave (&ehci->lock, flags); 883 884 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { 885 status = -ESHUTDOWN; 886 goto done_not_linked; 887 } 888 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); 889 if (unlikely(status)) 890 goto done_not_linked; 891 892 /* get qh and force any scheduling errors */ 893 INIT_LIST_HEAD (&empty); 894 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv); 895 if (qh == NULL) { 896 status = -ENOMEM; 897 goto done; 898 } 899 if (qh->qh_state == QH_STATE_IDLE) { 900 if ((status = qh_schedule (ehci, qh)) != 0) 901 goto done; 902 } 903 904 /* then queue the urb's tds to the qh */ 905 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); 906 BUG_ON (qh == NULL); 907 908 /* stuff into the periodic schedule */ 909 if (qh->qh_state == QH_STATE_IDLE) { 910 qh_refresh(ehci, qh); 911 qh_link_periodic(ehci, qh); 912 } else { 913 /* cancel unlink wait for the qh */ 914 cancel_unlink_wait_intr(ehci, qh); 915 } 916 917 /* ... update usbfs periodic stats */ 918 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; 919 920 done: 921 if (unlikely(status)) 922 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 923 done_not_linked: 924 spin_unlock_irqrestore (&ehci->lock, flags); 925 if (status) 926 qtd_list_free (ehci, urb, qtd_list); 927 928 return status; 929 } 930 931 static void scan_intr(struct ehci_hcd *ehci) 932 { 933 struct ehci_qh *qh; 934 935 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list, 936 intr_node) { 937 938 /* clean any finished work for this qh */ 939 if (!list_empty(&qh->qtd_list)) { 940 int temp; 941 942 /* 943 * Unlinks could happen here; completion reporting 944 * drops the lock. That's why ehci->qh_scan_next 945 * always holds the next qh to scan; if the next qh 946 * gets unlinked then ehci->qh_scan_next is adjusted 947 * in qh_unlink_periodic(). 
948 */ 949 temp = qh_completions(ehci, qh); 950 if (unlikely(temp)) 951 start_unlink_intr(ehci, qh); 952 else if (unlikely(list_empty(&qh->qtd_list) && 953 qh->qh_state == QH_STATE_LINKED)) 954 start_unlink_intr_wait(ehci, qh); 955 } 956 } 957 } 958 959 /*-------------------------------------------------------------------------*/ 960 961 /* ehci_iso_stream ops work with both ITD and SITD */ 962 963 static struct ehci_iso_stream * 964 iso_stream_alloc (gfp_t mem_flags) 965 { 966 struct ehci_iso_stream *stream; 967 968 stream = kzalloc(sizeof *stream, mem_flags); 969 if (likely (stream != NULL)) { 970 INIT_LIST_HEAD(&stream->td_list); 971 INIT_LIST_HEAD(&stream->free_list); 972 stream->next_uframe = -1; 973 } 974 return stream; 975 } 976 977 static void 978 iso_stream_init ( 979 struct ehci_hcd *ehci, 980 struct ehci_iso_stream *stream, 981 struct usb_device *dev, 982 int pipe, 983 unsigned interval 984 ) 985 { 986 static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f }; 987 988 u32 buf1; 989 unsigned epnum, maxp; 990 int is_input; 991 long bandwidth; 992 993 /* 994 * this might be a "high bandwidth" highspeed endpoint, 995 * as encoded in the ep descriptor's wMaxPacket field 996 */ 997 epnum = usb_pipeendpoint (pipe); 998 is_input = usb_pipein (pipe) ? USB_DIR_IN : 0; 999 maxp = usb_maxpacket(dev, pipe, !is_input); 1000 if (is_input) { 1001 buf1 = (1 << 11); 1002 } else { 1003 buf1 = 0; 1004 } 1005 1006 /* knows about ITD vs SITD */ 1007 if (dev->speed == USB_SPEED_HIGH) { 1008 unsigned multi = hb_mult(maxp); 1009 1010 stream->highspeed = 1; 1011 1012 maxp = max_packet(maxp); 1013 buf1 |= maxp; 1014 maxp *= multi; 1015 1016 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); 1017 stream->buf1 = cpu_to_hc32(ehci, buf1); 1018 stream->buf2 = cpu_to_hc32(ehci, multi); 1019 1020 /* usbfs wants to report the average usecs per frame tied up 1021 * when transfers on this endpoint are scheduled ... 1022 */ 1023 stream->usecs = HS_USECS_ISO (maxp); 1024 bandwidth = stream->usecs * 8; 1025 bandwidth /= interval; 1026 1027 } else { 1028 u32 addr; 1029 int think_time; 1030 int hs_transfers; 1031 1032 addr = dev->ttport << 24; 1033 if (!ehci_is_TDI(ehci) 1034 || (dev->tt->hub != 1035 ehci_to_hcd(ehci)->self.root_hub)) 1036 addr |= dev->tt->hub->devnum << 16; 1037 addr |= epnum << 8; 1038 addr |= dev->devnum; 1039 stream->usecs = HS_USECS_ISO (maxp); 1040 think_time = dev->tt ? 
dev->tt->think_time : 0; 1041 stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time ( 1042 dev->speed, is_input, 1, maxp)); 1043 hs_transfers = max (1u, (maxp + 187) / 188); 1044 if (is_input) { 1045 u32 tmp; 1046 1047 addr |= 1 << 31; 1048 stream->c_usecs = stream->usecs; 1049 stream->usecs = HS_USECS_ISO (1); 1050 stream->raw_mask = 1; 1051 1052 /* c-mask as specified in USB 2.0 11.18.4 3.c */ 1053 tmp = (1 << (hs_transfers + 2)) - 1; 1054 stream->raw_mask |= tmp << (8 + 2); 1055 } else 1056 stream->raw_mask = smask_out [hs_transfers - 1]; 1057 bandwidth = stream->usecs + stream->c_usecs; 1058 bandwidth /= interval << 3; 1059 1060 /* stream->splits gets created from raw_mask later */ 1061 stream->address = cpu_to_hc32(ehci, addr); 1062 } 1063 stream->bandwidth = bandwidth; 1064 1065 stream->udev = dev; 1066 1067 stream->bEndpointAddress = is_input | epnum; 1068 stream->interval = interval; 1069 stream->maxp = maxp; 1070 } 1071 1072 static struct ehci_iso_stream * 1073 iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) 1074 { 1075 unsigned epnum; 1076 struct ehci_iso_stream *stream; 1077 struct usb_host_endpoint *ep; 1078 unsigned long flags; 1079 1080 epnum = usb_pipeendpoint (urb->pipe); 1081 if (usb_pipein(urb->pipe)) 1082 ep = urb->dev->ep_in[epnum]; 1083 else 1084 ep = urb->dev->ep_out[epnum]; 1085 1086 spin_lock_irqsave (&ehci->lock, flags); 1087 stream = ep->hcpriv; 1088 1089 if (unlikely (stream == NULL)) { 1090 stream = iso_stream_alloc(GFP_ATOMIC); 1091 if (likely (stream != NULL)) { 1092 ep->hcpriv = stream; 1093 stream->ep = ep; 1094 iso_stream_init(ehci, stream, urb->dev, urb->pipe, 1095 urb->interval); 1096 } 1097 1098 /* if dev->ep [epnum] is a QH, hw is set */ 1099 } else if (unlikely (stream->hw != NULL)) { 1100 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", 1101 urb->dev->devpath, epnum, 1102 usb_pipein(urb->pipe) ? "in" : "out"); 1103 stream = NULL; 1104 } 1105 1106 spin_unlock_irqrestore (&ehci->lock, flags); 1107 return stream; 1108 } 1109 1110 /*-------------------------------------------------------------------------*/ 1111 1112 /* ehci_iso_sched ops can be ITD-only or SITD-only */ 1113 1114 static struct ehci_iso_sched * 1115 iso_sched_alloc (unsigned packets, gfp_t mem_flags) 1116 { 1117 struct ehci_iso_sched *iso_sched; 1118 int size = sizeof *iso_sched; 1119 1120 size += packets * sizeof (struct ehci_iso_packet); 1121 iso_sched = kzalloc(size, mem_flags); 1122 if (likely (iso_sched != NULL)) { 1123 INIT_LIST_HEAD (&iso_sched->td_list); 1124 } 1125 return iso_sched; 1126 } 1127 1128 static inline void 1129 itd_sched_init( 1130 struct ehci_hcd *ehci, 1131 struct ehci_iso_sched *iso_sched, 1132 struct ehci_iso_stream *stream, 1133 struct urb *urb 1134 ) 1135 { 1136 unsigned i; 1137 dma_addr_t dma = urb->transfer_dma; 1138 1139 /* how many uframes are needed for these transfers */ 1140 iso_sched->span = urb->number_of_packets * stream->interval; 1141 1142 /* figure out per-uframe itd fields that we'll need later 1143 * when we fit new itds into the schedule. 
1144 */ 1145 for (i = 0; i < urb->number_of_packets; i++) { 1146 struct ehci_iso_packet *uframe = &iso_sched->packet [i]; 1147 unsigned length; 1148 dma_addr_t buf; 1149 u32 trans; 1150 1151 length = urb->iso_frame_desc [i].length; 1152 buf = dma + urb->iso_frame_desc [i].offset; 1153 1154 trans = EHCI_ISOC_ACTIVE; 1155 trans |= buf & 0x0fff; 1156 if (unlikely (((i + 1) == urb->number_of_packets)) 1157 && !(urb->transfer_flags & URB_NO_INTERRUPT)) 1158 trans |= EHCI_ITD_IOC; 1159 trans |= length << 16; 1160 uframe->transaction = cpu_to_hc32(ehci, trans); 1161 1162 /* might need to cross a buffer page within a uframe */ 1163 uframe->bufp = (buf & ~(u64)0x0fff); 1164 buf += length; 1165 if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) 1166 uframe->cross = 1; 1167 } 1168 } 1169 1170 static void 1171 iso_sched_free ( 1172 struct ehci_iso_stream *stream, 1173 struct ehci_iso_sched *iso_sched 1174 ) 1175 { 1176 if (!iso_sched) 1177 return; 1178 // caller must hold ehci->lock! 1179 list_splice (&iso_sched->td_list, &stream->free_list); 1180 kfree (iso_sched); 1181 } 1182 1183 static int 1184 itd_urb_transaction ( 1185 struct ehci_iso_stream *stream, 1186 struct ehci_hcd *ehci, 1187 struct urb *urb, 1188 gfp_t mem_flags 1189 ) 1190 { 1191 struct ehci_itd *itd; 1192 dma_addr_t itd_dma; 1193 int i; 1194 unsigned num_itds; 1195 struct ehci_iso_sched *sched; 1196 unsigned long flags; 1197 1198 sched = iso_sched_alloc (urb->number_of_packets, mem_flags); 1199 if (unlikely (sched == NULL)) 1200 return -ENOMEM; 1201 1202 itd_sched_init(ehci, sched, stream, urb); 1203 1204 if (urb->interval < 8) 1205 num_itds = 1 + (sched->span + 7) / 8; 1206 else 1207 num_itds = urb->number_of_packets; 1208 1209 /* allocate/init ITDs */ 1210 spin_lock_irqsave (&ehci->lock, flags); 1211 for (i = 0; i < num_itds; i++) { 1212 1213 /* 1214 * Use iTDs from the free list, but not iTDs that may 1215 * still be in use by the hardware. 
1216 */ 1217 if (likely(!list_empty(&stream->free_list))) { 1218 itd = list_first_entry(&stream->free_list, 1219 struct ehci_itd, itd_list); 1220 if (itd->frame == ehci->now_frame) 1221 goto alloc_itd; 1222 list_del (&itd->itd_list); 1223 itd_dma = itd->itd_dma; 1224 } else { 1225 alloc_itd: 1226 spin_unlock_irqrestore (&ehci->lock, flags); 1227 itd = dma_pool_alloc (ehci->itd_pool, mem_flags, 1228 &itd_dma); 1229 spin_lock_irqsave (&ehci->lock, flags); 1230 if (!itd) { 1231 iso_sched_free(stream, sched); 1232 spin_unlock_irqrestore(&ehci->lock, flags); 1233 return -ENOMEM; 1234 } 1235 } 1236 1237 memset (itd, 0, sizeof *itd); 1238 itd->itd_dma = itd_dma; 1239 itd->frame = 9999; /* an invalid value */ 1240 list_add (&itd->itd_list, &sched->td_list); 1241 } 1242 spin_unlock_irqrestore (&ehci->lock, flags); 1243 1244 /* temporarily store schedule info in hcpriv */ 1245 urb->hcpriv = sched; 1246 urb->error_count = 0; 1247 return 0; 1248 } 1249 1250 /*-------------------------------------------------------------------------*/ 1251 1252 static inline int 1253 itd_slot_ok ( 1254 struct ehci_hcd *ehci, 1255 u32 mod, 1256 u32 uframe, 1257 u8 usecs, 1258 u32 period 1259 ) 1260 { 1261 uframe %= period; 1262 do { 1263 /* can't commit more than uframe_periodic_max usec */ 1264 if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7) 1265 > (ehci->uframe_periodic_max - usecs)) 1266 return 0; 1267 1268 /* we know urb->interval is 2^N uframes */ 1269 uframe += period; 1270 } while (uframe < mod); 1271 return 1; 1272 } 1273 1274 static inline int 1275 sitd_slot_ok ( 1276 struct ehci_hcd *ehci, 1277 u32 mod, 1278 struct ehci_iso_stream *stream, 1279 u32 uframe, 1280 struct ehci_iso_sched *sched, 1281 u32 period_uframes 1282 ) 1283 { 1284 u32 mask, tmp; 1285 u32 frame, uf; 1286 1287 mask = stream->raw_mask << (uframe & 7); 1288 1289 /* for IN, don't wrap CSPLIT into the next frame */ 1290 if (mask & ~0xffff) 1291 return 0; 1292 1293 /* check bandwidth */ 1294 uframe %= period_uframes; 1295 frame = uframe >> 3; 1296 1297 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED 1298 /* The tt's fullspeed bus bandwidth must be available. 1299 * tt_available scheduling guarantees 10+% for control/bulk. 1300 */ 1301 uf = uframe & 7; 1302 if (!tt_available(ehci, period_uframes >> 3, 1303 stream->udev, frame, uf, stream->tt_usecs)) 1304 return 0; 1305 #else 1306 /* tt must be idle for start(s), any gap, and csplit. 1307 * assume scheduling slop leaves 10+% for control/bulk. 1308 */ 1309 if (!tt_no_collision(ehci, period_uframes >> 3, 1310 stream->udev, frame, mask)) 1311 return 0; 1312 #endif 1313 1314 /* this multi-pass logic is simple, but performance may 1315 * suffer when the schedule data isn't cached. 
1316 */ 1317 do { 1318 u32 max_used; 1319 1320 frame = uframe >> 3; 1321 uf = uframe & 7; 1322 1323 /* check starts (OUT uses more than one) */ 1324 max_used = ehci->uframe_periodic_max - stream->usecs; 1325 for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) { 1326 if (periodic_usecs (ehci, frame, uf) > max_used) 1327 return 0; 1328 } 1329 1330 /* for IN, check CSPLIT */ 1331 if (stream->c_usecs) { 1332 uf = uframe & 7; 1333 max_used = ehci->uframe_periodic_max - stream->c_usecs; 1334 do { 1335 tmp = 1 << uf; 1336 tmp <<= 8; 1337 if ((stream->raw_mask & tmp) == 0) 1338 continue; 1339 if (periodic_usecs (ehci, frame, uf) 1340 > max_used) 1341 return 0; 1342 } while (++uf < 8); 1343 } 1344 1345 /* we know urb->interval is 2^N uframes */ 1346 uframe += period_uframes; 1347 } while (uframe < mod); 1348 1349 stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7)); 1350 return 1; 1351 } 1352 1353 /* 1354 * This scheduler plans almost as far into the future as it has actual 1355 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to 1356 * "as small as possible" to be cache-friendlier.) That limits the size 1357 * transfers you can stream reliably; avoid more than 64 msec per urb. 1358 * Also avoid queue depths of less than ehci's worst irq latency (affected 1359 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, 1360 * and other factors); or more than about 230 msec total (for portability, 1361 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! 1362 */ 1363 1364 #define SCHEDULING_DELAY 40 /* microframes */ 1365 1366 static int 1367 iso_stream_schedule ( 1368 struct ehci_hcd *ehci, 1369 struct urb *urb, 1370 struct ehci_iso_stream *stream 1371 ) 1372 { 1373 u32 now, base, next, start, period, span, now2; 1374 u32 wrap = 0, skip = 0; 1375 int status = 0; 1376 unsigned mod = ehci->periodic_size << 3; 1377 struct ehci_iso_sched *sched = urb->hcpriv; 1378 bool empty = list_empty(&stream->td_list); 1379 1380 period = urb->interval; 1381 span = sched->span; 1382 if (!stream->highspeed) { 1383 period <<= 3; 1384 span <<= 3; 1385 } 1386 1387 now = ehci_read_frame_index(ehci) & (mod - 1); 1388 1389 /* Take the isochronous scheduling threshold into account */ 1390 if (ehci->i_thresh) 1391 next = now + ehci->i_thresh; /* uframe cache */ 1392 else 1393 next = (now + 2 + 7) & ~0x07; /* full frame cache */ 1394 1395 /* 1396 * Use ehci->last_iso_frame as the base. There can't be any 1397 * TDs scheduled for earlier than that. 1398 */ 1399 base = ehci->last_iso_frame << 3; 1400 next = (next - base) & (mod - 1); 1401 1402 /* 1403 * Need to schedule; when's the next (u)frame we could start? 1404 * This is bigger than ehci->i_thresh allows; scheduling itself 1405 * isn't free, the delay should handle reasonably slow cpus. It 1406 * can also help high bandwidth if the dma and irq loads don't 1407 * jump until after the queue is primed. 1408 */ 1409 if (unlikely(empty && !hcd_periodic_completion_in_progress( 1410 ehci_to_hcd(ehci), urb->ep))) { 1411 int done = 0; 1412 1413 start = (now & ~0x07) + SCHEDULING_DELAY; 1414 1415 /* find a uframe slot with enough bandwidth. 1416 * Early uframes are more precious because full-speed 1417 * iso IN transfers can't use late uframes, 1418 * and therefore they should be allocated last. 1419 */ 1420 next = start; 1421 start += period; 1422 do { 1423 start--; 1424 /* check schedule: enough space? 
*/ 1425 if (stream->highspeed) { 1426 if (itd_slot_ok(ehci, mod, start, 1427 stream->usecs, period)) 1428 done = 1; 1429 } else { 1430 if ((start % 8) >= 6) 1431 continue; 1432 if (sitd_slot_ok(ehci, mod, stream, 1433 start, sched, period)) 1434 done = 1; 1435 } 1436 } while (start > next && !done); 1437 1438 /* no room in the schedule */ 1439 if (!done) { 1440 ehci_dbg(ehci, "iso sched full %p", urb); 1441 status = -ENOSPC; 1442 goto fail; 1443 } 1444 1445 start = (start - base) & (mod - 1); 1446 goto use_start; 1447 } 1448 1449 /* 1450 * Typical case: reuse current schedule, stream is still active. 1451 * Hopefully there are no gaps from the host falling behind 1452 * (irq delays etc). If there are, the behavior depends on 1453 * whether URB_ISO_ASAP is set. 1454 */ 1455 start = (stream->next_uframe - base) & (mod - 1); 1456 now2 = (now - base) & (mod - 1); 1457 1458 /* Is the schedule already full? */ 1459 if (unlikely(!empty && start < period)) { 1460 ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n", 1461 urb, stream->next_uframe, base, period, mod); 1462 status = -ENOSPC; 1463 goto fail; 1464 } 1465 1466 /* Is the next packet scheduled after the base time? */ 1467 if (likely(!empty || start <= now2 + period)) { 1468 1469 /* URB_ISO_ASAP: make sure that start >= next */ 1470 if (unlikely(start < next && 1471 (urb->transfer_flags & URB_ISO_ASAP))) 1472 goto do_ASAP; 1473 1474 /* Otherwise use start, if it's not in the past */ 1475 if (likely(start >= now2)) 1476 goto use_start; 1477 1478 /* Otherwise we got an underrun while the queue was empty */ 1479 } else { 1480 if (urb->transfer_flags & URB_ISO_ASAP) 1481 goto do_ASAP; 1482 wrap = mod; 1483 now2 += mod; 1484 } 1485 1486 /* How many uframes and packets do we need to skip? */ 1487 skip = (now2 - start + period - 1) & -period; 1488 if (skip >= span) { /* Entirely in the past? */ 1489 ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n", 1490 urb, start + base, span - period, now2 + base, 1491 base); 1492 1493 /* Try to keep the last TD intact for scanning later */ 1494 skip = span - period; 1495 1496 /* Will it come before the current scan position? */ 1497 if (empty) { 1498 skip = span; /* Skip the entire URB */ 1499 status = 1; /* and give it back immediately */ 1500 iso_sched_free(stream, sched); 1501 sched = NULL; 1502 } 1503 } 1504 urb->error_count = skip / period; 1505 if (sched) 1506 sched->first_packet = urb->error_count; 1507 goto use_start; 1508 1509 do_ASAP: 1510 /* Use the first slot after "next" */ 1511 start = next + ((start - next) & (period - 1)); 1512 1513 use_start: 1514 /* Tried to schedule too far into the future? 
*/ 1515 if (unlikely(start + span - period >= mod + wrap)) { 1516 ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n", 1517 urb, start, span - period, mod + wrap); 1518 status = -EFBIG; 1519 goto fail; 1520 } 1521 1522 start += base; 1523 stream->next_uframe = (start + skip) & (mod - 1); 1524 1525 /* report high speed start in uframes; full speed, in frames */ 1526 urb->start_frame = start & (mod - 1); 1527 if (!stream->highspeed) 1528 urb->start_frame >>= 3; 1529 1530 /* Make sure scan_isoc() sees these */ 1531 if (ehci->isoc_count == 0) 1532 ehci->last_iso_frame = now >> 3; 1533 return status; 1534 1535 fail: 1536 iso_sched_free(stream, sched); 1537 urb->hcpriv = NULL; 1538 return status; 1539 } 1540 1541 /*-------------------------------------------------------------------------*/ 1542 1543 static inline void 1544 itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream, 1545 struct ehci_itd *itd) 1546 { 1547 int i; 1548 1549 /* it's been recently zeroed */ 1550 itd->hw_next = EHCI_LIST_END(ehci); 1551 itd->hw_bufp [0] = stream->buf0; 1552 itd->hw_bufp [1] = stream->buf1; 1553 itd->hw_bufp [2] = stream->buf2; 1554 1555 for (i = 0; i < 8; i++) 1556 itd->index[i] = -1; 1557 1558 /* All other fields are filled when scheduling */ 1559 } 1560 1561 static inline void 1562 itd_patch( 1563 struct ehci_hcd *ehci, 1564 struct ehci_itd *itd, 1565 struct ehci_iso_sched *iso_sched, 1566 unsigned index, 1567 u16 uframe 1568 ) 1569 { 1570 struct ehci_iso_packet *uf = &iso_sched->packet [index]; 1571 unsigned pg = itd->pg; 1572 1573 // BUG_ON (pg == 6 && uf->cross); 1574 1575 uframe &= 0x07; 1576 itd->index [uframe] = index; 1577 1578 itd->hw_transaction[uframe] = uf->transaction; 1579 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12); 1580 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0); 1581 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32)); 1582 1583 /* iso_frame_desc[].offset must be strictly increasing */ 1584 if (unlikely (uf->cross)) { 1585 u64 bufp = uf->bufp + 4096; 1586 1587 itd->pg = ++pg; 1588 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0); 1589 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32)); 1590 } 1591 } 1592 1593 static inline void 1594 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) 1595 { 1596 union ehci_shadow *prev = &ehci->pshadow[frame]; 1597 __hc32 *hw_p = &ehci->periodic[frame]; 1598 union ehci_shadow here = *prev; 1599 __hc32 type = 0; 1600 1601 /* skip any iso nodes which might belong to previous microframes */ 1602 while (here.ptr) { 1603 type = Q_NEXT_TYPE(ehci, *hw_p); 1604 if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) 1605 break; 1606 prev = periodic_next_shadow(ehci, prev, type); 1607 hw_p = shadow_next_periodic(ehci, &here, type); 1608 here = *prev; 1609 } 1610 1611 itd->itd_next = here; 1612 itd->hw_next = *hw_p; 1613 prev->itd = itd; 1614 itd->frame = frame; 1615 wmb (); 1616 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); 1617 } 1618 1619 /* fit urb's itds into the selected schedule slot; activate as needed */ 1620 static void itd_link_urb( 1621 struct ehci_hcd *ehci, 1622 struct urb *urb, 1623 unsigned mod, 1624 struct ehci_iso_stream *stream 1625 ) 1626 { 1627 int packet; 1628 unsigned next_uframe, uframe, frame; 1629 struct ehci_iso_sched *iso_sched = urb->hcpriv; 1630 struct ehci_itd *itd; 1631 1632 next_uframe = stream->next_uframe & (mod - 1); 1633 1634 if (unlikely (list_empty(&stream->td_list))) 1635 ehci_to_hcd(ehci)->self.bandwidth_allocated 1636 += stream->bandwidth; 
1637 1638 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { 1639 if (ehci->amd_pll_fix == 1) 1640 usb_amd_quirk_pll_disable(); 1641 } 1642 1643 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; 1644 1645 /* fill iTDs uframe by uframe */ 1646 for (packet = iso_sched->first_packet, itd = NULL; 1647 packet < urb->number_of_packets;) { 1648 if (itd == NULL) { 1649 /* ASSERT: we have all necessary itds */ 1650 // BUG_ON (list_empty (&iso_sched->td_list)); 1651 1652 /* ASSERT: no itds for this endpoint in this uframe */ 1653 1654 itd = list_entry (iso_sched->td_list.next, 1655 struct ehci_itd, itd_list); 1656 list_move_tail (&itd->itd_list, &stream->td_list); 1657 itd->stream = stream; 1658 itd->urb = urb; 1659 itd_init (ehci, stream, itd); 1660 } 1661 1662 uframe = next_uframe & 0x07; 1663 frame = next_uframe >> 3; 1664 1665 itd_patch(ehci, itd, iso_sched, packet, uframe); 1666 1667 next_uframe += stream->interval; 1668 next_uframe &= mod - 1; 1669 packet++; 1670 1671 /* link completed itds into the schedule */ 1672 if (((next_uframe >> 3) != frame) 1673 || packet == urb->number_of_packets) { 1674 itd_link(ehci, frame & (ehci->periodic_size - 1), itd); 1675 itd = NULL; 1676 } 1677 } 1678 stream->next_uframe = next_uframe; 1679 1680 /* don't need that schedule data any more */ 1681 iso_sched_free (stream, iso_sched); 1682 urb->hcpriv = stream; 1683 1684 ++ehci->isoc_count; 1685 enable_periodic(ehci); 1686 } 1687 1688 #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) 1689 1690 /* Process and recycle a completed ITD. Return true iff its urb completed, 1691 * and hence its completion callback probably added things to the hardware 1692 * schedule. 1693 * 1694 * Note that we carefully avoid recycling this descriptor until after any 1695 * completion callback runs, so that it won't be reused quickly. That is, 1696 * assuming (a) no more than two urbs per frame on this endpoint, and also 1697 * (b) only this endpoint's completions submit URBs. It seems some silicon 1698 * corrupts things if you reuse completed descriptors very quickly... 1699 */ 1700 static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd) 1701 { 1702 struct urb *urb = itd->urb; 1703 struct usb_iso_packet_descriptor *desc; 1704 u32 t; 1705 unsigned uframe; 1706 int urb_index = -1; 1707 struct ehci_iso_stream *stream = itd->stream; 1708 struct usb_device *dev; 1709 bool retval = false; 1710 1711 /* for each uframe with a packet */ 1712 for (uframe = 0; uframe < 8; uframe++) { 1713 if (likely (itd->index[uframe] == -1)) 1714 continue; 1715 urb_index = itd->index[uframe]; 1716 desc = &urb->iso_frame_desc [urb_index]; 1717 1718 t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]); 1719 itd->hw_transaction [uframe] = 0; 1720 1721 /* report transfer status */ 1722 if (unlikely (t & ISO_ERRS)) { 1723 urb->error_count++; 1724 if (t & EHCI_ISOC_BUF_ERR) 1725 desc->status = usb_pipein (urb->pipe) 1726 ? 
-ENOSR /* hc couldn't read */ 1727 : -ECOMM; /* hc couldn't write */ 1728 else if (t & EHCI_ISOC_BABBLE) 1729 desc->status = -EOVERFLOW; 1730 else /* (t & EHCI_ISOC_XACTERR) */ 1731 desc->status = -EPROTO; 1732 1733 /* HC need not update length with this error */ 1734 if (!(t & EHCI_ISOC_BABBLE)) { 1735 desc->actual_length = EHCI_ITD_LENGTH(t); 1736 urb->actual_length += desc->actual_length; 1737 } 1738 } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { 1739 desc->status = 0; 1740 desc->actual_length = EHCI_ITD_LENGTH(t); 1741 urb->actual_length += desc->actual_length; 1742 } else { 1743 /* URB was too late */ 1744 urb->error_count++; 1745 } 1746 } 1747 1748 /* handle completion now? */ 1749 if (likely ((urb_index + 1) != urb->number_of_packets)) 1750 goto done; 1751 1752 /* ASSERT: it's really the last itd for this urb 1753 list_for_each_entry (itd, &stream->td_list, itd_list) 1754 BUG_ON (itd->urb == urb); 1755 */ 1756 1757 /* give urb back to the driver; completion often (re)submits */ 1758 dev = urb->dev; 1759 ehci_urb_done(ehci, urb, 0); 1760 retval = true; 1761 urb = NULL; 1762 1763 --ehci->isoc_count; 1764 disable_periodic(ehci); 1765 1766 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 1767 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { 1768 if (ehci->amd_pll_fix == 1) 1769 usb_amd_quirk_pll_enable(); 1770 } 1771 1772 if (unlikely(list_is_singular(&stream->td_list))) 1773 ehci_to_hcd(ehci)->self.bandwidth_allocated 1774 -= stream->bandwidth; 1775 1776 done: 1777 itd->urb = NULL; 1778 1779 /* Add to the end of the free list for later reuse */ 1780 list_move_tail(&itd->itd_list, &stream->free_list); 1781 1782 /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */ 1783 if (list_empty(&stream->td_list)) { 1784 list_splice_tail_init(&stream->free_list, 1785 &ehci->cached_itd_list); 1786 start_free_itds(ehci); 1787 } 1788 1789 return retval; 1790 } 1791 1792 /*-------------------------------------------------------------------------*/ 1793 1794 static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, 1795 gfp_t mem_flags) 1796 { 1797 int status = -EINVAL; 1798 unsigned long flags; 1799 struct ehci_iso_stream *stream; 1800 1801 /* Get iso_stream head */ 1802 stream = iso_stream_find (ehci, urb); 1803 if (unlikely (stream == NULL)) { 1804 ehci_dbg (ehci, "can't get iso stream\n"); 1805 return -ENOMEM; 1806 } 1807 if (unlikely (urb->interval != stream->interval)) { 1808 ehci_dbg (ehci, "can't change iso interval %d --> %d\n", 1809 stream->interval, urb->interval); 1810 goto done; 1811 } 1812 1813 #ifdef EHCI_URB_TRACE 1814 ehci_dbg (ehci, 1815 "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n", 1816 __func__, urb->dev->devpath, urb, 1817 usb_pipeendpoint (urb->pipe), 1818 usb_pipein (urb->pipe) ? "in" : "out", 1819 urb->transfer_buffer_length, 1820 urb->number_of_packets, urb->interval, 1821 stream); 1822 #endif 1823 1824 /* allocate ITDs w/o locking anything */ 1825 status = itd_urb_transaction (stream, ehci, urb, mem_flags); 1826 if (unlikely (status < 0)) { 1827 ehci_dbg (ehci, "can't init itds\n"); 1828 goto done; 1829 } 1830 1831 /* schedule ... 
need to lock */ 1832 spin_lock_irqsave (&ehci->lock, flags); 1833 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { 1834 status = -ESHUTDOWN; 1835 goto done_not_linked; 1836 } 1837 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); 1838 if (unlikely(status)) 1839 goto done_not_linked; 1840 status = iso_stream_schedule(ehci, urb, stream); 1841 if (likely(status == 0)) { 1842 itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); 1843 } else if (status > 0) { 1844 status = 0; 1845 ehci_urb_done(ehci, urb, 0); 1846 } else { 1847 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 1848 } 1849 done_not_linked: 1850 spin_unlock_irqrestore (&ehci->lock, flags); 1851 done: 1852 return status; 1853 } 1854 1855 /*-------------------------------------------------------------------------*/ 1856 1857 /* 1858 * "Split ISO TDs" ... used for USB 1.1 devices going through the 1859 * TTs in USB 2.0 hubs. These need microframe scheduling. 1860 */ 1861 1862 static inline void 1863 sitd_sched_init( 1864 struct ehci_hcd *ehci, 1865 struct ehci_iso_sched *iso_sched, 1866 struct ehci_iso_stream *stream, 1867 struct urb *urb 1868 ) 1869 { 1870 unsigned i; 1871 dma_addr_t dma = urb->transfer_dma; 1872 1873 /* how many frames are needed for these transfers */ 1874 iso_sched->span = urb->number_of_packets * stream->interval; 1875 1876 /* figure out per-frame sitd fields that we'll need later 1877 * when we fit new sitds into the schedule. 1878 */ 1879 for (i = 0; i < urb->number_of_packets; i++) { 1880 struct ehci_iso_packet *packet = &iso_sched->packet [i]; 1881 unsigned length; 1882 dma_addr_t buf; 1883 u32 trans; 1884 1885 length = urb->iso_frame_desc [i].length & 0x03ff; 1886 buf = dma + urb->iso_frame_desc [i].offset; 1887 1888 trans = SITD_STS_ACTIVE; 1889 if (((i + 1) == urb->number_of_packets) 1890 && !(urb->transfer_flags & URB_NO_INTERRUPT)) 1891 trans |= SITD_IOC; 1892 trans |= length << 16; 1893 packet->transaction = cpu_to_hc32(ehci, trans); 1894 1895 /* might need to cross a buffer page within a td */ 1896 packet->bufp = buf; 1897 packet->buf1 = (buf + length) & ~0x0fff; 1898 if (packet->buf1 != (buf & ~(u64)0x0fff)) 1899 packet->cross = 1; 1900 1901 /* OUT uses multiple start-splits */ 1902 if (stream->bEndpointAddress & USB_DIR_IN) 1903 continue; 1904 length = (length + 187) / 188; 1905 if (length > 1) /* BEGIN vs ALL */ 1906 length |= 1 << 3; 1907 packet->buf1 |= length; 1908 } 1909 } 1910 1911 static int 1912 sitd_urb_transaction ( 1913 struct ehci_iso_stream *stream, 1914 struct ehci_hcd *ehci, 1915 struct urb *urb, 1916 gfp_t mem_flags 1917 ) 1918 { 1919 struct ehci_sitd *sitd; 1920 dma_addr_t sitd_dma; 1921 int i; 1922 struct ehci_iso_sched *iso_sched; 1923 unsigned long flags; 1924 1925 iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags); 1926 if (iso_sched == NULL) 1927 return -ENOMEM; 1928 1929 sitd_sched_init(ehci, iso_sched, stream, urb); 1930 1931 /* allocate/init sITDs */ 1932 spin_lock_irqsave (&ehci->lock, flags); 1933 for (i = 0; i < urb->number_of_packets; i++) { 1934 1935 /* NOTE: for now, we don't try to handle wraparound cases 1936 * for IN (using sitd->hw_backpointer, like a FSTN), which 1937 * means we never need two sitds for full speed packets. 1938 */ 1939 1940 /* 1941 * Use siTDs from the free list, but not siTDs that may 1942 * still be in use by the hardware. 
static int
sitd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE: for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		sitd->frame = 9999;		/* an invalid value */
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	u64			bufp = uf->bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow [frame];
	sitd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].sitd = sitd;
	sitd->frame = frame;
	wmb ();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
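/*
 * The wmb() above ensures the siTD's contents are visible to the
 * controller before the periodic-list entry that points at it, since the
 * hardware may follow the new link as soon as that entry is written.
 */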
/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT: we have all necessary sitds */
		BUG_ON (list_empty (&sched->td_list));

		/* ASSERT: no itds for this endpoint in this frame */

		sitd = list_entry (sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail (&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->interval << 3;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free (stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
			| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = sitd->stream;
	struct usb_device			*dev;
	bool					retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc [urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* hc couldn't read */
				: -ECOMM; /* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}
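	/*
	 * Only the siTD carrying the URB's final packet completes the URB;
	 * siTDs for earlier packets are simply recycled below.
	 */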
	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/* ASSERT: it's really the last sitd for this urb
	list_for_each_entry (sitd, &stream->td_list, sitd_list)
		BUG_ON (sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	dev = urb->dev;
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}


static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (stream == NULL) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->interval) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg (ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/
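/*
 * Scan the periodic schedule for iTDs and siTDs that the controller has
 * finished with, give their URBs back, and recycle the descriptors.
 * Completion handlers often resubmit, so the current frame's list is
 * rescanned whenever a completion modified the schedule.
 */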
static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned	uf, now_frame, frame;
	unsigned	fmask = ehci->periodic_size - 1;
	bool		modified, live;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible: cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;
	for (;;) {
		union ehci_shadow	q, *q_p;
		__hc32			type, *hw_p;

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(ehci, *hw_p);
		modified = false;

		while (q.ptr != NULL) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				/* If this ITD is still active, leave it for
				 * later processing ... check the next entry.
				 * No need to check for activity unless the
				 * frame is current.
				 */
				if (frame == now_frame && live) {
					rmb();
					for (uf = 0; uf < 8; uf++) {
						if (q.itd->hw_transaction[uf] &
							    ITD_ACTIVE(ehci))
							break;
					}
					if (uf < 8) {
						q_p = &q.itd->itd_next;
						hw_p = &q.itd->hw_next;
						type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
						q = *q_p;
						break;
					}
				}

				/* Take finished ITDs out of the schedule
				 * and process them: recycle, maybe report
				 * URB completion.  HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				if (!ehci->use_dummy_qh ||
				    q.itd->hw_next != EHCI_LIST_END(ehci))
					*hw_p = q.itd->hw_next;
				else
					*hw_p = ehci->dummy->qh_dma;
				type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				/* If this SITD is still active, leave it for
				 * later processing ... check the next entry.
				 * No need to check for activity unless the
				 * frame is current.
				 */
				if (((frame == now_frame) ||
				     (((frame + 1) & fmask) == now_frame))
				    && live
				    && (q.sitd->hw_results &
					SITD_ACTIVE(ehci))) {

					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.sitd->hw_next);
					q = *q_p;
					break;
				}

				/* Take finished SITDs out of the schedule
				 * and process them: recycle, maybe report
				 * URB completion.
				 */
				*q_p = q.sitd->sitd_next;
				if (!ehci->use_dummy_qh ||
				    q.sitd->hw_next != EHCI_LIST_END(ehci))
					*hw_p = q.sitd->hw_next;
				else
					*hw_p = ehci->dummy->qh_dma;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd);
				q = *q_p;
				break;
			default:
				ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
				// BUG ();
				/* FALL THROUGH */
			case Q_TYPE_QH:
			case Q_TYPE_FSTN:
				/* End of the iTDs and siTDs */
				q.ptr = NULL;
				break;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified && ehci->isoc_count > 0))
				goto restart;
		}

		/* Stop when we have reached the current frame */
		if (frame == now_frame)
			break;

		/* The last frame may still have active siTDs */
		ehci->last_iso_frame = frame;
		frame = (frame + 1) & fmask;
	}
}