// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}

/*
 * Like periodic_next_shadow(), but return a pointer to the hardware
 * "next" field of the record instead of the software shadow link.
 */
static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}

/*
 * Remove the entry "ptr" from one frame's periodic list, keeping the
 * software shadow list and the hardware schedule in sync.
 *
 * caller must hold ehci->lock
 */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	/*
	 * When ehci->use_dummy_qh is set, never store a bare list-end
	 * marker in the hardware schedule; point at the dummy QH instead.
	 */
	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/*
 * Find the TT data structure for this device; create it if necessary.
 *
 * Returns NULL when the device is not below a TT, or ERR_PTR(-ENOMEM)
 * when an allocation fails.  Allocations use GFP_ATOMIC because this
 * can be called with ehci->lock held.
 */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	unsigned		port;
	bool			allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kcalloc(utt->hub->maxchild,
					sizeof(*tt_index),
					GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd		*ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			/* Roll back the index array we just created */
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}

/*
 * Release the TT above udev, if it's not in use.
 * For a multi-TT hub, the per-port index array is also freed once the
 * last ehci_tt entry in it goes away.
 */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	int			cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	/* cnt == 1 means this was the last entry in the index array */
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}

/* Log one periodic-bandwidth reservation (sign >= 0) or release */
static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}

/*
 * Reserve (sign == 1) or release (sign == -1) the high-speed bus time
 * used by an interrupt QH, and the FS/LS bus time on its TT if it has
 * one.  Does nothing if no bandwidth was ever reserved for this QH
 * (qh->ps.phase == NO_FRAME).
 */
static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned		start_uf;
	unsigned		i, j, m;
	int			usecs = qh->ps.usecs;
	int			c_usecs = qh->ps.c_usecs;
	int			tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			/* each set bit in cs_mask marks a CSPLIT uframe */
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		/*
		 * NOTE(review): find_tt() can return an ERR_PTR, but by the
		 * time bandwidth has been reserved the TT already exists
		 * (qh_schedule() checked it), so no error check is done here.
		 */
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

/*-------------------------------------------------------------------------*/

/*
 * Rebuild the per-uframe FS/LS budget table for all endpoints currently
 * using this TT.  Each table entry is capped at 125 us (one uframe);
 * excess time spills forward into the following uframes.
 */
static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched	*ps;
	unsigned		uframe, uf, x;
	u8			*budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}

/* Return true iff dev1 and dev2 share the same transaction translator */
static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/*
 * Usable periodic FS/LS bus time per uframe: full 125 us in uframes
 * 0-5, only 30 us in uframe 6, and none in uframe 7.
 */
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}

/* 327 * Return true if the device's tt's downstream bus is available for a 328 * periodic transfer of the specified length (usecs), starting at the 329 * specified frame/uframe. Note that (as summarized in section 11.19 330 * of the usb 2.0 spec) TTs can buffer multiple transactions for each 331 * uframe. 332 * 333 * The uframe parameter is when the fullspeed/lowspeed transfer 334 * should be executed in "B-frame" terms, which is the same as the 335 * highspeed ssplit's uframe (which is in "H-frame" terms). For example 336 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0. 337 * See the EHCI spec sec 4.5 and fig 4.7. 338 * 339 * This checks if the full/lowspeed bus, at the specified starting uframe, 340 * has the specified bandwidth available, according to rules listed 341 * in USB 2.0 spec section 11.18.1 fig 11-60. 342 * 343 * This does not check if the transfer would exceed the max ssplit 344 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4, 345 * since proper scheduling limits ssplits to less than 16 per uframe. 
346 */ 347 static int tt_available( 348 struct ehci_hcd *ehci, 349 struct ehci_per_sched *ps, 350 struct ehci_tt *tt, 351 unsigned frame, 352 unsigned uframe 353 ) 354 { 355 unsigned period = ps->bw_period; 356 unsigned usecs = ps->tt_usecs; 357 358 if ((period == 0) || (uframe >= 7)) /* error */ 359 return 0; 360 361 for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES; 362 frame += period) { 363 unsigned i, uf; 364 unsigned short tt_usecs[8]; 365 366 if (tt->bandwidth[frame] + usecs > 900) 367 return 0; 368 369 uf = frame << 3; 370 for (i = 0; i < 8; (++i, ++uf)) 371 tt_usecs[i] = ehci->tt_budget[uf]; 372 373 if (max_tt_usecs[uframe] <= tt_usecs[uframe]) 374 return 0; 375 376 /* special case for isoc transfers larger than 125us: 377 * the first and each subsequent fully used uframe 378 * must be empty, so as to not illegally delay 379 * already scheduled transactions 380 */ 381 if (usecs > 125) { 382 int ufs = (usecs / 125); 383 384 for (i = uframe; i < (uframe + ufs) && i < 8; i++) 385 if (tt_usecs[i] > 0) 386 return 0; 387 } 388 389 tt_usecs[uframe] += usecs; 390 391 carryover_tt_bandwidth(tt_usecs); 392 393 /* fail if the carryover pushed bw past the last uframe's limit */ 394 if (max_tt_usecs[7] < tt_usecs[7]) 395 return 0; 396 } 397 398 return 1; 399 } 400 401 #else 402 403 /* return true iff the device's transaction translator is available 404 * for a periodic transfer starting at the specified frame, using 405 * all the uframes in the mask. 406 */ 407 static int tt_no_collision( 408 struct ehci_hcd *ehci, 409 unsigned period, 410 struct usb_device *dev, 411 unsigned frame, 412 u32 uf_mask 413 ) 414 { 415 if (period == 0) /* error */ 416 return 0; 417 418 /* note bandwidth wastage: split never follows csplit 419 * (different dev or endpoint) until the next uframe. 420 * calling convention doesn't make that distinction. 
421 */ 422 for (; frame < ehci->periodic_size; frame += period) { 423 union ehci_shadow here; 424 __hc32 type; 425 struct ehci_qh_hw *hw; 426 427 here = ehci->pshadow[frame]; 428 type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]); 429 while (here.ptr) { 430 switch (hc32_to_cpu(ehci, type)) { 431 case Q_TYPE_ITD: 432 type = Q_NEXT_TYPE(ehci, here.itd->hw_next); 433 here = here.itd->itd_next; 434 continue; 435 case Q_TYPE_QH: 436 hw = here.qh->hw; 437 if (same_tt(dev, here.qh->ps.udev)) { 438 u32 mask; 439 440 mask = hc32_to_cpu(ehci, 441 hw->hw_info2); 442 /* "knows" no gap is needed */ 443 mask |= mask >> 8; 444 if (mask & uf_mask) 445 break; 446 } 447 type = Q_NEXT_TYPE(ehci, hw->hw_next); 448 here = here.qh->qh_next; 449 continue; 450 case Q_TYPE_SITD: 451 if (same_tt(dev, here.sitd->urb->dev)) { 452 u16 mask; 453 454 mask = hc32_to_cpu(ehci, here.sitd 455 ->hw_uframe); 456 /* FIXME assumes no gap for IN! */ 457 mask |= mask >> 8; 458 if (mask & uf_mask) 459 break; 460 } 461 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); 462 here = here.sitd->sitd_next; 463 continue; 464 /* case Q_TYPE_FSTN: */ 465 default: 466 ehci_dbg(ehci, 467 "periodic frame %d bogus type %d\n", 468 frame, type); 469 } 470 471 /* collision or error */ 472 return 0; 473 } 474 } 475 476 /* no collision */ 477 return 1; 478 } 479 480 #endif /* CONFIG_USB_EHCI_TT_NEWSCHED */ 481 482 /*-------------------------------------------------------------------------*/ 483 484 static void enable_periodic(struct ehci_hcd *ehci) 485 { 486 if (ehci->periodic_count++) 487 return; 488 489 /* Stop waiting to turn off the periodic schedule */ 490 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); 491 492 /* Don't start the schedule until PSS is 0 */ 493 ehci_poll_PSS(ehci); 494 turn_on_io_watchdog(ehci); 495 } 496 497 static void disable_periodic(struct ehci_hcd *ehci) 498 { 499 if (--ehci->periodic_count) 500 return; 501 502 /* Don't turn off the schedule until PSS is 1 */ 503 ehci_poll_PSS(ehci); 
504 } 505 506 /*-------------------------------------------------------------------------*/ 507 508 /* periodic schedule slots have iso tds (normal or split) first, then a 509 * sparse tree for active interrupt transfers. 510 * 511 * this just links in a qh; caller guarantees uframe masks are set right. 512 * no FSTN support (yet; ehci 0.96+) 513 */ 514 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) 515 { 516 unsigned i; 517 unsigned period = qh->ps.period; 518 519 dev_dbg(&qh->ps.udev->dev, 520 "link qh%d-%04x/%p start %d [%d/%d us]\n", 521 period, hc32_to_cpup(ehci, &qh->hw->hw_info2) 522 & (QH_CMASK | QH_SMASK), 523 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); 524 525 /* high bandwidth, or otherwise every microframe */ 526 if (period == 0) 527 period = 1; 528 529 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) { 530 union ehci_shadow *prev = &ehci->pshadow[i]; 531 __hc32 *hw_p = &ehci->periodic[i]; 532 union ehci_shadow here = *prev; 533 __hc32 type = 0; 534 535 /* skip the iso nodes at list head */ 536 while (here.ptr) { 537 type = Q_NEXT_TYPE(ehci, *hw_p); 538 if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) 539 break; 540 prev = periodic_next_shadow(ehci, prev, type); 541 hw_p = shadow_next_periodic(ehci, &here, type); 542 here = *prev; 543 } 544 545 /* sorting each branch by period (slow-->fast) 546 * enables sharing interior tree nodes 547 */ 548 while (here.ptr && qh != here.qh) { 549 if (qh->ps.period > here.qh->ps.period) 550 break; 551 prev = &here.qh->qh_next; 552 hw_p = &here.qh->hw->hw_next; 553 here = *prev; 554 } 555 /* link in this qh, unless some earlier pass did that */ 556 if (qh != here.qh) { 557 qh->qh_next = here; 558 if (here.qh) 559 qh->hw->hw_next = *hw_p; 560 wmb(); 561 prev->qh = qh; 562 *hw_p = QH_NEXT(ehci, qh->qh_dma); 563 } 564 } 565 qh->qh_state = QH_STATE_LINKED; 566 qh->xacterrs = 0; 567 qh->unlink_reason = 0; 568 569 /* update per-qh bandwidth for debugfs */ 570 
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period 571 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) 572 : (qh->ps.usecs * 8); 573 574 list_add(&qh->intr_node, &ehci->intr_qh_list); 575 576 /* maybe enable periodic schedule processing */ 577 ++ehci->intr_count; 578 enable_periodic(ehci); 579 } 580 581 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) 582 { 583 unsigned i; 584 unsigned period; 585 586 /* 587 * If qh is for a low/full-speed device, simply unlinking it 588 * could interfere with an ongoing split transaction. To unlink 589 * it safely would require setting the QH_INACTIVATE bit and 590 * waiting at least one frame, as described in EHCI 4.12.2.5. 591 * 592 * We won't bother with any of this. Instead, we assume that the 593 * only reason for unlinking an interrupt QH while the current URB 594 * is still active is to dequeue all the URBs (flush the whole 595 * endpoint queue). 596 * 597 * If rebalancing the periodic schedule is ever implemented, this 598 * approach will no longer be valid. 599 */ 600 601 /* high bandwidth, or otherwise part of every microframe */ 602 period = qh->ps.period ? : 1; 603 604 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) 605 periodic_unlink(ehci, i, qh); 606 607 /* update per-qh bandwidth for debugfs */ 608 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period 609 ? 
((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) 610 : (qh->ps.usecs * 8); 611 612 dev_dbg(&qh->ps.udev->dev, 613 "unlink qh%d-%04x/%p start %d [%d/%d us]\n", 614 qh->ps.period, 615 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), 616 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); 617 618 /* qh->qh_next still "live" to HC */ 619 qh->qh_state = QH_STATE_UNLINK; 620 qh->qh_next.ptr = NULL; 621 622 if (ehci->qh_scan_next == qh) 623 ehci->qh_scan_next = list_entry(qh->intr_node.next, 624 struct ehci_qh, intr_node); 625 list_del(&qh->intr_node); 626 } 627 628 static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) 629 { 630 if (qh->qh_state != QH_STATE_LINKED || 631 list_empty(&qh->unlink_node)) 632 return; 633 634 list_del_init(&qh->unlink_node); 635 636 /* 637 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for 638 * avoiding unnecessary CPU wakeup 639 */ 640 } 641 642 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) 643 { 644 /* If the QH isn't linked then there's nothing we can do. */ 645 if (qh->qh_state != QH_STATE_LINKED) 646 return; 647 648 /* if the qh is waiting for unlink, cancel it now */ 649 cancel_unlink_wait_intr(ehci, qh); 650 651 qh_unlink_periodic(ehci, qh); 652 653 /* Make sure the unlinks are visible before starting the timer */ 654 wmb(); 655 656 /* 657 * The EHCI spec doesn't say how long it takes the controller to 658 * stop accessing an unlinked interrupt QH. The timer delay is 659 * 9 uframes; presumably that will be long enough. 
660 */ 661 qh->unlink_cycle = ehci->intr_unlink_cycle; 662 663 /* New entries go at the end of the intr_unlink list */ 664 list_add_tail(&qh->unlink_node, &ehci->intr_unlink); 665 666 if (ehci->intr_unlinking) 667 ; /* Avoid recursive calls */ 668 else if (ehci->rh_state < EHCI_RH_RUNNING) 669 ehci_handle_intr_unlinks(ehci); 670 else if (ehci->intr_unlink.next == &qh->unlink_node) { 671 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true); 672 ++ehci->intr_unlink_cycle; 673 } 674 } 675 676 /* 677 * It is common only one intr URB is scheduled on one qh, and 678 * given complete() is run in tasklet context, introduce a bit 679 * delay to avoid unlink qh too early. 680 */ 681 static void start_unlink_intr_wait(struct ehci_hcd *ehci, 682 struct ehci_qh *qh) 683 { 684 qh->unlink_cycle = ehci->intr_unlink_wait_cycle; 685 686 /* New entries go at the end of the intr_unlink_wait list */ 687 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait); 688 689 if (ehci->rh_state < EHCI_RH_RUNNING) 690 ehci_handle_start_intr_unlinks(ehci); 691 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) { 692 ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true); 693 ++ehci->intr_unlink_wait_cycle; 694 } 695 } 696 697 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) 698 { 699 struct ehci_qh_hw *hw = qh->hw; 700 int rc; 701 702 qh->qh_state = QH_STATE_IDLE; 703 hw->hw_next = EHCI_LIST_END(ehci); 704 705 if (!list_empty(&qh->qtd_list)) 706 qh_completions(ehci, qh); 707 708 /* reschedule QH iff another request is queued */ 709 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { 710 rc = qh_schedule(ehci, qh); 711 if (rc == 0) { 712 qh_refresh(ehci, qh); 713 qh_link_periodic(ehci, qh); 714 } 715 716 /* An error here likely indicates handshake failure 717 * or no space left in the schedule. Neither fault 718 * should happen often ... 
719 * 720 * FIXME kill the now-dysfunctional queued urbs 721 */ 722 else { 723 ehci_err(ehci, "can't reschedule qh %p, err %d\n", 724 qh, rc); 725 } 726 } 727 728 /* maybe turn off periodic schedule */ 729 --ehci->intr_count; 730 disable_periodic(ehci); 731 } 732 733 /*-------------------------------------------------------------------------*/ 734 735 static int check_period( 736 struct ehci_hcd *ehci, 737 unsigned frame, 738 unsigned uframe, 739 unsigned uperiod, 740 unsigned usecs 741 ) { 742 /* complete split running into next frame? 743 * given FSTN support, we could sometimes check... 744 */ 745 if (uframe >= 8) 746 return 0; 747 748 /* convert "usecs we need" to "max already claimed" */ 749 usecs = ehci->uframe_periodic_max - usecs; 750 751 for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE; 752 uframe += uperiod) { 753 if (ehci->bandwidth[uframe] > usecs) 754 return 0; 755 } 756 757 /* success! */ 758 return 1; 759 } 760 761 static int check_intr_schedule( 762 struct ehci_hcd *ehci, 763 unsigned frame, 764 unsigned uframe, 765 struct ehci_qh *qh, 766 unsigned *c_maskp, 767 struct ehci_tt *tt 768 ) 769 { 770 int retval = -ENOSPC; 771 u8 mask = 0; 772 773 if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */ 774 goto done; 775 776 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs)) 777 goto done; 778 if (!qh->ps.c_usecs) { 779 retval = 0; 780 *c_maskp = 0; 781 goto done; 782 } 783 784 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED 785 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) { 786 unsigned i; 787 788 /* TODO : this may need FSTN for SSPLIT in uframe 5. */ 789 for (i = uframe+2; i < 8 && i <= uframe+4; i++) 790 if (!check_period(ehci, frame, i, 791 qh->ps.bw_uperiod, qh->ps.c_usecs)) 792 goto done; 793 else 794 mask |= 1 << i; 795 796 retval = 0; 797 798 *c_maskp = mask; 799 } 800 #else 801 /* Make sure this tt's buffer is also available for CSPLITs. 
802 * We pessimize a bit; probably the typical full speed case 803 * doesn't need the second CSPLIT. 804 * 805 * NOTE: both SPLIT and CSPLIT could be checked in just 806 * one smart pass... 807 */ 808 mask = 0x03 << (uframe + qh->gap_uf); 809 *c_maskp = mask; 810 811 mask |= 1 << uframe; 812 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) { 813 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1, 814 qh->ps.bw_uperiod, qh->ps.c_usecs)) 815 goto done; 816 if (!check_period(ehci, frame, uframe + qh->gap_uf, 817 qh->ps.bw_uperiod, qh->ps.c_usecs)) 818 goto done; 819 retval = 0; 820 } 821 #endif 822 done: 823 return retval; 824 } 825 826 /* "first fit" scheduling policy used the first time through, 827 * or when the previous schedule slot can't be re-used. 828 */ 829 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh) 830 { 831 int status = 0; 832 unsigned uframe; 833 unsigned c_mask; 834 struct ehci_qh_hw *hw = qh->hw; 835 struct ehci_tt *tt; 836 837 hw->hw_next = EHCI_LIST_END(ehci); 838 839 /* reuse the previous schedule slots, if we can */ 840 if (qh->ps.phase != NO_FRAME) { 841 ehci_dbg(ehci, "reused qh %p schedule\n", qh); 842 return 0; 843 } 844 845 uframe = 0; 846 c_mask = 0; 847 tt = find_tt(qh->ps.udev); 848 if (IS_ERR(tt)) { 849 status = PTR_ERR(tt); 850 goto done; 851 } 852 compute_tt_budget(ehci->tt_budget, tt); 853 854 /* else scan the schedule to find a group of slots such that all 855 * uframes have enough periodic bandwidth available. 
856 */ 857 /* "normal" case, uframing flexible except with splits */ 858 if (qh->ps.bw_period) { 859 int i; 860 unsigned frame; 861 862 for (i = qh->ps.bw_period; i > 0; --i) { 863 frame = ++ehci->random_frame & (qh->ps.bw_period - 1); 864 for (uframe = 0; uframe < 8; uframe++) { 865 status = check_intr_schedule(ehci, 866 frame, uframe, qh, &c_mask, tt); 867 if (status == 0) 868 goto got_it; 869 } 870 } 871 872 /* qh->ps.bw_period == 0 means every uframe */ 873 } else { 874 status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt); 875 } 876 if (status) 877 goto done; 878 879 got_it: 880 qh->ps.phase = (qh->ps.period ? ehci->random_frame & 881 (qh->ps.period - 1) : 0); 882 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1); 883 qh->ps.phase_uf = uframe; 884 qh->ps.cs_mask = qh->ps.period ? 885 (c_mask << 8) | (1 << uframe) : 886 QH_SMASK; 887 888 /* reset S-frame and (maybe) C-frame masks */ 889 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); 890 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask); 891 reserve_release_intr_bandwidth(ehci, qh, 1); 892 893 done: 894 return status; 895 } 896 897 static int intr_submit( 898 struct ehci_hcd *ehci, 899 struct urb *urb, 900 struct list_head *qtd_list, 901 gfp_t mem_flags 902 ) { 903 unsigned epnum; 904 unsigned long flags; 905 struct ehci_qh *qh; 906 int status; 907 struct list_head empty; 908 909 /* get endpoint and transfer/schedule data */ 910 epnum = urb->ep->desc.bEndpointAddress; 911 912 spin_lock_irqsave(&ehci->lock, flags); 913 914 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { 915 status = -ESHUTDOWN; 916 goto done_not_linked; 917 } 918 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); 919 if (unlikely(status)) 920 goto done_not_linked; 921 922 /* get qh and force any scheduling errors */ 923 INIT_LIST_HEAD(&empty); 924 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv); 925 if (qh == NULL) { 926 status = -ENOMEM; 927 goto done; 928 } 929 if (qh->qh_state == QH_STATE_IDLE) 
{ 930 status = qh_schedule(ehci, qh); 931 if (status) 932 goto done; 933 } 934 935 /* then queue the urb's tds to the qh */ 936 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); 937 BUG_ON(qh == NULL); 938 939 /* stuff into the periodic schedule */ 940 if (qh->qh_state == QH_STATE_IDLE) { 941 qh_refresh(ehci, qh); 942 qh_link_periodic(ehci, qh); 943 } else { 944 /* cancel unlink wait for the qh */ 945 cancel_unlink_wait_intr(ehci, qh); 946 } 947 948 /* ... update usbfs periodic stats */ 949 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; 950 951 done: 952 if (unlikely(status)) 953 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 954 done_not_linked: 955 spin_unlock_irqrestore(&ehci->lock, flags); 956 if (status) 957 qtd_list_free(ehci, urb, qtd_list); 958 959 return status; 960 } 961 962 static void scan_intr(struct ehci_hcd *ehci) 963 { 964 struct ehci_qh *qh; 965 966 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list, 967 intr_node) { 968 969 /* clean any finished work for this qh */ 970 if (!list_empty(&qh->qtd_list)) { 971 int temp; 972 973 /* 974 * Unlinks could happen here; completion reporting 975 * drops the lock. That's why ehci->qh_scan_next 976 * always holds the next qh to scan; if the next qh 977 * gets unlinked then ehci->qh_scan_next is adjusted 978 * in qh_unlink_periodic(). 
979 */ 980 temp = qh_completions(ehci, qh); 981 if (unlikely(temp)) 982 start_unlink_intr(ehci, qh); 983 else if (unlikely(list_empty(&qh->qtd_list) && 984 qh->qh_state == QH_STATE_LINKED)) 985 start_unlink_intr_wait(ehci, qh); 986 } 987 } 988 } 989 990 /*-------------------------------------------------------------------------*/ 991 992 /* ehci_iso_stream ops work with both ITD and SITD */ 993 994 static struct ehci_iso_stream * 995 iso_stream_alloc(gfp_t mem_flags) 996 { 997 struct ehci_iso_stream *stream; 998 999 stream = kzalloc(sizeof(*stream), mem_flags); 1000 if (likely(stream != NULL)) { 1001 INIT_LIST_HEAD(&stream->td_list); 1002 INIT_LIST_HEAD(&stream->free_list); 1003 stream->next_uframe = NO_FRAME; 1004 stream->ps.phase = NO_FRAME; 1005 } 1006 return stream; 1007 } 1008 1009 static void 1010 iso_stream_init( 1011 struct ehci_hcd *ehci, 1012 struct ehci_iso_stream *stream, 1013 struct urb *urb 1014 ) 1015 { 1016 static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f }; 1017 1018 struct usb_device *dev = urb->dev; 1019 u32 buf1; 1020 unsigned epnum, maxp; 1021 int is_input; 1022 unsigned tmp; 1023 1024 /* 1025 * this might be a "high bandwidth" highspeed endpoint, 1026 * as encoded in the ep descriptor's wMaxPacket field 1027 */ 1028 epnum = usb_pipeendpoint(urb->pipe); 1029 is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0; 1030 maxp = usb_endpoint_maxp(&urb->ep->desc); 1031 buf1 = is_input ? 1 << 11 : 0; 1032 1033 /* knows about ITD vs SITD */ 1034 if (dev->speed == USB_SPEED_HIGH) { 1035 unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc); 1036 1037 stream->highspeed = 1; 1038 1039 buf1 |= maxp; 1040 maxp *= multi; 1041 1042 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); 1043 stream->buf1 = cpu_to_hc32(ehci, buf1); 1044 stream->buf2 = cpu_to_hc32(ehci, multi); 1045 1046 /* usbfs wants to report the average usecs per frame tied up 1047 * when transfers on this endpoint are scheduled ... 
1048 */ 1049 stream->ps.usecs = HS_USECS_ISO(maxp); 1050 1051 /* period for bandwidth allocation */ 1052 tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE, 1053 1 << (urb->ep->desc.bInterval - 1)); 1054 1055 /* Allow urb->interval to override */ 1056 stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); 1057 1058 stream->uperiod = urb->interval; 1059 stream->ps.period = urb->interval >> 3; 1060 stream->bandwidth = stream->ps.usecs * 8 / 1061 stream->ps.bw_uperiod; 1062 1063 } else { 1064 u32 addr; 1065 int think_time; 1066 int hs_transfers; 1067 1068 addr = dev->ttport << 24; 1069 if (!ehci_is_TDI(ehci) 1070 || (dev->tt->hub != 1071 ehci_to_hcd(ehci)->self.root_hub)) 1072 addr |= dev->tt->hub->devnum << 16; 1073 addr |= epnum << 8; 1074 addr |= dev->devnum; 1075 stream->ps.usecs = HS_USECS_ISO(maxp); 1076 think_time = dev->tt->think_time; 1077 stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time( 1078 dev->speed, is_input, 1, maxp)); 1079 hs_transfers = max(1u, (maxp + 187) / 188); 1080 if (is_input) { 1081 u32 tmp; 1082 1083 addr |= 1 << 31; 1084 stream->ps.c_usecs = stream->ps.usecs; 1085 stream->ps.usecs = HS_USECS_ISO(1); 1086 stream->ps.cs_mask = 1; 1087 1088 /* c-mask as specified in USB 2.0 11.18.4 3.c */ 1089 tmp = (1 << (hs_transfers + 2)) - 1; 1090 stream->ps.cs_mask |= tmp << (8 + 2); 1091 } else 1092 stream->ps.cs_mask = smask_out[hs_transfers - 1]; 1093 1094 /* period for bandwidth allocation */ 1095 tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES, 1096 1 << (urb->ep->desc.bInterval - 1)); 1097 1098 /* Allow urb->interval to override */ 1099 stream->ps.bw_period = min_t(unsigned, tmp, urb->interval); 1100 stream->ps.bw_uperiod = stream->ps.bw_period << 3; 1101 1102 stream->ps.period = urb->interval; 1103 stream->uperiod = urb->interval << 3; 1104 stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) / 1105 stream->ps.bw_period; 1106 1107 /* stream->splits gets created from cs_mask later */ 1108 stream->address = cpu_to_hc32(ehci, addr); 1109 
} 1110 1111 stream->ps.udev = dev; 1112 stream->ps.ep = urb->ep; 1113 1114 stream->bEndpointAddress = is_input | epnum; 1115 stream->maxp = maxp; 1116 } 1117 1118 static struct ehci_iso_stream * 1119 iso_stream_find(struct ehci_hcd *ehci, struct urb *urb) 1120 { 1121 unsigned epnum; 1122 struct ehci_iso_stream *stream; 1123 struct usb_host_endpoint *ep; 1124 unsigned long flags; 1125 1126 epnum = usb_pipeendpoint (urb->pipe); 1127 if (usb_pipein(urb->pipe)) 1128 ep = urb->dev->ep_in[epnum]; 1129 else 1130 ep = urb->dev->ep_out[epnum]; 1131 1132 spin_lock_irqsave(&ehci->lock, flags); 1133 stream = ep->hcpriv; 1134 1135 if (unlikely(stream == NULL)) { 1136 stream = iso_stream_alloc(GFP_ATOMIC); 1137 if (likely(stream != NULL)) { 1138 ep->hcpriv = stream; 1139 iso_stream_init(ehci, stream, urb); 1140 } 1141 1142 /* if dev->ep [epnum] is a QH, hw is set */ 1143 } else if (unlikely(stream->hw != NULL)) { 1144 ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n", 1145 urb->dev->devpath, epnum, 1146 usb_pipein(urb->pipe) ? 
				"in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

/*
 * Allocate an ehci_iso_sched with room for "packets" per-packet
 * descriptors in its trailing array.  Returns NULL on allocation
 * failure; td_list starts out empty.
 */
static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched *iso_sched;
	int size = sizeof(*iso_sched);

	size += packets * sizeof(struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely(iso_sched != NULL))
		INIT_LIST_HEAD(&iso_sched->td_list);

	return iso_sched;
}

/* Precompute each packet's iTD transaction word and buffer page */
static inline void
itd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *uframe = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;	/* offset within the buffer page */
		/* interrupt only on the last packet, unless suppressed */
		if (unlikely(((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}

/* Return a schedule's TDs to the stream's free list and free it */
static void
iso_sched_free(
	struct ehci_iso_stream *stream,
	struct ehci_iso_sched *iso_sched
)
{
	if (!iso_sched)
		return;
	/* caller must hold ehci->lock! */
	list_splice(&iso_sched->td_list, &stream->free_list);
	kfree(iso_sched);
}

/*
 * Allocate all the iTDs this URB needs and stash the partially-built
 * schedule in urb->hcpriv.  Returns 0 or -ENOMEM.
 */
static int
itd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_itd *itd;
	dma_addr_t itd_dma;
	int i;
	unsigned num_itds;
	struct ehci_iso_sched *sched;
	unsigned long flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	/* interval < 8 means several packets share one iTD (one per frame) */
	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			/* same frame as the current scan position: the hw
			 * may still own it, so allocate a fresh one instead
			 */
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			/* allocate outside the lock (mem_flags may allow
			 * sleeping); re-take it before touching the lists
			 */
			spin_unlock_irqrestore(&ehci->lock, flags);
			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

/*
 * Add (sign > 0) or subtract (sign < 0) this stream's usecs from the
 * periodic bandwidth tables; for full-speed streams, also update the
 * TT's per-frame table and membership on its ps_list.
 */
static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned uframe;
	unsigned i, j;
	unsigned s_mask, c_mask, m;
	int usecs = stream->ps.usecs;
	int c_usecs = stream->ps.c_usecs;
	int tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt *tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {		/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

/* Does every occurrence of this uframe slot have room for ps.usecs? */
static inline int
itd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe
)
{
	unsigned usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}

/* Can this full-speed stream start its split transactions at "uframe"? */
static inline int
sitd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe,
	struct ehci_iso_sched *sched,
	struct ehci_tt *tt
)
{
	unsigned mask, tmp;
	unsigned frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
1401 */ 1402 uf = uframe & 7; 1403 if (!tt_available(ehci, &stream->ps, tt, frame, uf)) 1404 return 0; 1405 #else 1406 /* tt must be idle for start(s), any gap, and csplit. 1407 * assume scheduling slop leaves 10+% for control/bulk. 1408 */ 1409 if (!tt_no_collision(ehci, stream->ps.bw_period, 1410 stream->ps.udev, frame, mask)) 1411 return 0; 1412 #endif 1413 1414 do { 1415 unsigned max_used; 1416 unsigned i; 1417 1418 /* check starts (OUT uses more than one) */ 1419 uf = uframe; 1420 max_used = ehci->uframe_periodic_max - stream->ps.usecs; 1421 for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) { 1422 if (ehci->bandwidth[uf] > max_used) 1423 return 0; 1424 } 1425 1426 /* for IN, check CSPLIT */ 1427 if (stream->ps.c_usecs) { 1428 max_used = ehci->uframe_periodic_max - 1429 stream->ps.c_usecs; 1430 uf = uframe & ~7; 1431 tmp = 1 << (2+8); 1432 for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) { 1433 if ((stream->ps.cs_mask & tmp) == 0) 1434 continue; 1435 if (ehci->bandwidth[uf+i] > max_used) 1436 return 0; 1437 } 1438 } 1439 1440 uframe += stream->ps.bw_uperiod; 1441 } while (uframe < EHCI_BANDWIDTH_SIZE); 1442 1443 stream->ps.cs_mask <<= uframe & 7; 1444 stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask); 1445 return 1; 1446 } 1447 1448 /* 1449 * This scheduler plans almost as far into the future as it has actual 1450 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to 1451 * "as small as possible" to be cache-friendlier.) That limits the size 1452 * transfers you can stream reliably; avoid more than 64 msec per urb. 1453 * Also avoid queue depths of less than ehci's worst irq latency (affected 1454 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, 1455 * and other factors); or more than about 230 msec total (for portability, 1456 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! 
 */

/*
 * Pick the (u)frame where this URB's TDs will start.  Returns 0 on
 * success, 1 if the URB is entirely in the past and should be given
 * back immediately, or a negative errno (-ENOSPC, -EFBIG, ...).
 */
static int
iso_stream_schedule(
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct ehci_iso_stream *stream
)
{
	u32 now, base, next, start, period, span, now2;
	u32 wrap = 0, skip = 0;
	int status = 0;
	unsigned mod = ehci->periodic_size << 3;	/* schedule size in uframes */
	struct ehci_iso_sched *sched = urb->hcpriv;
	bool empty = list_empty(&stream->td_list);
	bool new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;	/* full-speed span is in frames; convert */

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int done = 0;
			struct ehci_tt *tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future?
	 */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}

/*-------------------------------------------------------------------------*/

/* Fill in an iTD's per-stream constant fields; the rest is per-packet */
static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;	/* -1 marks an unused uframe slot */

	/* All other fields are filled when scheduling */
}

/* Plug packet "index" of the schedule into the iTD slot for "uframe" */
static inline void
itd_patch(
	struct ehci_hcd *ehci,
	struct ehci_itd *itd,
	struct ehci_iso_sched *iso_sched,
	unsigned index,
	u16 uframe
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	unsigned pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64 bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}

/* Link one iTD into frame "frame", after any earlier iso nodes there */
static inline void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	union ehci_shadow *prev = &ehci->pshadow[frame];
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow here = *prev;
	__hc32 type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(ehci, *hw_p);
		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(ehci, prev, type);
		hw_p = shadow_next_periodic(ehci, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb();		/* iTD body must be visible before the hw link */
	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}

/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe, uframe, frame;
	struct ehci_iso_sched *iso_sched = urb->hcpriv;
	struct ehci_itd *itd;

	next_uframe = stream->next_uframe & (mod - 1);

	if (unlikely(list_empty(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = iso_sched->first_packet, itd = NULL;
			packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			/* BUG_ON(list_empty(&iso_sched->td_list)); */

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry(iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail(&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init(ehci, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->uperiod;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free(stream, iso_sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb *urb = itd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	unsigned uframe;
	int urb_index = -1;
	struct ehci_iso_stream *stream = itd->stream;
	bool retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely(itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely(t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein(urb->pipe)
					? -ENOSR	/* hc couldn't read */
					: -ECOMM;	/* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else	/* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			urb->error_count++;
		}
	}

	/* handle completion now? */
	if (likely((urb_index + 1) != urb->number_of_packets))
		goto done;

	/*
	 * ASSERT: it's really the last itd for this urb
	 * list_for_each_entry (itd, &stream->td_list, itd_list)
	 *	BUG_ON(itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (unlikely(list_is_singular(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	itd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_itd_list);
		start_free_itds(ehci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

/* Submit one high-speed iso URB: find the stream, schedule, link iTDs */
static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		/* URB was entirely in the past: complete it right away */
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

/* Precompute each packet's sITD results word and buffer fields */
static inline void
sitd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *packet = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		/* interrupt only on the last packet, unless suppressed */
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}

/*
 * Allocate all the sITDs this URB needs and stash the partially-built
 * schedule in urb->hcpriv.  Returns 0 or -ENOMEM.
 */
static int
sitd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_sitd *sitd;
	dma_addr_t sitd_dma;
	int i;
	struct ehci_iso_sched *iso_sched;
	unsigned long flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			/* same frame as the scan position: hw may still
			 * own it, so allocate a fresh one instead
			 */
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			/* allocate outside the lock (mem_flags may allow
			 * sleeping); re-take it before touching the lists
			 */
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

/* Fill in one sITD from the stream and the precomputed packet info */
static inline void
sitd_patch(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct ehci_sitd *sitd,
	struct ehci_iso_sched *iso_sched,
	unsigned index
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	u64 bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;	/* second buffer page */
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

/* Link one sITD at the head of its frame's periodic list */
static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb();		/* sITD body must be visible before the hw link */
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}

/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe;
	struct ehci_iso_sched *sched = urb->hcpriv;
	struct ehci_sitd *sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
		| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb *urb = sitd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	int urb_index;
	struct ehci_iso_stream *stream = sitd->stream;
	bool retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein(urb->pipe)
				? -ENOSR	/* hc couldn't read */
				: -ECOMM;	/* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else	/* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
		/* hw reports the residue; convert to bytes transferred */
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/*
	 * ASSERT: it's really the last sitd for this urb
	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
	 *	BUG_ON(sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}


/* Submit one full-speed iso URB: find the stream, schedule, link sITDs */
static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (stream == NULL) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ?
"in" : "out", 2307 urb->transfer_buffer_length); 2308 #endif 2309 2310 /* allocate SITDs */ 2311 status = sitd_urb_transaction(stream, ehci, urb, mem_flags); 2312 if (status < 0) { 2313 ehci_dbg(ehci, "can't init sitds\n"); 2314 goto done; 2315 } 2316 2317 /* schedule ... need to lock */ 2318 spin_lock_irqsave(&ehci->lock, flags); 2319 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { 2320 status = -ESHUTDOWN; 2321 goto done_not_linked; 2322 } 2323 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); 2324 if (unlikely(status)) 2325 goto done_not_linked; 2326 status = iso_stream_schedule(ehci, urb, stream); 2327 if (likely(status == 0)) { 2328 sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream); 2329 } else if (status > 0) { 2330 status = 0; 2331 ehci_urb_done(ehci, urb, 0); 2332 } else { 2333 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 2334 } 2335 done_not_linked: 2336 spin_unlock_irqrestore(&ehci->lock, flags); 2337 done: 2338 return status; 2339 } 2340 2341 /*-------------------------------------------------------------------------*/ 2342 2343 static void scan_isoc(struct ehci_hcd *ehci) 2344 { 2345 unsigned uf, now_frame, frame; 2346 unsigned fmask = ehci->periodic_size - 1; 2347 bool modified, live; 2348 union ehci_shadow q, *q_p; 2349 __hc32 type, *hw_p; 2350 2351 /* 2352 * When running, scan from last scan point up to "now" 2353 * else clean up by scanning everything that's left. 2354 * Touches as few pages as possible: cache-friendly. 
2355 */ 2356 if (ehci->rh_state >= EHCI_RH_RUNNING) { 2357 uf = ehci_read_frame_index(ehci); 2358 now_frame = (uf >> 3) & fmask; 2359 live = true; 2360 } else { 2361 now_frame = (ehci->last_iso_frame - 1) & fmask; 2362 live = false; 2363 } 2364 ehci->now_frame = now_frame; 2365 2366 frame = ehci->last_iso_frame; 2367 2368 restart: 2369 /* Scan each element in frame's queue for completions */ 2370 q_p = &ehci->pshadow[frame]; 2371 hw_p = &ehci->periodic[frame]; 2372 q.ptr = q_p->ptr; 2373 type = Q_NEXT_TYPE(ehci, *hw_p); 2374 modified = false; 2375 2376 while (q.ptr != NULL) { 2377 switch (hc32_to_cpu(ehci, type)) { 2378 case Q_TYPE_ITD: 2379 /* 2380 * If this ITD is still active, leave it for 2381 * later processing ... check the next entry. 2382 * No need to check for activity unless the 2383 * frame is current. 2384 */ 2385 if (frame == now_frame && live) { 2386 rmb(); 2387 for (uf = 0; uf < 8; uf++) { 2388 if (q.itd->hw_transaction[uf] & 2389 ITD_ACTIVE(ehci)) 2390 break; 2391 } 2392 if (uf < 8) { 2393 q_p = &q.itd->itd_next; 2394 hw_p = &q.itd->hw_next; 2395 type = Q_NEXT_TYPE(ehci, 2396 q.itd->hw_next); 2397 q = *q_p; 2398 break; 2399 } 2400 } 2401 2402 /* 2403 * Take finished ITDs out of the schedule 2404 * and process them: recycle, maybe report 2405 * URB completion. HC won't cache the 2406 * pointer for much longer, if at all. 2407 */ 2408 *q_p = q.itd->itd_next; 2409 if (!ehci->use_dummy_qh || 2410 q.itd->hw_next != EHCI_LIST_END(ehci)) 2411 *hw_p = q.itd->hw_next; 2412 else 2413 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); 2414 type = Q_NEXT_TYPE(ehci, q.itd->hw_next); 2415 wmb(); 2416 modified = itd_complete(ehci, q.itd); 2417 q = *q_p; 2418 break; 2419 case Q_TYPE_SITD: 2420 /* 2421 * If this SITD is still active, leave it for 2422 * later processing ... check the next entry. 2423 * No need to check for activity unless the 2424 * frame is current. 
2425 */ 2426 if (((frame == now_frame) || 2427 (((frame + 1) & fmask) == now_frame)) 2428 && live 2429 && (q.sitd->hw_results & SITD_ACTIVE(ehci))) { 2430 2431 q_p = &q.sitd->sitd_next; 2432 hw_p = &q.sitd->hw_next; 2433 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); 2434 q = *q_p; 2435 break; 2436 } 2437 2438 /* 2439 * Take finished SITDs out of the schedule 2440 * and process them: recycle, maybe report 2441 * URB completion. 2442 */ 2443 *q_p = q.sitd->sitd_next; 2444 if (!ehci->use_dummy_qh || 2445 q.sitd->hw_next != EHCI_LIST_END(ehci)) 2446 *hw_p = q.sitd->hw_next; 2447 else 2448 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); 2449 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); 2450 wmb(); 2451 modified = sitd_complete(ehci, q.sitd); 2452 q = *q_p; 2453 break; 2454 default: 2455 ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n", 2456 type, frame, q.ptr); 2457 /* BUG(); */ 2458 fallthrough; 2459 case Q_TYPE_QH: 2460 case Q_TYPE_FSTN: 2461 /* End of the iTDs and siTDs */ 2462 q.ptr = NULL; 2463 break; 2464 } 2465 2466 /* Assume completion callbacks modify the queue */ 2467 if (unlikely(modified && ehci->isoc_count > 0)) 2468 goto restart; 2469 } 2470 2471 /* Stop when we have reached the current frame */ 2472 if (frame == now_frame) 2473 return; 2474 2475 /* The last frame may still have active siTDs */ 2476 ehci->last_iso_frame = frame; 2477 frame = (frame + 1) & fmask; 2478 2479 goto restart; 2480 } 2481