/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#define SCHEDULE_SLOP 10

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @urb: Holds the information about the device/endpoint needed to initialize
 *       the QH
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount));
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
		hprt = readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @urb: Holds the information about the device/endpoint needed
 *       to initialize the QH
 * @mem_flags: Flags for allocating memory
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
					  struct dwc2_hcd_urb *urb,
					  gfp_t mem_flags)
{
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = kzalloc(sizeof(*qh), mem_flags);
	if (!qh)
		return NULL;

	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh: The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or spinlock held.
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (hsotg->core_params->dma_desc_enable > 0)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	else if (qh->dw_align_buf)
		dma_free_coherent(hsotg->dev, qh->dw_align_buf_size,
				  qh->dw_align_buf, qh->dw_align_buf_dma);
	kfree(qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
							num_channels &&
	    hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
			__func__, num_channels, hsotg->periodic_channels,
			hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->usecs;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->usecs;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->usecs);
		status = -ENOSPC;
	}

	return status;
}
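
/*
 * Worked example for dwc2_check_periodic_bandwidth() (illustrative
 * numbers): for a high-speed or split periodic QH the budget per
 * microframe is 80% of 125 us = 100 us, so a QH needing 30 us is
 * accepted only while the already claimed hsotg->periodic_usecs is at
 * most 70 us.  At full speed the budget is 90% of the 1 ms frame,
 * i.e. 900 us.
 */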

/*
 * Microframe scheduler
 * track the total use in hsotg->frame_usecs
 * keep each qh use in qh->frame_usecs
 * when surrendering the qh then donate the time back
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	int i;

	for (i = 0; i < 8; i++) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
			qh->frame_usecs[i] += utime;
			return i;
		}
	}
	return -ENOSPC;
}

/*
 * use this for FS apps that can span multiple uframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	unsigned short xtime;
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
						max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* if this is a hs transaction we need a full frame */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * if this is a fs transaction we may need a sequence
		 * of frames
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}
	return ret;
}
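
/*
 * Illustration of the microframe scheduler above (values are examples,
 * not measurements): uframes 0..5 each start with a 100 us budget,
 * uframe 6 with 30 us and uframe 7 with 0.  A high-speed interrupt QH
 * needing 40 us is placed by dwc2_find_single_uframe() into the first
 * uframe that still has at least 40 us free.  A full-speed QH needing
 * more than one uframe's worth of time goes through
 * dwc2_find_multi_uframe(), which claims time from a run of consecutive
 * uframes and records the per-uframe claim in qh->frame_usecs[] so it
 * can be donated back in dwc2_deschedule_periodic().
 */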

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer. The QH should already contain the
 *      scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		int frame = -1;

		status = dwc2_find_uframe(hsotg, qh);
		if (status == 0)
			frame = 7;
		else if (status > 0)
			frame = status - 1;

		/* Set the new frame up */
		if (frame >= 0) {
			qh->sched_frame &= ~0x7;
			qh->sched_frame |= (frame & 7);
		}

		if (status > 0)
			status = 0;
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->usecs;

	return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	int i;

	list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->usecs;

	if (hsotg->core_params->uframe_sched > 0) {
		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
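
/*
 * Bookkeeping example for the two functions above (illustrative numbers):
 * with the microframe scheduler disabled, scheduling two interrupt QHs
 * that need 25 us each leaves hsotg->periodic_usecs at 50 and
 * hsotg->periodic_channels at 2; descheduling one of them brings the
 * counters back to 25 and 1.  With the microframe scheduler enabled, the
 * per-uframe time claimed in qh->frame_usecs[] is donated back to
 * hsotg->frame_usecs[] instead of releasing a channel reservation.
 */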

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		intr_mask = readl(hsotg->regs + GINTMSK);
		intr_mask |= GINTSTS_SOF;
		writel(intr_mask, hsotg->regs + GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh: QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count) {
		intr_mask = readl(hsotg->regs + GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		writel(intr_mask, hsotg->regs + GINTMSK);
	}
}

/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

	if (sched_next_periodic_split) {
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}
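
/*
 * Example trace of dwc2_sched_periodic_split() (frame numbers purely
 * illustrative): when scheduling the next continuing (complete) split
 * with start_split_frame = 0x40 and frame_number = 0x41, incr is 0x41
 * and dwc2_frame_num_le(0x41, 0x41) is true, so for anything other than
 * an isochronous OUT endpoint the complete split is pushed out one more
 * (micro)frame to 0x42.  When the next start split is scheduled instead,
 * sched_frame is advanced by qh->interval from start_split_frame (or set
 * to the current frame number if that result would already be in the
 * past) and then aligned to the last microframe of the frame.
 */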

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}
	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->sched_frame == frame_number))
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg: The DWC HCD structure
 * @qtd: The QTD to add
 * @qh: Out parameter to return queue head
 * @mem_flags: Flags for allocating memory
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh **qh, gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	unsigned long flags;
	int allocated = 0;
	int retval;

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
	 */
	if (*qh == NULL) {
		*qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
		if (*qh == NULL)
			return -ENOMEM;
		allocated = 1;
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	retval = dwc2_hcd_qh_add(hsotg, *qh);
	if (retval)
		goto fail;

	qtd->qh = *qh;
	list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;

fail:
	if (allocated) {
		struct dwc2_qtd *qtd2, *qtd2_tmp;
		struct dwc2_qh *qh_tmp = *qh;

		*qh = NULL;
		dwc2_hcd_qh_unlink(hsotg, qh_tmp);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh_tmp);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return retval;
}
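
/*
 * Typical usage sketch (not verbatim from the HCD code): when a URB is
 * enqueued, the driver allocates a struct dwc2_qtd, calls
 * dwc2_hcd_qtd_init(qtd, urb) and then
 * dwc2_hcd_qtd_add(hsotg, qtd, &qh, mem_flags), where qh is the
 * per-endpoint QH pointer (NULL for the first transfer on an endpoint,
 * in which case a QH is created and scheduled here).
 */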