/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}
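
/*
 * Frame increment between consecutive frame list entries for this QH. For
 * high-speed devices the interval is given in microframes, so convert it to
 * full frames, rounding up.
 */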
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
			(qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}

static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
	    qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
						dwc2_max_desc_num(qh);

	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
	if (!qh->desc_list)
		return -ENOMEM;

	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
					   qh->desc_list_sz,
					   DMA_TO_DEVICE);

	qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
	if (!qh->n_bytes) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz,
				 DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
	    qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	if (qh->desc_list) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz, DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
	if (!hsotg->frame_list)
		return -ENOMEM;

	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
					       hsotg->frame_list_sz,
					       DMA_TO_DEVICE);

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
			 hsotg->frame_list_sz, DMA_FROM_DEVICE);

	kfree(hsotg->frame_list);
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->next_active_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	dma_sync_single_for_device(hsotg->dev,
				   hsotg->frame_list_dma,
				   hsotg->frame_list_sz,
				   DMA_TO_DEVICE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->host_interval - 1) / qh->host_interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->host_interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->params.uframe_sched)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent a double cleanup attempt in case
	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
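 *
 * The frame list programmed here always has 64 entries (HCFG_FRLISTEN_64,
 * FRLISTEN_64_SIZE).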
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and the last one, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned for some reason. Seen on Isoc
	 * URB dequeue: the channel is halted but there is no subsequent
	 * ChHalted interrupt to release it. When we later get here from the
	 * endpoint disable routine, the channel remains assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->params.uframe_sched ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine starting frame for Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * next_active_frame is always frame number (not uFrame) both in FS
	 * and HS!
	 */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is possible (though
	 * rare) that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap. If half of
		 * the frame elapsed skip 2 frames otherwise just 1 frame.
		 * Starting descriptor index must be 8-aligned, so if the
		 * current frame is near to complete the next one is skipped as
		 * well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when no more QTDs are in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used; it is just not
	 * removed from the source file. It is required for another possible
	 * approach: do not disable and release the channel when the ISOC
	 * session is completed, just move the QH to the inactive schedule
	 * until a new QTD arrives. On the new QTD, the QH is moved back to
	 * the 'ready' schedule, and the starting frame and therefore the
	 * starting desc_index are recalculated. In this case the channel is
	 * released only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
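	 * For ISOC it is derived from the scheduled (or recalculated) frame.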
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->next_active_frame) -
			  fr_idx_tmp) % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
								 &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->host_interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure current frame number didn't overstep last scheduled
	 * descriptor. If it happens, the only way to recover is to move
	 * qh->td_last to current frame number + 1.
	 * So that next isoc descriptor will be scheduled on frame number + 1
	 * and not on a past frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->host_interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
				qh->host_interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->host_interval;
	}
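
	/*
	 * Each descriptor transfers at most the largest isochronous payload
	 * for the bus speed: 1023 bytes for full speed, 3072 bytes for high
	 * speed (see MAX_ISOC_XFER_SIZE_*).
	 */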
	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (idx *
					   sizeof(struct dwc2_dma_desc)),
					   sizeof(struct dwc2_dma_desc),
					   DMA_TO_DEVICE);
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation driver activates next descriptor
	 * but core continues to process descriptors following the one with IOC
	 * set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation it was only one queued.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either number of
		 * descriptors is not greater than threshold or no more new
		 * descriptors activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
#endif
}

static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
		len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (n_desc * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
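	/* len can exceed xfer_len because IN lengths are rounded up above */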
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}

static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
	 * to the buffer of the currently processed QTD. For non-SG request
	 * there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma,
				 chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				dma_sync_single_for_device(hsotg->dev,
							   qh->desc_list_dma +
							   ((n_desc - 1) *
							   sizeof(struct dwc2_dma_desc)),
							   sizeof(struct dwc2_dma_desc),
							   DMA_TO_DEVICE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (n_desc - 1) *
					   sizeof(struct dwc2_dma_desc),
					   sizeof(struct dwc2_dma_desc),
					   DMA_TO_DEVICE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			dma_sync_single_for_device(hsotg->dev,
						   qh->desc_list_dma,
						   sizeof(struct dwc2_dma_desc),
						   DMA_TO_DEVICE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
				sizeof(struct dwc2_dma_desc)),
				sizeof(struct dwc2_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
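		 * The negative return value below tells the caller to stop
		 * scanning the descriptor list.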
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * Channel is halted in these error cases, considered as
		 * serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded or
		 * not. Pass the error code to the completion routine as well,
		 * to update urb->status; some class drivers might use it to
		 * stop queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to descriptor where first urb of this
		 * qtd was added. In fact, during isoc desc init, dwc2 may skip
		 * an index if current frame number is already over this index.
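		 * (See the td_last adjustment in dwc2_init_isoc_dma_desc().)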
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE)
				break;

			/* rc == DWC2_CMPL_STOP */

			if (qh->host_interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev,
				qh->desc_list_dma + (desc_num *
				sizeof(struct dwc2_dma_desc)),
				sizeof(struct dwc2_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
			 failed, *xfer_done);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;
		int qtd_desc_count;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;
		qtd_desc_count = qtd->n_desc;

		for (i = 0; i < qtd_desc_count; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				goto stop_scan;
			}

			desc_num++;
		}
	}

stop_scan:
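	/*
	 * Save or reset the endpoint's data toggle before the channel is
	 * released so the next transfer starts with the correct PID.
	 */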
	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move_tail(&qh->qh_list_entry,
				       &hsotg->periodic_sched_assigned);
			/*
			 * If channel has been halted during giveback of urb
			 * then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}