1 /* 2 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling 3 * 4 * Copyright (C) 2004-2013 Synopsys, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The names of the above-listed copyright holders may not be used 16 * to endorse or promote products derived from this software without 17 * specific prior written permission. 18 * 19 * ALTERNATIVELY, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") as published by the Free Software 21 * Foundation; either version 2 of the License, or (at your option) any 22 * later version. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 /* 38 * This file contains the interrupt handlers for Host mode 39 */ 40 #include <linux/kernel.h> 41 #include <linux/module.h> 42 #include <linux/spinlock.h> 43 #include <linux/interrupt.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/io.h> 46 #include <linux/slab.h> 47 #include <linux/usb.h> 48 49 #include <linux/usb/hcd.h> 50 #include <linux/usb/ch11.h> 51 52 #include "core.h" 53 #include "hcd.h" 54 55 /* This function is for debug only */ 56 static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) 57 { 58 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 59 u16 curr_frame_number = hsotg->frame_number; 60 61 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { 62 if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) != 63 curr_frame_number) { 64 hsotg->frame_num_array[hsotg->frame_num_idx] = 65 curr_frame_number; 66 hsotg->last_frame_num_array[hsotg->frame_num_idx] = 67 hsotg->last_frame_num; 68 hsotg->frame_num_idx++; 69 } 70 } else if (!hsotg->dumped_frame_num_array) { 71 int i; 72 73 dev_info(hsotg->dev, "Frame Last Frame\n"); 74 dev_info(hsotg->dev, "----- ----------\n"); 75 for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { 76 dev_info(hsotg->dev, "0x%04x 0x%04x\n", 77 hsotg->frame_num_array[i], 78 hsotg->last_frame_num_array[i]); 79 } 80 hsotg->dumped_frame_num_array = 1; 81 } 82 hsotg->last_frame_num = curr_frame_number; 83 #endif 84 } 85 86 static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg, 87 struct dwc2_host_chan *chan, 88 struct dwc2_qtd *qtd) 89 { 90 struct urb *usb_urb; 91 92 if (!chan->qh) 93 return; 94 95 if (chan->qh->dev_speed == USB_SPEED_HIGH) 96 return; 97 98 if (!qtd->urb) 99 return; 100 101 usb_urb = qtd->urb->priv; 102 if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt) 103 return; 104 105 if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) { 106 chan->qh->tt_buffer_dirty = 1; 107 if (usb_hub_clear_tt_buffer(usb_urb)) 108 /* Clear failed; let's hope things work anyway */ 109 chan->qh->tt_buffer_dirty = 0; 110 
} 111 } 112 113 /* 114 * Handles the start-of-frame interrupt in host mode. Non-periodic 115 * transactions may be queued to the DWC_otg controller for the current 116 * (micro)frame. Periodic transactions may be queued to the controller 117 * for the next (micro)frame. 118 */ 119 static void dwc2_sof_intr(struct dwc2_hsotg *hsotg) 120 { 121 struct list_head *qh_entry; 122 struct dwc2_qh *qh; 123 enum dwc2_transaction_type tr_type; 124 125 /* Clear interrupt */ 126 dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS); 127 128 #ifdef DEBUG_SOF 129 dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n"); 130 #endif 131 132 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); 133 134 dwc2_track_missed_sofs(hsotg); 135 136 /* Determine whether any periodic QHs should be executed */ 137 qh_entry = hsotg->periodic_sched_inactive.next; 138 while (qh_entry != &hsotg->periodic_sched_inactive) { 139 qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry); 140 qh_entry = qh_entry->next; 141 if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) 142 /* 143 * Move QH to the ready list to be executed next 144 * (micro)frame 145 */ 146 list_move(&qh->qh_list_entry, 147 &hsotg->periodic_sched_ready); 148 } 149 tr_type = dwc2_hcd_select_transactions(hsotg); 150 if (tr_type != DWC2_TRANSACTION_NONE) 151 dwc2_hcd_queue_transactions(hsotg, tr_type); 152 } 153 154 /* 155 * Handles the Rx FIFO Level Interrupt, which indicates that there is 156 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to 157 * memory if the DWC_otg controller is operating in Slave mode. 
158 */ 159 static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg) 160 { 161 u32 grxsts, chnum, bcnt, dpid, pktsts; 162 struct dwc2_host_chan *chan; 163 164 if (dbg_perio()) 165 dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n"); 166 167 grxsts = dwc2_readl(hsotg->regs + GRXSTSP); 168 chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT; 169 chan = hsotg->hc_ptr_array[chnum]; 170 if (!chan) { 171 dev_err(hsotg->dev, "Unable to get corresponding channel\n"); 172 return; 173 } 174 175 bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT; 176 dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT; 177 pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT; 178 179 /* Packet Status */ 180 if (dbg_perio()) { 181 dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum); 182 dev_vdbg(hsotg->dev, " Count = %d\n", bcnt); 183 dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid, 184 chan->data_pid_start); 185 dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts); 186 } 187 188 switch (pktsts) { 189 case GRXSTS_PKTSTS_HCHIN: 190 /* Read the data into the host buffer */ 191 if (bcnt > 0) { 192 dwc2_read_packet(hsotg, chan->xfer_buf, bcnt); 193 194 /* Update the HC fields for the next packet received */ 195 chan->xfer_count += bcnt; 196 chan->xfer_buf += bcnt; 197 } 198 break; 199 case GRXSTS_PKTSTS_HCHIN_XFER_COMP: 200 case GRXSTS_PKTSTS_DATATOGGLEERR: 201 case GRXSTS_PKTSTS_HCHHALTED: 202 /* Handled in interrupt, just ignore data */ 203 break; 204 default: 205 dev_err(hsotg->dev, 206 "RxFIFO Level Interrupt: Unknown status %d\n", pktsts); 207 break; 208 } 209 } 210 211 /* 212 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More 213 * data packets may be written to the FIFO for OUT transfers. More requests 214 * may be written to the non-periodic request queue for IN transfers. This 215 * interrupt is enabled only in Slave mode. 
216 */ 217 static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg) 218 { 219 dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n"); 220 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC); 221 } 222 223 /* 224 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data 225 * packets may be written to the FIFO for OUT transfers. More requests may be 226 * written to the periodic request queue for IN transfers. This interrupt is 227 * enabled only in Slave mode. 228 */ 229 static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg) 230 { 231 if (dbg_perio()) 232 dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n"); 233 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC); 234 } 235 236 static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0, 237 u32 *hprt0_modify) 238 { 239 struct dwc2_core_params *params = hsotg->core_params; 240 int do_reset = 0; 241 u32 usbcfg; 242 u32 prtspd; 243 u32 hcfg; 244 u32 fslspclksel; 245 u32 hfir; 246 247 dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 248 249 /* Every time when port enables calculate HFIR.FrInterval */ 250 hfir = dwc2_readl(hsotg->regs + HFIR); 251 hfir &= ~HFIR_FRINT_MASK; 252 hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT & 253 HFIR_FRINT_MASK; 254 dwc2_writel(hfir, hsotg->regs + HFIR); 255 256 /* Check if we need to adjust the PHY clock speed for low power */ 257 if (!params->host_support_fs_ls_low_power) { 258 /* Port has been enabled, set the reset change flag */ 259 hsotg->flags.b.port_reset_change = 1; 260 return; 261 } 262 263 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 264 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; 265 266 if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) { 267 /* Low power */ 268 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) { 269 /* Set PHY low power clock select for FS/LS devices */ 270 usbcfg |= GUSBCFG_PHY_LP_CLK_SEL; 271 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 
272 do_reset = 1; 273 } 274 275 hcfg = dwc2_readl(hsotg->regs + HCFG); 276 fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >> 277 HCFG_FSLSPCLKSEL_SHIFT; 278 279 if (prtspd == HPRT0_SPD_LOW_SPEED && 280 params->host_ls_low_power_phy_clk == 281 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { 282 /* 6 MHZ */ 283 dev_vdbg(hsotg->dev, 284 "FS_PHY programming HCFG to 6 MHz\n"); 285 if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) { 286 fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ; 287 hcfg &= ~HCFG_FSLSPCLKSEL_MASK; 288 hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT; 289 dwc2_writel(hcfg, hsotg->regs + HCFG); 290 do_reset = 1; 291 } 292 } else { 293 /* 48 MHZ */ 294 dev_vdbg(hsotg->dev, 295 "FS_PHY programming HCFG to 48 MHz\n"); 296 if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) { 297 fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ; 298 hcfg &= ~HCFG_FSLSPCLKSEL_MASK; 299 hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT; 300 dwc2_writel(hcfg, hsotg->regs + HCFG); 301 do_reset = 1; 302 } 303 } 304 } else { 305 /* Not low power */ 306 if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) { 307 usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL; 308 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); 309 do_reset = 1; 310 } 311 } 312 313 if (do_reset) { 314 *hprt0_modify |= HPRT0_RST; 315 dwc2_writel(*hprt0_modify, hsotg->regs + HPRT0); 316 queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work, 317 msecs_to_jiffies(60)); 318 } else { 319 /* Port has been enabled, set the reset change flag */ 320 hsotg->flags.b.port_reset_change = 1; 321 } 322 } 323 324 /* 325 * There are multiple conditions that can cause a port interrupt. This function 326 * determines which interrupt conditions have occurred and handles them 327 * appropriately. 
328 */ 329 static void dwc2_port_intr(struct dwc2_hsotg *hsotg) 330 { 331 u32 hprt0; 332 u32 hprt0_modify; 333 334 dev_vdbg(hsotg->dev, "--Port Interrupt--\n"); 335 336 hprt0 = dwc2_readl(hsotg->regs + HPRT0); 337 hprt0_modify = hprt0; 338 339 /* 340 * Clear appropriate bits in HPRT0 to clear the interrupt bit in 341 * GINTSTS 342 */ 343 hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | 344 HPRT0_OVRCURRCHG); 345 346 /* 347 * Port Connect Detected 348 * Set flag and clear if detected 349 */ 350 if (hprt0 & HPRT0_CONNDET) { 351 dwc2_writel(hprt0_modify | HPRT0_CONNDET, hsotg->regs + HPRT0); 352 353 dev_vdbg(hsotg->dev, 354 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n", 355 hprt0); 356 dwc2_hcd_connect(hsotg); 357 358 /* 359 * The Hub driver asserts a reset when it sees port connect 360 * status change flag 361 */ 362 } 363 364 /* 365 * Port Enable Changed 366 * Clear if detected - Set internal flag if disabled 367 */ 368 if (hprt0 & HPRT0_ENACHG) { 369 dwc2_writel(hprt0_modify | HPRT0_ENACHG, hsotg->regs + HPRT0); 370 dev_vdbg(hsotg->dev, 371 " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n", 372 hprt0, !!(hprt0 & HPRT0_ENA)); 373 if (hprt0 & HPRT0_ENA) { 374 hsotg->new_connection = true; 375 dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify); 376 } else { 377 hsotg->flags.b.port_enable_change = 1; 378 if (hsotg->core_params->dma_desc_fs_enable) { 379 u32 hcfg; 380 381 hsotg->core_params->dma_desc_enable = 0; 382 hsotg->new_connection = false; 383 hcfg = dwc2_readl(hsotg->regs + HCFG); 384 hcfg &= ~HCFG_DESCDMA; 385 dwc2_writel(hcfg, hsotg->regs + HCFG); 386 } 387 } 388 } 389 390 /* Overcurrent Change Interrupt */ 391 if (hprt0 & HPRT0_OVRCURRCHG) { 392 dwc2_writel(hprt0_modify | HPRT0_OVRCURRCHG, 393 hsotg->regs + HPRT0); 394 dev_vdbg(hsotg->dev, 395 " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n", 396 hprt0); 397 hsotg->flags.b.port_over_current_change = 1; 398 } 399 } 400 401 /* 402 * Gets the actual length of a 
transfer after the transfer halts. halt_status 403 * holds the reason for the halt. 404 * 405 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read 406 * is set to 1 upon return if less than the requested number of bytes were 407 * transferred. short_read may also be NULL on entry, in which case it remains 408 * unchanged. 409 */ 410 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg, 411 struct dwc2_host_chan *chan, int chnum, 412 struct dwc2_qtd *qtd, 413 enum dwc2_halt_status halt_status, 414 int *short_read) 415 { 416 u32 hctsiz, count, length; 417 418 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); 419 420 if (halt_status == DWC2_HC_XFER_COMPLETE) { 421 if (chan->ep_is_in) { 422 count = (hctsiz & TSIZ_XFERSIZE_MASK) >> 423 TSIZ_XFERSIZE_SHIFT; 424 length = chan->xfer_len - count; 425 if (short_read != NULL) 426 *short_read = (count != 0); 427 } else if (chan->qh->do_split) { 428 length = qtd->ssplit_out_xfer_count; 429 } else { 430 length = chan->xfer_len; 431 } 432 } else { 433 /* 434 * Must use the hctsiz.pktcnt field to determine how much data 435 * has been transferred. This field reflects the number of 436 * packets that have been transferred via the USB. This is 437 * always an integral number of packets if the transfer was 438 * halted before its normal completion. (Can't use the 439 * hctsiz.xfersize field because that reflects the number of 440 * bytes transferred via the AHB, not the USB). 441 */ 442 count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT; 443 length = (chan->start_pkt_count - count) * chan->max_packet; 444 } 445 446 return length; 447 } 448 449 /** 450 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer 451 * Complete interrupt on the host channel. Updates the actual_length field 452 * of the URB based on the number of bytes transferred via the host channel. 453 * Sets the URB status if the data transfer is finished. 
454 * 455 * Return: 1 if the data transfer specified by the URB is completely finished, 456 * 0 otherwise 457 */ 458 static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, 459 struct dwc2_host_chan *chan, int chnum, 460 struct dwc2_hcd_urb *urb, 461 struct dwc2_qtd *qtd) 462 { 463 u32 hctsiz; 464 int xfer_done = 0; 465 int short_read = 0; 466 int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, 467 DWC2_HC_XFER_COMPLETE, 468 &short_read); 469 470 if (urb->actual_length + xfer_length > urb->length) { 471 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); 472 xfer_length = urb->length - urb->actual_length; 473 } 474 475 /* Non DWORD-aligned buffer case handling */ 476 if (chan->align_buf && xfer_length) { 477 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); 478 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, 479 chan->qh->dw_align_buf_size, 480 chan->ep_is_in ? 481 DMA_FROM_DEVICE : DMA_TO_DEVICE); 482 if (chan->ep_is_in) 483 memcpy(urb->buf + urb->actual_length, 484 chan->qh->dw_align_buf, xfer_length); 485 } 486 487 dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n", 488 urb->actual_length, xfer_length); 489 urb->actual_length += xfer_length; 490 491 if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK && 492 (urb->flags & URB_SEND_ZERO_PACKET) && 493 urb->actual_length >= urb->length && 494 !(urb->length % chan->max_packet)) { 495 xfer_done = 0; 496 } else if (short_read || urb->actual_length >= urb->length) { 497 xfer_done = 1; 498 urb->status = 0; 499 } 500 501 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); 502 dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n", 503 __func__, (chan->ep_is_in ? 
"IN" : "OUT"), chnum); 504 dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len); 505 dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n", 506 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT); 507 dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length); 508 dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length); 509 dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read, 510 xfer_done); 511 512 return xfer_done; 513 } 514 515 /* 516 * Save the starting data toggle for the next transfer. The data toggle is 517 * saved in the QH for non-control transfers and it's saved in the QTD for 518 * control transfers. 519 */ 520 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, 521 struct dwc2_host_chan *chan, int chnum, 522 struct dwc2_qtd *qtd) 523 { 524 u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); 525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; 526 527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) { 528 if (pid == TSIZ_SC_MC_PID_DATA0) 529 chan->qh->data_toggle = DWC2_HC_PID_DATA0; 530 else 531 chan->qh->data_toggle = DWC2_HC_PID_DATA1; 532 } else { 533 if (pid == TSIZ_SC_MC_PID_DATA0) 534 qtd->data_toggle = DWC2_HC_PID_DATA0; 535 else 536 qtd->data_toggle = DWC2_HC_PID_DATA1; 537 } 538 } 539 540 /** 541 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when 542 * the transfer is stopped for any reason. The fields of the current entry in 543 * the frame descriptor array are set based on the transfer state and the input 544 * halt_status. Completes the Isochronous URB if all the URB frames have been 545 * completed. 546 * 547 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be 548 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE. 
549 */ 550 static enum dwc2_halt_status dwc2_update_isoc_urb_state( 551 struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, 552 int chnum, struct dwc2_qtd *qtd, 553 enum dwc2_halt_status halt_status) 554 { 555 struct dwc2_hcd_iso_packet_desc *frame_desc; 556 struct dwc2_hcd_urb *urb = qtd->urb; 557 558 if (!urb) 559 return DWC2_HC_XFER_NO_HALT_STATUS; 560 561 frame_desc = &urb->iso_descs[qtd->isoc_frame_index]; 562 563 switch (halt_status) { 564 case DWC2_HC_XFER_COMPLETE: 565 frame_desc->status = 0; 566 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, 567 chan, chnum, qtd, halt_status, NULL); 568 569 /* Non DWORD-aligned buffer case handling */ 570 if (chan->align_buf && frame_desc->actual_length) { 571 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", 572 __func__); 573 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, 574 chan->qh->dw_align_buf_size, 575 chan->ep_is_in ? 576 DMA_FROM_DEVICE : DMA_TO_DEVICE); 577 if (chan->ep_is_in) 578 memcpy(urb->buf + frame_desc->offset + 579 qtd->isoc_split_offset, 580 chan->qh->dw_align_buf, 581 frame_desc->actual_length); 582 } 583 break; 584 case DWC2_HC_XFER_FRAME_OVERRUN: 585 urb->error_count++; 586 if (chan->ep_is_in) 587 frame_desc->status = -ENOSR; 588 else 589 frame_desc->status = -ECOMM; 590 frame_desc->actual_length = 0; 591 break; 592 case DWC2_HC_XFER_BABBLE_ERR: 593 urb->error_count++; 594 frame_desc->status = -EOVERFLOW; 595 /* Don't need to update actual_length in this case */ 596 break; 597 case DWC2_HC_XFER_XACT_ERR: 598 urb->error_count++; 599 frame_desc->status = -EPROTO; 600 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, 601 chan, chnum, qtd, halt_status, NULL); 602 603 /* Non DWORD-aligned buffer case handling */ 604 if (chan->align_buf && frame_desc->actual_length) { 605 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", 606 __func__); 607 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, 608 chan->qh->dw_align_buf_size, 609 chan->ep_is_in ? 
610 DMA_FROM_DEVICE : DMA_TO_DEVICE); 611 if (chan->ep_is_in) 612 memcpy(urb->buf + frame_desc->offset + 613 qtd->isoc_split_offset, 614 chan->qh->dw_align_buf, 615 frame_desc->actual_length); 616 } 617 618 /* Skip whole frame */ 619 if (chan->qh->do_split && 620 chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && 621 hsotg->core_params->dma_enable > 0) { 622 qtd->complete_split = 0; 623 qtd->isoc_split_offset = 0; 624 } 625 626 break; 627 default: 628 dev_err(hsotg->dev, "Unhandled halt_status (%d)\n", 629 halt_status); 630 break; 631 } 632 633 if (++qtd->isoc_frame_index == urb->packet_count) { 634 /* 635 * urb->status is not used for isoc transfers. The individual 636 * frame_desc statuses are used instead. 637 */ 638 dwc2_host_complete(hsotg, qtd, 0); 639 halt_status = DWC2_HC_XFER_URB_COMPLETE; 640 } else { 641 halt_status = DWC2_HC_XFER_COMPLETE; 642 } 643 644 return halt_status; 645 } 646 647 /* 648 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic 649 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are 650 * still linked to the QH, the QH is added to the end of the inactive 651 * non-periodic schedule. For periodic QHs, removes the QH from the periodic 652 * schedule if no more QTDs are linked to the QH. 
653 */ 654 static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 655 int free_qtd) 656 { 657 int continue_split = 0; 658 struct dwc2_qtd *qtd; 659 660 if (dbg_qh(qh)) 661 dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__, 662 hsotg, qh, free_qtd); 663 664 if (list_empty(&qh->qtd_list)) { 665 dev_dbg(hsotg->dev, "## QTD list empty ##\n"); 666 goto no_qtd; 667 } 668 669 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry); 670 671 if (qtd->complete_split) 672 continue_split = 1; 673 else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID || 674 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END) 675 continue_split = 1; 676 677 if (free_qtd) { 678 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 679 continue_split = 0; 680 } 681 682 no_qtd: 683 if (qh->channel) 684 qh->channel->align_buf = 0; 685 qh->channel = NULL; 686 dwc2_hcd_qh_deactivate(hsotg, qh, continue_split); 687 } 688 689 /** 690 * dwc2_release_channel() - Releases a host channel for use by other transfers 691 * 692 * @hsotg: The HCD state structure 693 * @chan: The host channel to release 694 * @qtd: The QTD associated with the host channel. This QTD may be 695 * freed if the transfer is complete or an error has occurred. 696 * @halt_status: Reason the channel is being released. This status 697 * determines the actions taken by this function. 698 * 699 * Also attempts to select and queue more transactions since at least one host 700 * channel is available. 
701 */ 702 static void dwc2_release_channel(struct dwc2_hsotg *hsotg, 703 struct dwc2_host_chan *chan, 704 struct dwc2_qtd *qtd, 705 enum dwc2_halt_status halt_status) 706 { 707 enum dwc2_transaction_type tr_type; 708 u32 haintmsk; 709 int free_qtd = 0; 710 711 if (dbg_hc(chan)) 712 dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n", 713 __func__, chan->hc_num, halt_status); 714 715 switch (halt_status) { 716 case DWC2_HC_XFER_URB_COMPLETE: 717 free_qtd = 1; 718 break; 719 case DWC2_HC_XFER_AHB_ERR: 720 case DWC2_HC_XFER_STALL: 721 case DWC2_HC_XFER_BABBLE_ERR: 722 free_qtd = 1; 723 break; 724 case DWC2_HC_XFER_XACT_ERR: 725 if (qtd && qtd->error_count >= 3) { 726 dev_vdbg(hsotg->dev, 727 " Complete URB with transaction error\n"); 728 free_qtd = 1; 729 dwc2_host_complete(hsotg, qtd, -EPROTO); 730 } 731 break; 732 case DWC2_HC_XFER_URB_DEQUEUE: 733 /* 734 * The QTD has already been removed and the QH has been 735 * deactivated. Don't want to do anything except release the 736 * host channel and try to queue more transfers. 737 */ 738 goto cleanup; 739 case DWC2_HC_XFER_PERIODIC_INCOMPLETE: 740 dev_vdbg(hsotg->dev, " Complete URB with I/O error\n"); 741 free_qtd = 1; 742 dwc2_host_complete(hsotg, qtd, -EIO); 743 break; 744 case DWC2_HC_XFER_NO_HALT_STATUS: 745 default: 746 break; 747 } 748 749 dwc2_deactivate_qh(hsotg, chan->qh, free_qtd); 750 751 cleanup: 752 /* 753 * Release the host channel for use by other transfers. The cleanup 754 * function clears the channel interrupt enables and conditions, so 755 * there's no need to clear the Channel Halted interrupt separately. 
756 */ 757 if (!list_empty(&chan->hc_list_entry)) 758 list_del(&chan->hc_list_entry); 759 dwc2_hc_cleanup(hsotg, chan); 760 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); 761 762 if (hsotg->core_params->uframe_sched > 0) { 763 hsotg->available_host_channels++; 764 } else { 765 switch (chan->ep_type) { 766 case USB_ENDPOINT_XFER_CONTROL: 767 case USB_ENDPOINT_XFER_BULK: 768 hsotg->non_periodic_channels--; 769 break; 770 default: 771 /* 772 * Don't release reservations for periodic channels 773 * here. That's done when a periodic transfer is 774 * descheduled (i.e. when the QH is removed from the 775 * periodic schedule). 776 */ 777 break; 778 } 779 } 780 781 haintmsk = dwc2_readl(hsotg->regs + HAINTMSK); 782 haintmsk &= ~(1 << chan->hc_num); 783 dwc2_writel(haintmsk, hsotg->regs + HAINTMSK); 784 785 /* Try to queue more transfers now that there's a free channel */ 786 tr_type = dwc2_hcd_select_transactions(hsotg); 787 if (tr_type != DWC2_TRANSACTION_NONE) 788 dwc2_hcd_queue_transactions(hsotg, tr_type); 789 } 790 791 /* 792 * Halts a host channel. If the channel cannot be halted immediately because 793 * the request queue is full, this function ensures that the FIFO empty 794 * interrupt for the appropriate queue is enabled so that the halt request can 795 * be queued when there is space in the request queue. 796 * 797 * This function may also be called in DMA mode. In that case, the channel is 798 * simply released since the core always halts the channel automatically in 799 * DMA mode. 
800 */ 801 static void dwc2_halt_channel(struct dwc2_hsotg *hsotg, 802 struct dwc2_host_chan *chan, struct dwc2_qtd *qtd, 803 enum dwc2_halt_status halt_status) 804 { 805 if (dbg_hc(chan)) 806 dev_vdbg(hsotg->dev, "%s()\n", __func__); 807 808 if (hsotg->core_params->dma_enable > 0) { 809 if (dbg_hc(chan)) 810 dev_vdbg(hsotg->dev, "DMA enabled\n"); 811 dwc2_release_channel(hsotg, chan, qtd, halt_status); 812 return; 813 } 814 815 /* Slave mode processing */ 816 dwc2_hc_halt(hsotg, chan, halt_status); 817 818 if (chan->halt_on_queue) { 819 u32 gintmsk; 820 821 dev_vdbg(hsotg->dev, "Halt on queue\n"); 822 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || 823 chan->ep_type == USB_ENDPOINT_XFER_BULK) { 824 dev_vdbg(hsotg->dev, "control/bulk\n"); 825 /* 826 * Make sure the Non-periodic Tx FIFO empty interrupt 827 * is enabled so that the non-periodic schedule will 828 * be processed 829 */ 830 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); 831 gintmsk |= GINTSTS_NPTXFEMP; 832 dwc2_writel(gintmsk, hsotg->regs + GINTMSK); 833 } else { 834 dev_vdbg(hsotg->dev, "isoc/intr\n"); 835 /* 836 * Move the QH from the periodic queued schedule to 837 * the periodic assigned schedule. This allows the 838 * halt to be queued when the periodic schedule is 839 * processed. 840 */ 841 list_move(&chan->qh->qh_list_entry, 842 &hsotg->periodic_sched_assigned); 843 844 /* 845 * Make sure the Periodic Tx FIFO Empty interrupt is 846 * enabled so that the periodic schedule will be 847 * processed 848 */ 849 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); 850 gintmsk |= GINTSTS_PTXFEMP; 851 dwc2_writel(gintmsk, hsotg->regs + GINTMSK); 852 } 853 } 854 } 855 856 /* 857 * Performs common cleanup for non-periodic transfers after a Transfer 858 * Complete interrupt. This function should be called after any endpoint type 859 * specific handling is finished to release the host channel. 
860 */ 861 static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg, 862 struct dwc2_host_chan *chan, 863 int chnum, struct dwc2_qtd *qtd, 864 enum dwc2_halt_status halt_status) 865 { 866 dev_vdbg(hsotg->dev, "%s()\n", __func__); 867 868 qtd->error_count = 0; 869 870 if (chan->hcint & HCINTMSK_NYET) { 871 /* 872 * Got a NYET on the last transaction of the transfer. This 873 * means that the endpoint should be in the PING state at the 874 * beginning of the next transfer. 875 */ 876 dev_vdbg(hsotg->dev, "got NYET\n"); 877 chan->qh->ping_state = 1; 878 } 879 880 /* 881 * Always halt and release the host channel to make it available for 882 * more transfers. There may still be more phases for a control 883 * transfer or more data packets for a bulk transfer at this point, 884 * but the host channel is still halted. A channel will be reassigned 885 * to the transfer when the non-periodic schedule is processed after 886 * the channel is released. This allows transactions to be queued 887 * properly via dwc2_hcd_queue_transactions, which also enables the 888 * Tx FIFO Empty interrupt if necessary. 889 */ 890 if (chan->ep_is_in) { 891 /* 892 * IN transfers in Slave mode require an explicit disable to 893 * halt the channel. (In DMA mode, this call simply releases 894 * the channel.) 895 */ 896 dwc2_halt_channel(hsotg, chan, qtd, halt_status); 897 } else { 898 /* 899 * The channel is automatically disabled by the core for OUT 900 * transfers in Slave mode 901 */ 902 dwc2_release_channel(hsotg, chan, qtd, halt_status); 903 } 904 } 905 906 /* 907 * Performs common cleanup for periodic transfers after a Transfer Complete 908 * interrupt. This function should be called after any endpoint type specific 909 * handling is finished to release the host channel. 
 */
static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan, int chnum,
					struct dwc2_qtd *qtd,
					enum dwc2_halt_status halt_status)
{
	u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));

	/* A completed periodic transfer resets the transaction error count */
	qtd->error_count = 0;

	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
		/* Core halts channel in these cases */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	else
		/* Flush any outstanding requests from the Tx queue */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}

/*
 * Handles the Transfer Complete interrupt for an isochronous split IN
 * transfer. Copies any received data out of the DWORD-aligned bounce
 * buffer (when one is in use), advances the split bookkeeping, and
 * completes the URB once all of its packets have been transferred.
 *
 * Return: 1 if the host channel was released, 0 otherwise
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing was received; restart the split from the start */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf) {
		/* Copy out of the DWORD-aligned bounce buffer */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
				 chan->qh->dw_align_buf_size, DMA_FROM_DEVICE);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		/* This frame descriptor is done; move on to the next one */
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All packets transferred; the whole URB is complete */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}

/*
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int pipe_type;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	if (!urb)
		goto handle_xfercomp_done;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	if (hsotg->core_params->dma_desc_enable > 0) {
		/* Descriptor DMA mode: the DDMA layer finishes the transfer */
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Advance the SETUP -> DATA -> STATUS control state machine */
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else {
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, " Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_INT:
		dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);

		/*
		 * Interrupt URB is done on the first transfer complete
		 * interrupt
		 */
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_COMPLETE);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	}

handle_xfercomp_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}

/*
 * Handles a host channel STALL interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 */
static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	int pipe_type;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
		chnum);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_STALL);
		goto handle_stall_done;
	}

	if (!urb)
		goto handle_stall_halt;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
		dwc2_host_complete(hsotg, qtd, -EPIPE);

	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
	    pipe_type == USB_ENDPOINT_XFER_INT) {
		dwc2_host_complete(hsotg, qtd, -EPIPE);
		/*
		 * USB protocol requires resetting the data toggle for bulk
		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
		 * setup command is issued to the endpoint. Anticipate the
		 * CLEAR_FEATURE command since a STALL has occurred and reset
		 * the data toggle now.
		 */
		chan->qh->data_toggle = 0;
	}

handle_stall_halt:
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);

handle_stall_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
}

/*
 * Updates the state of the URB when a transfer has been stopped due to an
 * abnormal condition before the transfer completes. Modifies the
 * actual_length field of the URB to reflect the number of bytes that have
 * actually been transferred via the host channel.
 */
static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan, int chnum,
				      struct dwc2_hcd_urb *urb,
				      struct dwc2_qtd *qtd,
				      enum dwc2_halt_status halt_status)
{
	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
						      qtd, halt_status, NULL);
	u32 hctsiz;

	if (urb->actual_length + xfer_length > urb->length) {
		/* Hardware reported more data than the URB buffer can hold */
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		/* Copy received data out of the DWORD-aligned bounce buffer */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
				 chan->qh->dw_align_buf_size,
				 chan->ep_is_in ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (chan->ep_is_in)
			memcpy(urb->buf + urb->actual_length,
			       chan->qh->dw_align_buf,
			       xfer_length);
	}

	urb->actual_length += xfer_length;

	hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
		 chan->start_pkt_count);
	dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
		 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
	dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
	dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
		 xfer_length);
	dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
		 urb->actual_length);
	dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
		 urb->length);
}

/*
 * Handles a host channel NAK interrupt. This handler may be called in either
 * DMA mode or Slave mode.
 */
static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	if (!qtd) {
		dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
		return;
	}

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
			 chnum);

	/*
	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
	 * interrupt. Re-start the SSPLIT transfer.
	 */
	if (chan->do_split) {
		if (chan->complete_split)
			qtd->error_count = 0;
		qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		goto handle_nak_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
			/*
			 * NAK interrupts are enabled on bulk/control IN
			 * transfers in DMA mode for the sole purpose of
			 * resetting the error count after a transaction error
			 * occurs. The core will continue transferring data.
			 */
			qtd->error_count = 0;
			break;
		}

		/*
		 * NAK interrupts normally occur during OUT transfers in DMA
		 * or Slave mode. For IN transfers, more requests will be
		 * queued as request queue space is available.
		 */
		qtd->error_count = 0;

		if (!chan->qh->ping_state) {
			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_NAK);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

			/* High-speed retries must go through PING first */
			if (chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will
		 * start/continue
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Should never get called for isochronous transfers */
		dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
		break;
	}

handle_nak_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
}

/*
 * Handles a host channel ACK interrupt. This interrupt is enabled when
 * performing the PING protocol in Slave mode, when errors occur during
 * either Slave mode or DMA mode, and during Start Split transactions.
 */
static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
			 chnum);

	if (chan->do_split) {
		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
		if (!chan->ep_is_in &&
		    chan->data_pid_start != DWC2_HC_PID_SETUP)
			qtd->ssplit_out_xfer_count = chan->xfer_len;

		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
			/* SSPLIT acknowledged; schedule the CSPLIT next */
			qtd->complete_split = 1;
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		} else {
			/* ISOC OUT */
			switch (chan->xact_pos) {
			case DWC2_HCSPLT_XACTPOS_ALL:
				break;
			case DWC2_HCSPLT_XACTPOS_END:
				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
				qtd->isoc_split_offset = 0;
				break;
			case DWC2_HCSPLT_XACTPOS_BEGIN:
			case DWC2_HCSPLT_XACTPOS_MID:
				/*
				 * For BEGIN or MID, calculate the length for
				 * the next microframe to determine the correct
				 * SSPLIT token, either MID or END
				 */
				frame_desc = &qtd->urb->iso_descs[
						qtd->isoc_frame_index];
				qtd->isoc_split_offset += 188;

				if (frame_desc->length - qtd->isoc_split_offset
						<= 188)
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_END;
				else
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_MID;
				break;
			}
		}
	} else {
		qtd->error_count = 0;

		if (chan->qh->ping_state) {
			chan->qh->ping_state = 0;
			/*
			 * Halt the channel so the transfer can be re-started
			 * from the appropriate point. This only happens in
			 * Slave mode. In DMA mode, the ping_state is cleared
			 * when the transfer is started because the core
			 * automatically executes the PING, then the transfer.
			 */
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		}
	}

	/*
	 * If the ACK occurred when _not_ in the PING state, let the channel
	 * continue transferring data after clearing the error count
	 */
	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
}

/*
 * Handles a host channel NYET interrupt. This interrupt should only occur on
 * Bulk and Control OUT endpoints and for complete split transactions.
 If a
 * NYET occurs at the same time as a Transfer Complete interrupt, it is
 * handled in the xfercomp interrupt handler, not here. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, int chnum,
			      struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
			 chnum);

	/*
	 * NYET on CSPLIT
	 * re-do the CSPLIT immediately on non-periodic
	 */
	if (chan->do_split && chan->complete_split) {
		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
		    hsotg->core_params->dma_enable > 0) {
			/* ISOC IN split: give up on this frame, move on */
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
			qtd->isoc_frame_index++;
			if (qtd->urb &&
			    qtd->isoc_frame_index == qtd->urb->packet_count) {
				dwc2_host_complete(hsotg, qtd, 0);
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_URB_COMPLETE);
			} else {
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_NO_HALT_STATUS);
			}
			goto handle_nyet_done;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			int frnum = dwc2_hcd_get_frame_number(hsotg);

			if (dwc2_full_frame_num(frnum) !=
			    dwc2_full_frame_num(chan->qh->sched_frame)) {
				/*
				 * No longer in the same full speed frame.
				 * Treat this as a transaction error.
				 */
#if 0
				/*
				 * Todo: Fix system performance so this can
				 * be treated as an error. Right now complete
				 * splits cannot be scheduled precisely enough
				 * due to other system activity, so this error
				 * occurs regularly in Slave mode.
				 */
				qtd->error_count++;
#endif
				qtd->complete_split = 0;
				dwc2_halt_channel(hsotg, chan, qtd,
						  DWC2_HC_XFER_XACT_ERR);
				/* Todo: add support for isoc release */
				goto handle_nyet_done;
			}
		}

		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
		goto handle_nyet_done;
	}

	/* NYET on a non-split transfer: enter the PING protocol */
	chan->qh->ping_state = 1;
	qtd->error_count = 0;

	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
				  DWC2_HC_XFER_NYET);
	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

	/*
	 * Halt the channel and re-start the transfer so the PING protocol
	 * will start
	 */
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);

handle_nyet_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}

/*
 * Handles a host channel babble interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 */
static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
		chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_BABBLE_ERR);
		goto disable_int;
	}

	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		/* Non-isochronous: fail the URB with an overflow error */
		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
	} else {
		enum dwc2_halt_status halt_status;

		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
							 qtd, DWC2_HC_XFER_BABBLE_ERR);
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
	}

disable_int:
	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
}

/*
 * Handles a host channel AHB error interrupt. This handler is only called in
 * DMA mode.
 */
static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	char *pipetype, *speed;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
		chnum);

	if (!urb)
		goto handle_ahberr_halt;

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	/* Snapshot the channel registers for the diagnostic dump below */
	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
	hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum));
	hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
	hc_dma = dwc2_readl(hsotg->regs + HCDMA(chnum));

	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
	dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
	dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
	dev_err(hsotg->dev, " Device address: %d\n",
		dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_err(hsotg->dev, " Endpoint: %d, %s\n",
		dwc2_hcd_get_ep_num(&urb->pipe_info),
		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		pipetype = "CONTROL";
		break;
	case USB_ENDPOINT_XFER_BULK:
		pipetype = "BULK";
		break;
	case USB_ENDPOINT_XFER_INT:
		pipetype = "INTERRUPT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		pipetype = "ISOCHRONOUS";
		break;
	default:
		pipetype = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);

	switch (chan->speed) {
	case USB_SPEED_HIGH:
		speed = "HIGH";
		break;
	case USB_SPEED_FULL:
		speed = "FULL";
		break;
	case USB_SPEED_LOW:
		speed = "LOW";
		break;
	default:
		speed = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, " Speed: %s\n", speed);

	dev_err(hsotg->dev, " Max packet size: %d\n",
		dwc2_hcd_get_mps(&urb->pipe_info));
	dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
	dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
		urb->buf, (unsigned long)urb->dma);
	dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
		urb->setup_packet, (unsigned long)urb->setup_dma);
	dev_err(hsotg->dev, " Interval: %d\n", urb->interval);

	/* Core halts the channel for Descriptor DMA mode */
	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_AHB_ERR);
		goto handle_ahberr_done;
	}

	dwc2_host_complete(hsotg, qtd, -EIO);

handle_ahberr_halt:
	/*
	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
	 * write to the HCCHARn register in DMA mode to force the halt.
	 */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);

handle_ahberr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}

/*
 * Handles a host channel transaction error interrupt.
 This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev,
		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_XACT_ERR);
		goto handle_xacterr_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		qtd->error_count++;
		if (!chan->qh->ping_state) {

			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
			/* High-speed OUT retries must go through PING first */
			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will start
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count++;
		if (chan->do_split && chan->complete_split)
			qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		{
			enum dwc2_halt_status halt_status;

			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
		}
		break;
	}

handle_xacterr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}

/*
 * Handles a host channel frame overrun interrupt. This handler may be called
 * in either DMA mode or Slave mode.
 */
static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	enum dwc2_halt_status halt_status;

	if (dbg_hc(chan))
		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
			chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		/* Frame overrun is only meaningful for periodic transfers */
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
		break;
	}

	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
}

/*
 * Handles a host channel data toggle error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan, int chnum,
				    struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev,
		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);

	if (chan->ep_is_in)
		qtd->error_count = 0;
	else
		dev_err(hsotg->dev,
			"Data Toggle Error on OUT transfer, channel %d\n",
			chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
}

/*
 * For debug only. It checks that a valid halt status is set and that
 * HCCHARn.chdis is clear. If there's a problem, corrective action is
 * taken and a warning is issued.
1713 * 1714 * Return: true if halt status is ok, false otherwise 1715 */ 1716 static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg, 1717 struct dwc2_host_chan *chan, int chnum, 1718 struct dwc2_qtd *qtd) 1719 { 1720 #ifdef DEBUG 1721 u32 hcchar; 1722 u32 hctsiz; 1723 u32 hcintmsk; 1724 u32 hcsplt; 1725 1726 if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) { 1727 /* 1728 * This code is here only as a check. This condition should 1729 * never happen. Ignore the halt if it does occur. 1730 */ 1731 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum)); 1732 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); 1733 hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum)); 1734 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum)); 1735 dev_dbg(hsotg->dev, 1736 "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n", 1737 __func__); 1738 dev_dbg(hsotg->dev, 1739 "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n", 1740 chnum, hcchar, hctsiz); 1741 dev_dbg(hsotg->dev, 1742 "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n", 1743 chan->hcint, hcintmsk, hcsplt); 1744 if (qtd) 1745 dev_dbg(hsotg->dev, "qtd->complete_split %d\n", 1746 qtd->complete_split); 1747 dev_warn(hsotg->dev, 1748 "%s: no halt status, channel %d, ignoring interrupt\n", 1749 __func__, chnum); 1750 return false; 1751 } 1752 1753 /* 1754 * This code is here only as a check. hcchar.chdis should never be set 1755 * when the halt interrupt occurs. Halt the channel again if it does 1756 * occur. 1757 */ 1758 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum)); 1759 if (hcchar & HCCHAR_CHDIS) { 1760 dev_warn(hsotg->dev, 1761 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n", 1762 __func__, hcchar); 1763 chan->halt_pending = 0; 1764 dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status); 1765 return false; 1766 } 1767 #endif 1768 1769 return true; 1770 } 1771 1772 /* 1773 * Handles a host Channel Halted interrupt in DMA mode. 
This handler 1774 * determines the reason the channel halted and proceeds accordingly. 1775 */ 1776 static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, 1777 struct dwc2_host_chan *chan, int chnum, 1778 struct dwc2_qtd *qtd) 1779 { 1780 u32 hcintmsk; 1781 int out_nak_enh = 0; 1782 1783 if (dbg_hc(chan)) 1784 dev_vdbg(hsotg->dev, 1785 "--Host Channel %d Interrupt: DMA Channel Halted--\n", 1786 chnum); 1787 1788 /* 1789 * For core with OUT NAK enhancement, the flow for high-speed 1790 * CONTROL/BULK OUT is handled a little differently 1791 */ 1792 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) { 1793 if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in && 1794 (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || 1795 chan->ep_type == USB_ENDPOINT_XFER_BULK)) { 1796 out_nak_enh = 1; 1797 } 1798 } 1799 1800 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE || 1801 (chan->halt_status == DWC2_HC_XFER_AHB_ERR && 1802 hsotg->core_params->dma_desc_enable <= 0)) { 1803 if (hsotg->core_params->dma_desc_enable > 0) 1804 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1805 chan->halt_status); 1806 else 1807 /* 1808 * Just release the channel. A dequeue can happen on a 1809 * transfer timeout. In the case of an AHB Error, the 1810 * channel was forced to halt because there's no way to 1811 * gracefully recover. 1812 */ 1813 dwc2_release_channel(hsotg, chan, qtd, 1814 chan->halt_status); 1815 return; 1816 } 1817 1818 hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum)); 1819 1820 if (chan->hcint & HCINTMSK_XFERCOMPL) { 1821 /* 1822 * Todo: This is here because of a possible hardware bug. Spec 1823 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT 1824 * interrupt w/ACK bit set should occur, but I only see the 1825 * XFERCOMP bit, even with it masked out. This is a workaround 1826 * for that behavior. Should fix this when hardware is fixed. 
1827 */ 1828 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in) 1829 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd); 1830 dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd); 1831 } else if (chan->hcint & HCINTMSK_STALL) { 1832 dwc2_hc_stall_intr(hsotg, chan, chnum, qtd); 1833 } else if ((chan->hcint & HCINTMSK_XACTERR) && 1834 hsotg->core_params->dma_desc_enable <= 0) { 1835 if (out_nak_enh) { 1836 if (chan->hcint & 1837 (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) { 1838 dev_vdbg(hsotg->dev, 1839 "XactErr with NYET/NAK/ACK\n"); 1840 qtd->error_count = 0; 1841 } else { 1842 dev_vdbg(hsotg->dev, 1843 "XactErr without NYET/NAK/ACK\n"); 1844 } 1845 } 1846 1847 /* 1848 * Must handle xacterr before nak or ack. Could get a xacterr 1849 * at the same time as either of these on a BULK/CONTROL OUT 1850 * that started with a PING. The xacterr takes precedence. 1851 */ 1852 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); 1853 } else if ((chan->hcint & HCINTMSK_XCS_XACT) && 1854 hsotg->core_params->dma_desc_enable > 0) { 1855 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); 1856 } else if ((chan->hcint & HCINTMSK_AHBERR) && 1857 hsotg->core_params->dma_desc_enable > 0) { 1858 dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd); 1859 } else if (chan->hcint & HCINTMSK_BBLERR) { 1860 dwc2_hc_babble_intr(hsotg, chan, chnum, qtd); 1861 } else if (chan->hcint & HCINTMSK_FRMOVRUN) { 1862 dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd); 1863 } else if (!out_nak_enh) { 1864 if (chan->hcint & HCINTMSK_NYET) { 1865 /* 1866 * Must handle nyet before nak or ack. Could get a nyet 1867 * at the same time as either of those on a BULK/CONTROL 1868 * OUT that started with a PING. The nyet takes 1869 * precedence. 1870 */ 1871 dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd); 1872 } else if ((chan->hcint & HCINTMSK_NAK) && 1873 !(hcintmsk & HCINTMSK_NAK)) { 1874 /* 1875 * If nak is not masked, it's because a non-split IN 1876 * transfer is in an error state. 
In that case, the nak 1877 * is handled by the nak interrupt handler, not here. 1878 * Handle nak here for BULK/CONTROL OUT transfers, which 1879 * halt on a NAK to allow rewinding the buffer pointer. 1880 */ 1881 dwc2_hc_nak_intr(hsotg, chan, chnum, qtd); 1882 } else if ((chan->hcint & HCINTMSK_ACK) && 1883 !(hcintmsk & HCINTMSK_ACK)) { 1884 /* 1885 * If ack is not masked, it's because a non-split IN 1886 * transfer is in an error state. In that case, the ack 1887 * is handled by the ack interrupt handler, not here. 1888 * Handle ack here for split transfers. Start splits 1889 * halt on ACK. 1890 */ 1891 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd); 1892 } else { 1893 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1894 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1895 /* 1896 * A periodic transfer halted with no other 1897 * channel interrupts set. Assume it was halted 1898 * by the core because it could not be completed 1899 * in its scheduled (micro)frame. 1900 */ 1901 dev_dbg(hsotg->dev, 1902 "%s: Halt channel %d (assume incomplete periodic transfer)\n", 1903 __func__, chnum); 1904 dwc2_halt_channel(hsotg, chan, qtd, 1905 DWC2_HC_XFER_PERIODIC_INCOMPLETE); 1906 } else { 1907 dev_err(hsotg->dev, 1908 "%s: Channel %d - ChHltd set, but reason is unknown\n", 1909 __func__, chnum); 1910 dev_err(hsotg->dev, 1911 "hcint 0x%08x, intsts 0x%08x\n", 1912 chan->hcint, 1913 dwc2_readl(hsotg->regs + GINTSTS)); 1914 goto error; 1915 } 1916 } 1917 } else { 1918 dev_info(hsotg->dev, 1919 "NYET/NAK/ACK/other in non-error case, 0x%08x\n", 1920 chan->hcint); 1921 error: 1922 /* Failthrough: use 3-strikes rule */ 1923 qtd->error_count++; 1924 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, 1925 qtd, DWC2_HC_XFER_XACT_ERR); 1926 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1927 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); 1928 } 1929 } 1930 1931 /* 1932 * Handles a host channel Channel Halted interrupt 1933 * 1934 * In slave mode, this handler is called only 
 when the driver specifically
 * requests a halt. This occurs during handling other host channel interrupts
 * (e.g. nak, xacterr, stall, nyet, etc.).
 *
 * In DMA mode, this is the interrupt that occurs when the core has finished
 * processing a transfer on a channel. Other host channel interrupts (except
 * ahberr) are disabled in DMA mode.
 */
static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
			 chnum);

	if (hsotg->core_params->dma_enable > 0) {
		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
	} else {
		/* Slave mode: the halt was requested by the driver itself */
		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
			return;
		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
	}
}

/*
 * Check if the given qtd is still the top of the list (and thus valid).
 *
 * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
 * the qtd from the top of the list, this will return false (otherwise true).
 */
static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
{
	struct dwc2_qtd *cur_head;

	if (qh == NULL)
		return false;

	/*
	 * NOTE(review): if qtd_list is empty, list_first_entry() yields a
	 * pointer derived from the list head itself, which can never equal a
	 * real qtd, so this still returns false — presumably the intended
	 * result for an emptied list.
	 */
	cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
				    qtd_list_entry);
	return (cur_head == qtd);
}

/*
 * Handles interrupt for a specific Host Channel
 *
 * Reads and acks the channel's HCINT register, then dispatches each unmasked
 * condition to its individual handler. The active qtd is re-validated after
 * every handler call, since a handler may unlink and free it.
 */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	/* Snapshot raw status and mask before any early exit */
	hcint = dwc2_readl(hsotg->regs + HCINT(chnum));
	hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Still ack the interrupt so it does not fire forever */
		dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/* Ack all raw bits; keep the raw status in chan->hcint, then narrow
	 * the local copy down to the unmasked conditions */
	dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	/* The active transfer is always the first qtd on the QH's list */
	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

	/*
	 * Slave mode only: when CHHLTD arrives together with other interrupt
	 * bits, drop CHHLTD here so the more specific condition handlers run
	 * instead.
	 */
	if (hsotg->core_params->dma_enable <= 0) {
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}

	/* After every handler, bail out if the handler unlinked the qtd */
	if (hcint & HCINTMSK_CHHLTD) {
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_AHBERR) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NAK) {
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_ACK) {
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NYET) {
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_XACTERR) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_DATATGLERR) {
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}

exit:
	chan->hcint = 0;
}

/*
 * This interrupt indicates that one or more host channels has a pending
 * interrupt. There are multiple conditions that can cause each host channel
 * interrupt.
This function determines which conditions have occurred for each 2119 * host channel interrupt and handles them appropriately. 2120 */ 2121 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) 2122 { 2123 u32 haint; 2124 int i; 2125 2126 haint = dwc2_readl(hsotg->regs + HAINT); 2127 if (dbg_perio()) { 2128 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2129 2130 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint); 2131 } 2132 2133 for (i = 0; i < hsotg->core_params->host_channels; i++) { 2134 if (haint & (1 << i)) 2135 dwc2_hc_n_intr(hsotg, i); 2136 } 2137 } 2138 2139 /* This function handles interrupts for the HCD */ 2140 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg) 2141 { 2142 u32 gintsts, dbg_gintsts; 2143 irqreturn_t retval = IRQ_NONE; 2144 2145 if (!dwc2_is_controller_alive(hsotg)) { 2146 dev_warn(hsotg->dev, "Controller is dead\n"); 2147 return retval; 2148 } 2149 2150 spin_lock(&hsotg->lock); 2151 2152 /* Check if HOST Mode */ 2153 if (dwc2_is_host_mode(hsotg)) { 2154 gintsts = dwc2_read_core_intr(hsotg); 2155 if (!gintsts) { 2156 spin_unlock(&hsotg->lock); 2157 return retval; 2158 } 2159 2160 retval = IRQ_HANDLED; 2161 2162 dbg_gintsts = gintsts; 2163 #ifndef DEBUG_SOF 2164 dbg_gintsts &= ~GINTSTS_SOF; 2165 #endif 2166 if (!dbg_perio()) 2167 dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL | 2168 GINTSTS_PTXFEMP); 2169 2170 /* Only print if there are any non-suppressed interrupts left */ 2171 if (dbg_gintsts) 2172 dev_vdbg(hsotg->dev, 2173 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", 2174 gintsts); 2175 2176 if (gintsts & GINTSTS_SOF) 2177 dwc2_sof_intr(hsotg); 2178 if (gintsts & GINTSTS_RXFLVL) 2179 dwc2_rx_fifo_level_intr(hsotg); 2180 if (gintsts & GINTSTS_NPTXFEMP) 2181 dwc2_np_tx_fifo_empty_intr(hsotg); 2182 if (gintsts & GINTSTS_PRTINT) 2183 dwc2_port_intr(hsotg); 2184 if (gintsts & GINTSTS_HCHINT) 2185 dwc2_hc_intr(hsotg); 2186 if (gintsts & GINTSTS_PTXFEMP) 2187 dwc2_perio_tx_fifo_empty_intr(hsotg); 2188 2189 if (dbg_gintsts) { 2190 
dev_vdbg(hsotg->dev, 2191 "DWC OTG HCD Finished Servicing Interrupts\n"); 2192 dev_vdbg(hsotg->dev, 2193 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n", 2194 dwc2_readl(hsotg->regs + GINTSTS), 2195 dwc2_readl(hsotg->regs + GINTMSK)); 2196 } 2197 } 2198 2199 spin_unlock(&hsotg->lock); 2200 2201 return retval; 2202 } 2203