1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 * http://www.samsung.com 5 * 6 * Copyright 2008 Openmoko, Inc. 7 * Copyright 2008 Simtec Electronics 8 * Ben Dooks <ben@simtec.co.uk> 9 * http://armlinux.simtec.co.uk/ 10 * 11 * S3C USB2.0 High-speed / OtG driver 12 */ 13 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/spinlock.h> 17 #include <linux/interrupt.h> 18 #include <linux/platform_device.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/mutex.h> 21 #include <linux/seq_file.h> 22 #include <linux/delay.h> 23 #include <linux/io.h> 24 #include <linux/slab.h> 25 #include <linux/of_platform.h> 26 27 #include <linux/usb/ch9.h> 28 #include <linux/usb/gadget.h> 29 #include <linux/usb/phy.h> 30 #include <linux/usb/composite.h> 31 32 33 #include "core.h" 34 #include "hw.h" 35 36 /* conversion functions */ 37 static inline struct dwc2_hsotg_req *our_req(struct usb_request *req) 38 { 39 return container_of(req, struct dwc2_hsotg_req, req); 40 } 41 42 static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep) 43 { 44 return container_of(ep, struct dwc2_hsotg_ep, ep); 45 } 46 47 static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget) 48 { 49 return container_of(gadget, struct dwc2_hsotg, gadget); 50 } 51 52 static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val) 53 { 54 dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset); 55 } 56 57 static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val) 58 { 59 dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset); 60 } 61 62 static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg, 63 u32 ep_index, u32 dir_in) 64 { 65 if (dir_in) 66 return hsotg->eps_in[ep_index]; 67 else 68 return hsotg->eps_out[ep_index]; 69 } 70 71 /* forward declaration of functions */ 72 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg); 73 74 /** 75 * using_dma - return the DMA status of the driver. 76 * @hsotg: The driver state. 77 * 78 * Return true if we're using DMA. 79 * 80 * Currently, we have the DMA support code worked into everywhere 81 * that needs it, but the AMBA DMA implementation in the hardware can 82 * only DMA from 32bit aligned addresses. This means that gadgets such 83 * as the CDC Ethernet cannot work as they often pass packets which are 84 * not 32bit aligned. 85 * 86 * Unfortunately the choice to use DMA or not is global to the controller 87 * and seems to be only settable when the controller is being put through 88 * a core reset. This means we either need to fix the gadgets to take 89 * account of DMA alignment, or add bounce buffers (yuerk). 90 * 91 * g_using_dma is set depending on dts flag. 92 */ 93 static inline bool using_dma(struct dwc2_hsotg *hsotg) 94 { 95 return hsotg->params.g_dma; 96 } 97 98 /* 99 * using_desc_dma - return the descriptor DMA status of the driver. 100 * @hsotg: The driver state. 101 * 102 * Return true if we're using descriptor DMA. 103 */ 104 static inline bool using_desc_dma(struct dwc2_hsotg *hsotg) 105 { 106 return hsotg->params.g_dma_desc; 107 } 108 109 /** 110 * dwc2_gadget_incr_frame_num - Increments the targeted frame number. 111 * @hs_ep: The endpoint 112 * 113 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT. 114 * If an overrun occurs it will wrap the value and set the frame_overrun flag. 115 */ 116 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep) 117 { 118 hs_ep->target_frame += hs_ep->interval; 119 if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) { 120 hs_ep->frame_overrun = true; 121 hs_ep->target_frame &= DSTS_SOFFN_LIMIT; 122 } else { 123 hs_ep->frame_overrun = false; 124 } 125 } 126 127 /** 128 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number 129 * by one. 130 * @hs_ep: The endpoint. 131 * 132 * This function used in service interval based scheduling flow to calculate 133 * descriptor frame number filed value. For service interval mode frame 134 * number in descriptor should point to last (u)frame in the interval. 135 * 136 */ 137 static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep) 138 { 139 if (hs_ep->target_frame) 140 hs_ep->target_frame -= 1; 141 else 142 hs_ep->target_frame = DSTS_SOFFN_LIMIT; 143 } 144 145 /** 146 * dwc2_hsotg_en_gsint - enable one or more of the general interrupt 147 * @hsotg: The device state 148 * @ints: A bitmask of the interrupts to enable 149 */ 150 static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints) 151 { 152 u32 gsintmsk = dwc2_readl(hsotg, GINTMSK); 153 u32 new_gsintmsk; 154 155 new_gsintmsk = gsintmsk | ints; 156 157 if (new_gsintmsk != gsintmsk) { 158 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk); 159 dwc2_writel(hsotg, new_gsintmsk, GINTMSK); 160 } 161 } 162 163 /** 164 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt 165 * @hsotg: The device state 166 * @ints: A bitmask of the interrupts to enable 167 */ 168 static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints) 169 { 170 u32 gsintmsk = dwc2_readl(hsotg, GINTMSK); 171 u32 new_gsintmsk; 172 173 new_gsintmsk = gsintmsk & ~ints; 174 175 if (new_gsintmsk != gsintmsk) 176 dwc2_writel(hsotg, new_gsintmsk, GINTMSK); 177 } 178 179 /** 180 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq 181 * @hsotg: The device state 182 * @ep: The endpoint index 183 * @dir_in: True if direction is in. 184 * @en: The enable value, true to enable 185 * 186 * Set or clear the mask for an individual endpoint's interrupt 187 * request. 188 */ 189 static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg, 190 unsigned int ep, unsigned int dir_in, 191 unsigned int en) 192 { 193 unsigned long flags; 194 u32 bit = 1 << ep; 195 u32 daint; 196 197 if (!dir_in) 198 bit <<= 16; 199 200 local_irq_save(flags); 201 daint = dwc2_readl(hsotg, DAINTMSK); 202 if (en) 203 daint |= bit; 204 else 205 daint &= ~bit; 206 dwc2_writel(hsotg, daint, DAINTMSK); 207 local_irq_restore(flags); 208 } 209 210 /** 211 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode 212 * 213 * @hsotg: Programming view of the DWC_otg controller 214 */ 215 int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg) 216 { 217 if (hsotg->hw_params.en_multiple_tx_fifo) 218 /* In dedicated FIFO mode we need count of IN EPs */ 219 return hsotg->hw_params.num_dev_in_eps; 220 else 221 /* In shared FIFO mode we need count of Periodic IN EPs */ 222 return hsotg->hw_params.num_dev_perio_in_ep; 223 } 224 225 /** 226 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for 227 * device mode TX FIFOs 228 * 229 * @hsotg: Programming view of the DWC_otg controller 230 */ 231 int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) 232 { 233 int addr; 234 int tx_addr_max; 235 u32 np_tx_fifo_size; 236 237 np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size, 238 hsotg->params.g_np_tx_fifo_size); 239 240 /* Get Endpoint Info Control block size in DWORDs. */ 241 tx_addr_max = hsotg->hw_params.total_fifo_size; 242 243 addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size; 244 if (tx_addr_max <= addr) 245 return 0; 246 247 return tx_addr_max - addr; 248 } 249 250 /** 251 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt 252 * 253 * @hsotg: Programming view of the DWC_otg controller 254 * 255 */ 256 static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg) 257 { 258 u32 gintsts2; 259 u32 gintmsk2; 260 261 gintsts2 = dwc2_readl(hsotg, GINTSTS2); 262 gintmsk2 = dwc2_readl(hsotg, GINTMSK2); 263 264 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { 265 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); 266 dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); 267 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); 268 } 269 } 270 271 /** 272 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode 273 * TX FIFOs 274 * 275 * @hsotg: Programming view of the DWC_otg controller 276 */ 277 int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg) 278 { 279 int tx_fifo_count; 280 int tx_fifo_depth; 281 282 tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg); 283 284 tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg); 285 286 if (!tx_fifo_count) 287 return tx_fifo_depth; 288 else 289 return tx_fifo_depth / tx_fifo_count; 290 } 291 292 /** 293 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs 294 * @hsotg: The device instance. 295 */ 296 static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) 297 { 298 unsigned int ep; 299 unsigned int addr; 300 int timeout; 301 302 u32 val; 303 u32 *txfsz = hsotg->params.g_tx_fifo_size; 304 305 /* Reset fifo map if not correctly cleared during previous session */ 306 WARN_ON(hsotg->fifo_map); 307 hsotg->fifo_map = 0; 308 309 /* set RX/NPTX FIFO sizes */ 310 dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ); 311 dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size << 312 FIFOSIZE_STARTADDR_SHIFT) | 313 (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT), 314 GNPTXFSIZ); 315 316 /* 317 * arange all the rest of the TX FIFOs, as some versions of this 318 * block have overlapping default addresses. This also ensures 319 * that if the settings have been changed, then they are set to 320 * known values. 321 */ 322 323 /* start at the end of the GNPTXFSIZ, rounded up */ 324 addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size; 325 326 /* 327 * Configure fifos sizes from provided configuration and assign 328 * them to endpoints dynamically according to maxpacket size value of 329 * given endpoint. 330 */ 331 for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { 332 if (!txfsz[ep]) 333 continue; 334 val = addr; 335 val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT; 336 WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem, 337 "insufficient fifo memory"); 338 addr += txfsz[ep]; 339 340 dwc2_writel(hsotg, val, DPTXFSIZN(ep)); 341 val = dwc2_readl(hsotg, DPTXFSIZN(ep)); 342 } 343 344 dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size | 345 addr << GDFIFOCFG_EPINFOBASE_SHIFT, 346 GDFIFOCFG); 347 /* 348 * according to p428 of the design guide, we need to ensure that 349 * all fifos are flushed before continuing 350 */ 351 352 dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH | 353 GRSTCTL_RXFFLSH, GRSTCTL); 354 355 /* wait until the fifos are both flushed */ 356 timeout = 100; 357 while (1) { 358 val = dwc2_readl(hsotg, GRSTCTL); 359 360 if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0) 361 break; 362 363 if (--timeout == 0) { 364 dev_err(hsotg->dev, 365 "%s: timeout flushing fifos (GRSTCTL=%08x)\n", 366 __func__, val); 367 break; 368 } 369 370 udelay(1); 371 } 372 373 dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout); 374 } 375 376 /** 377 * dwc2_hsotg_ep_alloc_request - allocate USB rerequest structure 378 * @ep: USB endpoint to allocate request for. 379 * @flags: Allocation flags 380 * 381 * Allocate a new USB request structure appropriate for the specified endpoint 382 */ 383 static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep, 384 gfp_t flags) 385 { 386 struct dwc2_hsotg_req *req; 387 388 req = kzalloc(sizeof(*req), flags); 389 if (!req) 390 return NULL; 391 392 INIT_LIST_HEAD(&req->queue); 393 394 return &req->req; 395 } 396 397 /** 398 * is_ep_periodic - return true if the endpoint is in periodic mode. 399 * @hs_ep: The endpoint to query. 400 * 401 * Returns true if the endpoint is in periodic mode, meaning it is being 402 * used for an Interrupt or ISO transfer. 403 */ 404 static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep) 405 { 406 return hs_ep->periodic; 407 } 408 409 /** 410 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request 411 * @hsotg: The device state. 412 * @hs_ep: The endpoint for the request 413 * @hs_req: The request being processed. 414 * 415 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion 416 * of a request to ensure the buffer is ready for access by the caller. 417 */ 418 static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg, 419 struct dwc2_hsotg_ep *hs_ep, 420 struct dwc2_hsotg_req *hs_req) 421 { 422 struct usb_request *req = &hs_req->req; 423 424 usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); 425 } 426 427 /* 428 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains 429 * for Control endpoint 430 * @hsotg: The device state. 431 * 432 * This function will allocate 4 descriptor chains for EP 0: 2 for 433 * Setup stage, per one for IN and OUT data/status transactions. 434 */ 435 static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg) 436 { 437 hsotg->setup_desc[0] = 438 dmam_alloc_coherent(hsotg->dev, 439 sizeof(struct dwc2_dma_desc), 440 &hsotg->setup_desc_dma[0], 441 GFP_KERNEL); 442 if (!hsotg->setup_desc[0]) 443 goto fail; 444 445 hsotg->setup_desc[1] = 446 dmam_alloc_coherent(hsotg->dev, 447 sizeof(struct dwc2_dma_desc), 448 &hsotg->setup_desc_dma[1], 449 GFP_KERNEL); 450 if (!hsotg->setup_desc[1]) 451 goto fail; 452 453 hsotg->ctrl_in_desc = 454 dmam_alloc_coherent(hsotg->dev, 455 sizeof(struct dwc2_dma_desc), 456 &hsotg->ctrl_in_desc_dma, 457 GFP_KERNEL); 458 if (!hsotg->ctrl_in_desc) 459 goto fail; 460 461 hsotg->ctrl_out_desc = 462 dmam_alloc_coherent(hsotg->dev, 463 sizeof(struct dwc2_dma_desc), 464 &hsotg->ctrl_out_desc_dma, 465 GFP_KERNEL); 466 if (!hsotg->ctrl_out_desc) 467 goto fail; 468 469 return 0; 470 471 fail: 472 return -ENOMEM; 473 } 474 475 /** 476 * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO 477 * @hsotg: The controller state. 478 * @hs_ep: The endpoint we're going to write for. 479 * @hs_req: The request to write data for. 480 * 481 * This is called when the TxFIFO has some space in it to hold a new 482 * transmission and we have something to give it. The actual setup of 483 * the data size is done elsewhere, so all we have to do is to actually 484 * write the data. 485 * 486 * The return value is zero if there is more space (or nothing was done) 487 * otherwise -ENOSPC is returned if the FIFO space was used up. 488 * 489 * This routine is only needed for PIO 490 */ 491 static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg, 492 struct dwc2_hsotg_ep *hs_ep, 493 struct dwc2_hsotg_req *hs_req) 494 { 495 bool periodic = is_ep_periodic(hs_ep); 496 u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS); 497 int buf_pos = hs_req->req.actual; 498 int to_write = hs_ep->size_loaded; 499 void *data; 500 int can_write; 501 int pkt_round; 502 int max_transfer; 503 504 to_write -= (buf_pos - hs_ep->last_load); 505 506 /* if there's nothing to write, get out early */ 507 if (to_write == 0) 508 return 0; 509 510 if (periodic && !hsotg->dedicated_fifos) { 511 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index)); 512 int size_left; 513 int size_done; 514 515 /* 516 * work out how much data was loaded so we can calculate 517 * how much data is left in the fifo. 518 */ 519 520 size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 521 522 /* 523 * if shared fifo, we cannot write anything until the 524 * previous data has been completely sent. 525 */ 526 if (hs_ep->fifo_load != 0) { 527 dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP); 528 return -ENOSPC; 529 } 530 531 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n", 532 __func__, size_left, 533 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size); 534 535 /* how much of the data has moved */ 536 size_done = hs_ep->size_loaded - size_left; 537 538 /* how much data is left in the fifo */ 539 can_write = hs_ep->fifo_load - size_done; 540 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n", 541 __func__, can_write); 542 543 can_write = hs_ep->fifo_size - can_write; 544 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n", 545 __func__, can_write); 546 547 if (can_write <= 0) { 548 dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP); 549 return -ENOSPC; 550 } 551 } else if (hsotg->dedicated_fifos && hs_ep->index != 0) { 552 can_write = dwc2_readl(hsotg, 553 DTXFSTS(hs_ep->fifo_index)); 554 555 can_write &= 0xffff; 556 can_write *= 4; 557 } else { 558 if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) { 559 dev_dbg(hsotg->dev, 560 "%s: no queue slots available (0x%08x)\n", 561 __func__, gnptxsts); 562 563 dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP); 564 return -ENOSPC; 565 } 566 567 can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts); 568 can_write *= 4; /* fifo size is in 32bit quantities. */ 569 } 570 571 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc; 572 573 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n", 574 __func__, gnptxsts, can_write, to_write, max_transfer); 575 576 /* 577 * limit to 512 bytes of data, it seems at least on the non-periodic 578 * FIFO, requests of >512 cause the endpoint to get stuck with a 579 * fragment of the end of the transfer in it. 580 */ 581 if (can_write > 512 && !periodic) 582 can_write = 512; 583 584 /* 585 * limit the write to one max-packet size worth of data, but allow 586 * the transfer to return that it did not run out of fifo space 587 * doing it. 588 */ 589 if (to_write > max_transfer) { 590 to_write = max_transfer; 591 592 /* it's needed only when we do not use dedicated fifos */ 593 if (!hsotg->dedicated_fifos) 594 dwc2_hsotg_en_gsint(hsotg, 595 periodic ? GINTSTS_PTXFEMP : 596 GINTSTS_NPTXFEMP); 597 } 598 599 /* see if we can write data */ 600 601 if (to_write > can_write) { 602 to_write = can_write; 603 pkt_round = to_write % max_transfer; 604 605 /* 606 * Round the write down to an 607 * exact number of packets. 608 * 609 * Note, we do not currently check to see if we can ever 610 * write a full packet or not to the FIFO. 611 */ 612 613 if (pkt_round) 614 to_write -= pkt_round; 615 616 /* 617 * enable correct FIFO interrupt to alert us when there 618 * is more room left. 619 */ 620 621 /* it's needed only when we do not use dedicated fifos */ 622 if (!hsotg->dedicated_fifos) 623 dwc2_hsotg_en_gsint(hsotg, 624 periodic ? GINTSTS_PTXFEMP : 625 GINTSTS_NPTXFEMP); 626 } 627 628 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n", 629 to_write, hs_req->req.length, can_write, buf_pos); 630 631 if (to_write <= 0) 632 return -ENOSPC; 633 634 hs_req->req.actual = buf_pos + to_write; 635 hs_ep->total_data += to_write; 636 637 if (periodic) 638 hs_ep->fifo_load += to_write; 639 640 to_write = DIV_ROUND_UP(to_write, 4); 641 data = hs_req->req.buf + buf_pos; 642 643 dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write); 644 645 return (to_write >= can_write) ? -ENOSPC : 0; 646 } 647 648 /** 649 * get_ep_limit - get the maximum data legnth for this endpoint 650 * @hs_ep: The endpoint 651 * 652 * Return the maximum data that can be queued in one go on a given endpoint 653 * so that transfers that are too long can be split. 654 */ 655 static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep) 656 { 657 int index = hs_ep->index; 658 unsigned int maxsize; 659 unsigned int maxpkt; 660 661 if (index != 0) { 662 maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1; 663 maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1; 664 } else { 665 maxsize = 64 + 64; 666 if (hs_ep->dir_in) 667 maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1; 668 else 669 maxpkt = 2; 670 } 671 672 /* we made the constant loading easier above by using +1 */ 673 maxpkt--; 674 maxsize--; 675 676 /* 677 * constrain by packet count if maxpkts*pktsize is greater 678 * than the length register size. 679 */ 680 681 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize) 682 maxsize = maxpkt * hs_ep->ep.maxpacket; 683 684 return maxsize; 685 } 686 687 /** 688 * dwc2_hsotg_read_frameno - read current frame number 689 * @hsotg: The device instance 690 * 691 * Return the current frame number 692 */ 693 static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) 694 { 695 u32 dsts; 696 697 dsts = dwc2_readl(hsotg, DSTS); 698 dsts &= DSTS_SOFFN_MASK; 699 dsts >>= DSTS_SOFFN_SHIFT; 700 701 return dsts; 702 } 703 704 /** 705 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the 706 * DMA descriptor chain prepared for specific endpoint 707 * @hs_ep: The endpoint 708 * 709 * Return the maximum data that can be queued in one go on a given endpoint 710 * depending on its descriptor chain capacity so that transfers that 711 * are too long can be split. 712 */ 713 static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) 714 { 715 int is_isoc = hs_ep->isochronous; 716 unsigned int maxsize; 717 718 if (is_isoc) 719 maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : 720 DEV_DMA_ISOC_RX_NBYTES_LIMIT) * 721 MAX_DMA_DESC_NUM_HS_ISOC; 722 else 723 maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; 724 725 return maxsize; 726 } 727 728 /* 729 * dwc2_gadget_get_desc_params - get DMA descriptor parameters. 730 * @hs_ep: The endpoint 731 * @mask: RX/TX bytes mask to be defined 732 * 733 * Returns maximum data payload for one descriptor after analyzing endpoint 734 * characteristics. 735 * DMA descriptor transfer bytes limit depends on EP type: 736 * Control out - MPS, 737 * Isochronous - descriptor rx/tx bytes bitfield limit, 738 * Control In/Bulk/Interrupt - multiple of mps. This will allow to not 739 * have concatenations from various descriptors within one packet. 740 * 741 * Selects corresponding mask for RX/TX bytes as well. 742 */ 743 static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) 744 { 745 u32 mps = hs_ep->ep.maxpacket; 746 int dir_in = hs_ep->dir_in; 747 u32 desc_size = 0; 748 749 if (!hs_ep->index && !dir_in) { 750 desc_size = mps; 751 *mask = DEV_DMA_NBYTES_MASK; 752 } else if (hs_ep->isochronous) { 753 if (dir_in) { 754 desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT; 755 *mask = DEV_DMA_ISOC_TX_NBYTES_MASK; 756 } else { 757 desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT; 758 *mask = DEV_DMA_ISOC_RX_NBYTES_MASK; 759 } 760 } else { 761 desc_size = DEV_DMA_NBYTES_LIMIT; 762 *mask = DEV_DMA_NBYTES_MASK; 763 764 /* Round down desc_size to be mps multiple */ 765 desc_size -= desc_size % mps; 766 } 767 768 return desc_size; 769 } 770 771 static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep, 772 struct dwc2_dma_desc **desc, 773 dma_addr_t dma_buff, 774 unsigned int len, 775 bool true_last) 776 { 777 int dir_in = hs_ep->dir_in; 778 u32 mps = hs_ep->ep.maxpacket; 779 u32 maxsize = 0; 780 u32 offset = 0; 781 u32 mask = 0; 782 int i; 783 784 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); 785 786 hs_ep->desc_count = (len / maxsize) + 787 ((len % maxsize) ? 1 : 0); 788 if (len == 0) 789 hs_ep->desc_count = 1; 790 791 for (i = 0; i < hs_ep->desc_count; ++i) { 792 (*desc)->status = 0; 793 (*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY 794 << DEV_DMA_BUFF_STS_SHIFT); 795 796 if (len > maxsize) { 797 if (!hs_ep->index && !dir_in) 798 (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC); 799 800 (*desc)->status |= 801 maxsize << DEV_DMA_NBYTES_SHIFT & mask; 802 (*desc)->buf = dma_buff + offset; 803 804 len -= maxsize; 805 offset += maxsize; 806 } else { 807 if (true_last) 808 (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC); 809 810 if (dir_in) 811 (*desc)->status |= (len % mps) ? DEV_DMA_SHORT : 812 ((hs_ep->send_zlp && true_last) ? 813 DEV_DMA_SHORT : 0); 814 815 (*desc)->status |= 816 len << DEV_DMA_NBYTES_SHIFT & mask; 817 (*desc)->buf = dma_buff + offset; 818 } 819 820 (*desc)->status &= ~DEV_DMA_BUFF_STS_MASK; 821 (*desc)->status |= (DEV_DMA_BUFF_STS_HREADY 822 << DEV_DMA_BUFF_STS_SHIFT); 823 (*desc)++; 824 } 825 } 826 827 /* 828 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain. 829 * @hs_ep: The endpoint 830 * @ureq: Request to transfer 831 * @offset: offset in bytes 832 * @len: Length of the transfer 833 * 834 * This function will iterate over descriptor chain and fill its entries 835 * with corresponding information based on transfer data. 836 */ 837 static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep, 838 struct usb_request *ureq, 839 unsigned int offset, 840 unsigned int len) 841 { 842 struct dwc2_dma_desc *desc = hs_ep->desc_list; 843 struct scatterlist *sg; 844 int i; 845 u8 desc_count = 0; 846 847 /* non-DMA sg buffer */ 848 if (!ureq->num_sgs) { 849 dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc, 850 ureq->dma + offset, len, true); 851 return; 852 } 853 854 /* DMA sg buffer */ 855 for_each_sg(ureq->sg, sg, ureq->num_sgs, i) { 856 dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc, 857 sg_dma_address(sg) + sg->offset, sg_dma_len(sg), 858 sg_is_last(sg)); 859 desc_count += hs_ep->desc_count; 860 } 861 862 hs_ep->desc_count = desc_count; 863 } 864 865 /* 866 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain. 867 * @hs_ep: The isochronous endpoint. 868 * @dma_buff: usb requests dma buffer. 869 * @len: usb request transfer length. 870 * 871 * Fills next free descriptor with the data of the arrived usb request, 872 * frame info, sets Last and IOC bits increments next_desc. If filled 873 * descriptor is not the first one, removes L bit from the previous descriptor 874 * status. 875 */ 876 static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, 877 dma_addr_t dma_buff, unsigned int len) 878 { 879 struct dwc2_dma_desc *desc; 880 struct dwc2_hsotg *hsotg = hs_ep->parent; 881 u32 index; 882 u32 maxsize = 0; 883 u32 mask = 0; 884 u8 pid = 0; 885 886 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); 887 888 index = hs_ep->next_desc; 889 desc = &hs_ep->desc_list[index]; 890 891 /* Check if descriptor chain full */ 892 if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) == 893 DEV_DMA_BUFF_STS_HREADY) { 894 dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__); 895 return 1; 896 } 897 898 /* Clear L bit of previous desc if more than one entries in the chain */ 899 if (hs_ep->next_desc) 900 hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L; 901 902 dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n", 903 __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index); 904 905 desc->status = 0; 906 desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT); 907 908 desc->buf = dma_buff; 909 desc->status |= (DEV_DMA_L | DEV_DMA_IOC | 910 ((len << DEV_DMA_NBYTES_SHIFT) & mask)); 911 912 if (hs_ep->dir_in) { 913 if (len) 914 pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket); 915 else 916 pid = 1; 917 desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) & 918 DEV_DMA_ISOC_PID_MASK) | 919 ((len % hs_ep->ep.maxpacket) ? 920 DEV_DMA_SHORT : 0) | 921 ((hs_ep->target_frame << 922 DEV_DMA_ISOC_FRNUM_SHIFT) & 923 DEV_DMA_ISOC_FRNUM_MASK); 924 } 925 926 desc->status &= ~DEV_DMA_BUFF_STS_MASK; 927 desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT); 928 929 /* Increment frame number by interval for IN */ 930 if (hs_ep->dir_in) 931 dwc2_gadget_incr_frame_num(hs_ep); 932 933 /* Update index of last configured entry in the chain */ 934 hs_ep->next_desc++; 935 if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC) 936 hs_ep->next_desc = 0; 937 938 return 0; 939 } 940 941 /* 942 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA 943 * @hs_ep: The isochronous endpoint. 944 * 945 * Prepare descriptor chain for isochronous endpoints. Afterwards 946 * write DMA address to HW and enable the endpoint. 947 */ 948 static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) 949 { 950 struct dwc2_hsotg *hsotg = hs_ep->parent; 951 struct dwc2_hsotg_req *hs_req, *treq; 952 int index = hs_ep->index; 953 int ret; 954 int i; 955 u32 dma_reg; 956 u32 depctl; 957 u32 ctrl; 958 struct dwc2_dma_desc *desc; 959 960 if (list_empty(&hs_ep->queue)) { 961 hs_ep->target_frame = TARGET_FRAME_INITIAL; 962 dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); 963 return; 964 } 965 966 /* Initialize descriptor chain by Host Busy status */ 967 for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) { 968 desc = &hs_ep->desc_list[i]; 969 desc->status = 0; 970 desc->status |= (DEV_DMA_BUFF_STS_HBUSY 971 << DEV_DMA_BUFF_STS_SHIFT); 972 } 973 974 hs_ep->next_desc = 0; 975 list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) { 976 dma_addr_t dma_addr = hs_req->req.dma; 977 978 if (hs_req->req.num_sgs) { 979 WARN_ON(hs_req->req.num_sgs > 1); 980 dma_addr = sg_dma_address(hs_req->req.sg); 981 } 982 ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr, 983 hs_req->req.length); 984 if (ret) 985 break; 986 } 987 988 hs_ep->compl_desc = 0; 989 depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); 990 dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index); 991 992 /* write descriptor chain address to control register */ 993 dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg); 994 995 ctrl = dwc2_readl(hsotg, depctl); 996 ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; 997 dwc2_writel(hsotg, ctrl, depctl); 998 } 999 1000 /** 1001 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue 1002 * @hsotg: The controller state. 1003 * @hs_ep: The endpoint to process a request for 1004 * @hs_req: The request to start. 1005 * @continuing: True if we are doing more for the current request. 1006 * 1007 * Start the given request running by setting the endpoint registers 1008 * appropriately, and writing any data to the FIFOs. 1009 */ 1010 static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, 1011 struct dwc2_hsotg_ep *hs_ep, 1012 struct dwc2_hsotg_req *hs_req, 1013 bool continuing) 1014 { 1015 struct usb_request *ureq = &hs_req->req; 1016 int index = hs_ep->index; 1017 int dir_in = hs_ep->dir_in; 1018 u32 epctrl_reg; 1019 u32 epsize_reg; 1020 u32 epsize; 1021 u32 ctrl; 1022 unsigned int length; 1023 unsigned int packets; 1024 unsigned int maxreq; 1025 unsigned int dma_reg; 1026 1027 if (index != 0) { 1028 if (hs_ep->req && !continuing) { 1029 dev_err(hsotg->dev, "%s: active request\n", __func__); 1030 WARN_ON(1); 1031 return; 1032 } else if (hs_ep->req != hs_req && continuing) { 1033 dev_err(hsotg->dev, 1034 "%s: continue different req\n", __func__); 1035 WARN_ON(1); 1036 return; 1037 } 1038 } 1039 1040 dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index); 1041 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 1042 epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index); 1043 1044 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n", 1045 __func__, dwc2_readl(hsotg, epctrl_reg), index, 1046 hs_ep->dir_in ? "in" : "out"); 1047 1048 /* If endpoint is stalled, we will restart request later */ 1049 ctrl = dwc2_readl(hsotg, epctrl_reg); 1050 1051 if (index && ctrl & DXEPCTL_STALL) { 1052 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index); 1053 return; 1054 } 1055 1056 length = ureq->length - ureq->actual; 1057 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n", 1058 ureq->length, ureq->actual); 1059 1060 if (!using_desc_dma(hsotg)) 1061 maxreq = get_ep_limit(hs_ep); 1062 else 1063 maxreq = dwc2_gadget_get_chain_limit(hs_ep); 1064 1065 if (length > maxreq) { 1066 int round = maxreq % hs_ep->ep.maxpacket; 1067 1068 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n", 1069 __func__, length, maxreq, round); 1070 1071 /* round down to multiple of packets */ 1072 if (round) 1073 maxreq -= round; 1074 1075 length = maxreq; 1076 } 1077 1078 if (length) 1079 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket); 1080 else 1081 packets = 1; /* send one packet if length is zero. */ 1082 1083 if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) { 1084 dev_err(hsotg->dev, "req length > maxpacket*mc\n"); 1085 return; 1086 } 1087 1088 if (dir_in && index != 0) 1089 if (hs_ep->isochronous) 1090 epsize = DXEPTSIZ_MC(packets); 1091 else 1092 epsize = DXEPTSIZ_MC(1); 1093 else 1094 epsize = 0; 1095 1096 /* 1097 * zero length packet should be programmed on its own and should not 1098 * be counted in DIEPTSIZ.PktCnt with other packets. 1099 */ 1100 if (dir_in && ureq->zero && !continuing) { 1101 /* Test if zlp is actually required. */ 1102 if ((ureq->length >= hs_ep->ep.maxpacket) && 1103 !(ureq->length % hs_ep->ep.maxpacket)) 1104 hs_ep->send_zlp = 1; 1105 } 1106 1107 epsize |= DXEPTSIZ_PKTCNT(packets); 1108 epsize |= DXEPTSIZ_XFERSIZE(length); 1109 1110 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n", 1111 __func__, packets, length, ureq->length, epsize, epsize_reg); 1112 1113 /* store the request as the current one we're doing */ 1114 hs_ep->req = hs_req; 1115 1116 if (using_desc_dma(hsotg)) { 1117 u32 offset = 0; 1118 u32 mps = hs_ep->ep.maxpacket; 1119 1120 /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */ 1121 if (!dir_in) { 1122 if (!index) 1123 length = mps; 1124 else if (length % mps) 1125 length += (mps - (length % mps)); 1126 } 1127 1128 /* 1129 * If more data to send, adjust DMA for EP0 out data stage. 1130 * ureq->dma stays unchanged, hence increment it by already 1131 * passed passed data count before starting new transaction. 1132 */ 1133 if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && 1134 continuing) 1135 offset = ureq->actual; 1136 1137 /* Fill DDMA chain entries */ 1138 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq, offset, 1139 length); 1140 1141 /* write descriptor chain address to control register */ 1142 dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg); 1143 1144 dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n", 1145 __func__, (u32)hs_ep->desc_list_dma, dma_reg); 1146 } else { 1147 /* write size / packets */ 1148 dwc2_writel(hsotg, epsize, epsize_reg); 1149 1150 if (using_dma(hsotg) && !continuing && (length != 0)) { 1151 /* 1152 * write DMA address to control register, buffer 1153 * already synced by dwc2_hsotg_ep_queue(). 1154 */ 1155 1156 dwc2_writel(hsotg, ureq->dma, dma_reg); 1157 1158 dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n", 1159 __func__, &ureq->dma, dma_reg); 1160 } 1161 } 1162 1163 if (hs_ep->isochronous && hs_ep->interval == 1) { 1164 hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg); 1165 dwc2_gadget_incr_frame_num(hs_ep); 1166 1167 if (hs_ep->target_frame & 0x1) 1168 ctrl |= DXEPCTL_SETODDFR; 1169 else 1170 ctrl |= DXEPCTL_SETEVENFR; 1171 } 1172 1173 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ 1174 1175 dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state); 1176 1177 /* For Setup request do not clear NAK */ 1178 if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP)) 1179 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ 1180 1181 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl); 1182 dwc2_writel(hsotg, ctrl, epctrl_reg); 1183 1184 /* 1185 * set these, it seems that DMA support increments past the end 1186 * of the packet buffer so we need to calculate the length from 1187 * this information. 1188 */ 1189 hs_ep->size_loaded = length; 1190 hs_ep->last_load = ureq->actual; 1191 1192 if (dir_in && !using_dma(hsotg)) { 1193 /* set these anyway, we may need them for non-periodic in */ 1194 hs_ep->fifo_load = 0; 1195 1196 dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req); 1197 } 1198 1199 /* 1200 * Note, trying to clear the NAK here causes problems with transmit 1201 * on the S3C6400 ending up with the TXFIFO becoming full. 1202 */ 1203 1204 /* check ep is enabled */ 1205 if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA)) 1206 dev_dbg(hsotg->dev, 1207 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n", 1208 index, dwc2_readl(hsotg, epctrl_reg)); 1209 1210 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n", 1211 __func__, dwc2_readl(hsotg, epctrl_reg)); 1212 1213 /* enable ep interrupts */ 1214 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1); 1215 } 1216 1217 /** 1218 * dwc2_hsotg_map_dma - map the DMA memory being used for the request 1219 * @hsotg: The device state. 1220 * @hs_ep: The endpoint the request is on. 1221 * @req: The request being processed. 1222 * 1223 * We've been asked to queue a request, so ensure that the memory buffer 1224 * is correctly setup for DMA. If we've been passed an extant DMA address 1225 * then ensure the buffer has been synced to memory. If our buffer has no 1226 * DMA memory, then we map the memory and mark our request to allow us to 1227 * cleanup on completion. 1228 */ 1229 static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg, 1230 struct dwc2_hsotg_ep *hs_ep, 1231 struct usb_request *req) 1232 { 1233 int ret; 1234 1235 ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in); 1236 if (ret) 1237 goto dma_error; 1238 1239 return 0; 1240 1241 dma_error: 1242 dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n", 1243 __func__, req->buf, req->length); 1244 1245 return -EIO; 1246 } 1247 1248 static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg, 1249 struct dwc2_hsotg_ep *hs_ep, 1250 struct dwc2_hsotg_req *hs_req) 1251 { 1252 void *req_buf = hs_req->req.buf; 1253 1254 /* If dma is not being used or buffer is aligned */ 1255 if (!using_dma(hsotg) || !((long)req_buf & 3)) 1256 return 0; 1257 1258 WARN_ON(hs_req->saved_req_buf); 1259 1260 dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__, 1261 hs_ep->ep.name, req_buf, hs_req->req.length); 1262 1263 hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC); 1264 if (!hs_req->req.buf) { 1265 hs_req->req.buf = req_buf; 1266 dev_err(hsotg->dev, 1267 "%s: unable to allocate memory for bounce buffer\n", 1268 __func__); 1269 return -ENOMEM; 1270 } 1271 1272 /* Save actual buffer */ 1273 hs_req->saved_req_buf = req_buf; 1274 1275 if (hs_ep->dir_in) 1276 memcpy(hs_req->req.buf, req_buf, hs_req->req.length); 1277 return 0; 1278 } 1279 1280 static void 1281 dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg, 1282 struct dwc2_hsotg_ep *hs_ep, 1283 struct dwc2_hsotg_req *hs_req) 1284 { 1285 /* If dma is not being used or buffer was aligned */ 1286 if (!using_dma(hsotg) || !hs_req->saved_req_buf) 1287 return; 1288 1289 dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__, 1290 hs_ep->ep.name, hs_req->req.status, hs_req->req.actual); 1291 1292 /* Copy data from bounce buffer on successful out transfer */ 1293 if (!hs_ep->dir_in && !hs_req->req.status) 1294 memcpy(hs_req->saved_req_buf, hs_req->req.buf, 1295 hs_req->req.actual); 1296 1297 /* Free bounce buffer */ 1298 kfree(hs_req->req.buf); 1299 1300 hs_req->req.buf = hs_req->saved_req_buf; 1301 hs_req->saved_req_buf = NULL; 1302 } 1303 1304 /** 1305 * dwc2_gadget_target_frame_elapsed - Checks target frame 1306 * @hs_ep: The driver endpoint to check 1307 * 1308 * Returns 1 if targeted frame elapsed. If returned 1 then we need to drop 1309 * corresponding transfer. 1310 */ 1311 static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep) 1312 { 1313 struct dwc2_hsotg *hsotg = hs_ep->parent; 1314 u32 target_frame = hs_ep->target_frame; 1315 u32 current_frame = hsotg->frame_number; 1316 bool frame_overrun = hs_ep->frame_overrun; 1317 1318 if (!frame_overrun && current_frame >= target_frame) 1319 return true; 1320 1321 if (frame_overrun && current_frame >= target_frame && 1322 ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2)) 1323 return true; 1324 1325 return false; 1326 } 1327 1328 /* 1329 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers 1330 * @hsotg: The driver state 1331 * @hs_ep: the ep descriptor chain is for 1332 * 1333 * Called to update EP0 structure's pointers depend on stage of 1334 * control transfer. 1335 */ 1336 static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg, 1337 struct dwc2_hsotg_ep *hs_ep) 1338 { 1339 switch (hsotg->ep0_state) { 1340 case DWC2_EP0_SETUP: 1341 case DWC2_EP0_STATUS_OUT: 1342 hs_ep->desc_list = hsotg->setup_desc[0]; 1343 hs_ep->desc_list_dma = hsotg->setup_desc_dma[0]; 1344 break; 1345 case DWC2_EP0_DATA_IN: 1346 case DWC2_EP0_STATUS_IN: 1347 hs_ep->desc_list = hsotg->ctrl_in_desc; 1348 hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma; 1349 break; 1350 case DWC2_EP0_DATA_OUT: 1351 hs_ep->desc_list = hsotg->ctrl_out_desc; 1352 hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma; 1353 break; 1354 default: 1355 dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n", 1356 hsotg->ep0_state); 1357 return -EINVAL; 1358 } 1359 1360 return 0; 1361 } 1362 1363 static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, 1364 gfp_t gfp_flags) 1365 { 1366 struct dwc2_hsotg_req *hs_req = our_req(req); 1367 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1368 struct dwc2_hsotg *hs = hs_ep->parent; 1369 bool first; 1370 int ret; 1371 u32 maxsize = 0; 1372 u32 mask = 0; 1373 1374 1375 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n", 1376 ep->name, req, req->length, req->buf, req->no_interrupt, 1377 req->zero, req->short_not_ok); 1378 1379 /* Prevent new request submission when controller is suspended */ 1380 if (hs->lx_state != DWC2_L0) { 1381 dev_dbg(hs->dev, "%s: submit request only in active state\n", 1382 __func__); 1383 return -EAGAIN; 1384 } 1385 1386 /* initialise status of the request */ 1387 INIT_LIST_HEAD(&hs_req->queue); 1388 req->actual = 0; 1389 req->status = -EINPROGRESS; 1390 1391 /* In DDMA mode for ISOC's don't queue request if length greater 1392 * than descriptor limits. 1393 */ 1394 if (using_desc_dma(hs) && hs_ep->isochronous) { 1395 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); 1396 if (hs_ep->dir_in && req->length > maxsize) { 1397 dev_err(hs->dev, "wrong length %d (maxsize=%d)\n", 1398 req->length, maxsize); 1399 return -EINVAL; 1400 } 1401 1402 if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) { 1403 dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n", 1404 req->length, hs_ep->ep.maxpacket); 1405 return -EINVAL; 1406 } 1407 } 1408 1409 ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req); 1410 if (ret) 1411 return ret; 1412 1413 /* if we're using DMA, sync the buffers as necessary */ 1414 if (using_dma(hs)) { 1415 ret = dwc2_hsotg_map_dma(hs, hs_ep, req); 1416 if (ret) 1417 return ret; 1418 } 1419 /* If using descriptor DMA configure EP0 descriptor chain pointers */ 1420 if (using_desc_dma(hs) && !hs_ep->index) { 1421 ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep); 1422 if (ret) 1423 return ret; 1424 } 1425 1426 first = list_empty(&hs_ep->queue); 1427 list_add_tail(&hs_req->queue, &hs_ep->queue); 1428 1429 /* 1430 * Handle DDMA isochronous transfers separately - just add new entry 1431 * to the descriptor chain. 1432 * Transfer will be started once SW gets either one of NAK or 1433 * OutTknEpDis interrupts. 1434 */ 1435 if (using_desc_dma(hs) && hs_ep->isochronous) { 1436 if (hs_ep->target_frame != TARGET_FRAME_INITIAL) { 1437 dma_addr_t dma_addr = hs_req->req.dma; 1438 1439 if (hs_req->req.num_sgs) { 1440 WARN_ON(hs_req->req.num_sgs > 1); 1441 dma_addr = sg_dma_address(hs_req->req.sg); 1442 } 1443 dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr, 1444 hs_req->req.length); 1445 } 1446 return 0; 1447 } 1448 1449 /* Change EP direction if status phase request is after data out */ 1450 if (!hs_ep->index && !req->length && !hs_ep->dir_in && 1451 hs->ep0_state == DWC2_EP0_DATA_OUT) 1452 hs_ep->dir_in = 1; 1453 1454 if (first) { 1455 if (!hs_ep->isochronous) { 1456 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false); 1457 return 0; 1458 } 1459 1460 /* Update current frame number value. */ 1461 hs->frame_number = dwc2_hsotg_read_frameno(hs); 1462 while (dwc2_gadget_target_frame_elapsed(hs_ep)) { 1463 dwc2_gadget_incr_frame_num(hs_ep); 1464 /* Update current frame number value once more as it 1465 * changes here. 1466 */ 1467 hs->frame_number = dwc2_hsotg_read_frameno(hs); 1468 } 1469 1470 if (hs_ep->target_frame != TARGET_FRAME_INITIAL) 1471 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false); 1472 } 1473 return 0; 1474 } 1475 1476 static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req, 1477 gfp_t gfp_flags) 1478 { 1479 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1480 struct dwc2_hsotg *hs = hs_ep->parent; 1481 unsigned long flags = 0; 1482 int ret = 0; 1483 1484 spin_lock_irqsave(&hs->lock, flags); 1485 ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags); 1486 spin_unlock_irqrestore(&hs->lock, flags); 1487 1488 return ret; 1489 } 1490 1491 static void dwc2_hsotg_ep_free_request(struct usb_ep *ep, 1492 struct usb_request *req) 1493 { 1494 struct dwc2_hsotg_req *hs_req = our_req(req); 1495 1496 kfree(hs_req); 1497 } 1498 1499 /** 1500 * dwc2_hsotg_complete_oursetup - setup completion callback 1501 * @ep: The endpoint the request was on. 1502 * @req: The request completed. 1503 * 1504 * Called on completion of any requests the driver itself 1505 * submitted that need cleaning up. 1506 */ 1507 static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, 1508 struct usb_request *req) 1509 { 1510 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1511 struct dwc2_hsotg *hsotg = hs_ep->parent; 1512 1513 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req); 1514 1515 dwc2_hsotg_ep_free_request(ep, req); 1516 } 1517 1518 /** 1519 * ep_from_windex - convert control wIndex value to endpoint 1520 * @hsotg: The driver state. 1521 * @windex: The control request wIndex field (in host order). 1522 * 1523 * Convert the given wIndex into a pointer to an driver endpoint 1524 * structure, or return NULL if it is not a valid endpoint. 1525 */ 1526 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, 1527 u32 windex) 1528 { 1529 struct dwc2_hsotg_ep *ep; 1530 int dir = (windex & USB_DIR_IN) ? 1 : 0; 1531 int idx = windex & 0x7F; 1532 1533 if (windex >= 0x100) 1534 return NULL; 1535 1536 if (idx > hsotg->num_of_eps) 1537 return NULL; 1538 1539 ep = index_to_ep(hsotg, idx, dir); 1540 1541 if (idx && ep->dir_in != dir) 1542 return NULL; 1543 1544 return ep; 1545 } 1546 1547 /** 1548 * dwc2_hsotg_set_test_mode - Enable usb Test Modes 1549 * @hsotg: The driver state. 1550 * @testmode: requested usb test mode 1551 * Enable usb Test Mode requested by the Host. 1552 */ 1553 int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) 1554 { 1555 int dctl = dwc2_readl(hsotg, DCTL); 1556 1557 dctl &= ~DCTL_TSTCTL_MASK; 1558 switch (testmode) { 1559 case TEST_J: 1560 case TEST_K: 1561 case TEST_SE0_NAK: 1562 case TEST_PACKET: 1563 case TEST_FORCE_EN: 1564 dctl |= testmode << DCTL_TSTCTL_SHIFT; 1565 break; 1566 default: 1567 return -EINVAL; 1568 } 1569 dwc2_writel(hsotg, dctl, DCTL); 1570 return 0; 1571 } 1572 1573 /** 1574 * dwc2_hsotg_send_reply - send reply to control request 1575 * @hsotg: The device state 1576 * @ep: Endpoint 0 1577 * @buff: Buffer for request 1578 * @length: Length of reply. 1579 * 1580 * Create a request and queue it on the given endpoint. This is useful as 1581 * an internal method of sending replies to certain control requests, etc. 1582 */ 1583 static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg, 1584 struct dwc2_hsotg_ep *ep, 1585 void *buff, 1586 int length) 1587 { 1588 struct usb_request *req; 1589 int ret; 1590 1591 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length); 1592 1593 req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC); 1594 hsotg->ep0_reply = req; 1595 if (!req) { 1596 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__); 1597 return -ENOMEM; 1598 } 1599 1600 req->buf = hsotg->ep0_buff; 1601 req->length = length; 1602 /* 1603 * zero flag is for sending zlp in DATA IN stage. It has no impact on 1604 * STATUS stage. 1605 */ 1606 req->zero = 0; 1607 req->complete = dwc2_hsotg_complete_oursetup; 1608 1609 if (length) 1610 memcpy(req->buf, buff, length); 1611 1612 ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC); 1613 if (ret) { 1614 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__); 1615 return ret; 1616 } 1617 1618 return 0; 1619 } 1620 1621 /** 1622 * dwc2_hsotg_process_req_status - process request GET_STATUS 1623 * @hsotg: The device state 1624 * @ctrl: USB control request 1625 */ 1626 static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, 1627 struct usb_ctrlrequest *ctrl) 1628 { 1629 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1630 struct dwc2_hsotg_ep *ep; 1631 __le16 reply; 1632 int ret; 1633 1634 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__); 1635 1636 if (!ep0->dir_in) { 1637 dev_warn(hsotg->dev, "%s: direction out?\n", __func__); 1638 return -EINVAL; 1639 } 1640 1641 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1642 case USB_RECIP_DEVICE: 1643 /* 1644 * bit 0 => self powered 1645 * bit 1 => remote wakeup 1646 */ 1647 reply = cpu_to_le16(0); 1648 break; 1649 1650 case USB_RECIP_INTERFACE: 1651 /* currently, the data result should be zero */ 1652 reply = cpu_to_le16(0); 1653 break; 1654 1655 case USB_RECIP_ENDPOINT: 1656 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); 1657 if (!ep) 1658 return -ENOENT; 1659 1660 reply = cpu_to_le16(ep->halted ? 1 : 0); 1661 break; 1662 1663 default: 1664 return 0; 1665 } 1666 1667 if (le16_to_cpu(ctrl->wLength) != 2) 1668 return -EINVAL; 1669 1670 ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2); 1671 if (ret) { 1672 dev_err(hsotg->dev, "%s: failed to send reply\n", __func__); 1673 return ret; 1674 } 1675 1676 return 1; 1677 } 1678 1679 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now); 1680 1681 /** 1682 * get_ep_head - return the first request on the endpoint 1683 * @hs_ep: The controller endpoint to get 1684 * 1685 * Get the first request on the endpoint. 1686 */ 1687 static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep) 1688 { 1689 return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req, 1690 queue); 1691 } 1692 1693 /** 1694 * dwc2_gadget_start_next_request - Starts next request from ep queue 1695 * @hs_ep: Endpoint structure 1696 * 1697 * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked 1698 * in its handler. Hence we need to unmask it here to be able to do 1699 * resynchronization. 1700 */ 1701 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep) 1702 { 1703 u32 mask; 1704 struct dwc2_hsotg *hsotg = hs_ep->parent; 1705 int dir_in = hs_ep->dir_in; 1706 struct dwc2_hsotg_req *hs_req; 1707 u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK; 1708 1709 if (!list_empty(&hs_ep->queue)) { 1710 hs_req = get_ep_head(hs_ep); 1711 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false); 1712 return; 1713 } 1714 if (!hs_ep->isochronous) 1715 return; 1716 1717 if (dir_in) { 1718 dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n", 1719 __func__); 1720 } else { 1721 dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n", 1722 __func__); 1723 mask = dwc2_readl(hsotg, epmsk_reg); 1724 mask |= DOEPMSK_OUTTKNEPDISMSK; 1725 dwc2_writel(hsotg, mask, epmsk_reg); 1726 } 1727 } 1728 1729 /** 1730 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE 1731 * @hsotg: The device state 1732 * @ctrl: USB control request 1733 */ 1734 static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, 1735 struct usb_ctrlrequest *ctrl) 1736 { 1737 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1738 struct dwc2_hsotg_req *hs_req; 1739 bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE); 1740 struct dwc2_hsotg_ep *ep; 1741 int ret; 1742 bool halted; 1743 u32 recip; 1744 u32 wValue; 1745 u32 wIndex; 1746 1747 dev_dbg(hsotg->dev, "%s: %s_FEATURE\n", 1748 __func__, set ? "SET" : "CLEAR"); 1749 1750 wValue = le16_to_cpu(ctrl->wValue); 1751 wIndex = le16_to_cpu(ctrl->wIndex); 1752 recip = ctrl->bRequestType & USB_RECIP_MASK; 1753 1754 switch (recip) { 1755 case USB_RECIP_DEVICE: 1756 switch (wValue) { 1757 case USB_DEVICE_REMOTE_WAKEUP: 1758 hsotg->remote_wakeup_allowed = 1; 1759 break; 1760 1761 case USB_DEVICE_TEST_MODE: 1762 if ((wIndex & 0xff) != 0) 1763 return -EINVAL; 1764 if (!set) 1765 return -EINVAL; 1766 1767 hsotg->test_mode = wIndex >> 8; 1768 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); 1769 if (ret) { 1770 dev_err(hsotg->dev, 1771 "%s: failed to send reply\n", __func__); 1772 return ret; 1773 } 1774 break; 1775 default: 1776 return -ENOENT; 1777 } 1778 break; 1779 1780 case USB_RECIP_ENDPOINT: 1781 ep = ep_from_windex(hsotg, wIndex); 1782 if (!ep) { 1783 dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n", 1784 __func__, wIndex); 1785 return -ENOENT; 1786 } 1787 1788 switch (wValue) { 1789 case USB_ENDPOINT_HALT: 1790 halted = ep->halted; 1791 1792 dwc2_hsotg_ep_sethalt(&ep->ep, set, true); 1793 1794 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); 1795 if (ret) { 1796 dev_err(hsotg->dev, 1797 "%s: failed to send reply\n", __func__); 1798 return ret; 1799 } 1800 1801 /* 1802 * we have to complete all requests for ep if it was 1803 * halted, and the halt was cleared by CLEAR_FEATURE 1804 */ 1805 1806 if (!set && halted) { 1807 /* 1808 * If we have request in progress, 1809 * then complete it 1810 */ 1811 if (ep->req) { 1812 hs_req = ep->req; 1813 ep->req = NULL; 1814 list_del_init(&hs_req->queue); 1815 if (hs_req->req.complete) { 1816 spin_unlock(&hsotg->lock); 1817 usb_gadget_giveback_request( 1818 &ep->ep, &hs_req->req); 1819 spin_lock(&hsotg->lock); 1820 } 1821 } 1822 1823 /* If we have pending request, then start it */ 1824 if (!ep->req) 1825 dwc2_gadget_start_next_request(ep); 1826 } 1827 1828 break; 1829 1830 default: 1831 return -ENOENT; 1832 } 1833 break; 1834 default: 1835 return -ENOENT; 1836 } 1837 return 1; 1838 } 1839 1840 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg); 1841 1842 /** 1843 * dwc2_hsotg_stall_ep0 - stall ep0 1844 * @hsotg: The device state 1845 * 1846 * Set stall for ep0 as response for setup request. 1847 */ 1848 static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg) 1849 { 1850 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1851 u32 reg; 1852 u32 ctrl; 1853 1854 dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in); 1855 reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0; 1856 1857 /* 1858 * DxEPCTL_Stall will be cleared by EP once it has 1859 * taken effect, so no need to clear later. 1860 */ 1861 1862 ctrl = dwc2_readl(hsotg, reg); 1863 ctrl |= DXEPCTL_STALL; 1864 ctrl |= DXEPCTL_CNAK; 1865 dwc2_writel(hsotg, ctrl, reg); 1866 1867 dev_dbg(hsotg->dev, 1868 "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n", 1869 ctrl, reg, dwc2_readl(hsotg, reg)); 1870 1871 /* 1872 * complete won't be called, so we enqueue 1873 * setup request here 1874 */ 1875 dwc2_hsotg_enqueue_setup(hsotg); 1876 } 1877 1878 /** 1879 * dwc2_hsotg_process_control - process a control request 1880 * @hsotg: The device state 1881 * @ctrl: The control request received 1882 * 1883 * The controller has received the SETUP phase of a control request, and 1884 * needs to work out what to do next (and whether to pass it on to the 1885 * gadget driver). 1886 */ 1887 static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg, 1888 struct usb_ctrlrequest *ctrl) 1889 { 1890 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1891 int ret = 0; 1892 u32 dcfg; 1893 1894 dev_dbg(hsotg->dev, 1895 "ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n", 1896 ctrl->bRequestType, ctrl->bRequest, ctrl->wValue, 1897 ctrl->wIndex, ctrl->wLength); 1898 1899 if (ctrl->wLength == 0) { 1900 ep0->dir_in = 1; 1901 hsotg->ep0_state = DWC2_EP0_STATUS_IN; 1902 } else if (ctrl->bRequestType & USB_DIR_IN) { 1903 ep0->dir_in = 1; 1904 hsotg->ep0_state = DWC2_EP0_DATA_IN; 1905 } else { 1906 ep0->dir_in = 0; 1907 hsotg->ep0_state = DWC2_EP0_DATA_OUT; 1908 } 1909 1910 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1911 switch (ctrl->bRequest) { 1912 case USB_REQ_SET_ADDRESS: 1913 hsotg->connected = 1; 1914 dcfg = dwc2_readl(hsotg, DCFG); 1915 dcfg &= ~DCFG_DEVADDR_MASK; 1916 dcfg |= (le16_to_cpu(ctrl->wValue) << 1917 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK; 1918 dwc2_writel(hsotg, dcfg, DCFG); 1919 1920 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue); 1921 1922 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); 1923 return; 1924 1925 case USB_REQ_GET_STATUS: 1926 ret = dwc2_hsotg_process_req_status(hsotg, ctrl); 1927 break; 1928 1929 case USB_REQ_CLEAR_FEATURE: 1930 case USB_REQ_SET_FEATURE: 1931 ret = dwc2_hsotg_process_req_feature(hsotg, ctrl); 1932 break; 1933 } 1934 } 1935 1936 /* as a fallback, try delivering it to the driver to deal with */ 1937 1938 if (ret == 0 && hsotg->driver) { 1939 spin_unlock(&hsotg->lock); 1940 ret = hsotg->driver->setup(&hsotg->gadget, ctrl); 1941 spin_lock(&hsotg->lock); 1942 if (ret < 0) 1943 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); 1944 } 1945 1946 hsotg->delayed_status = false; 1947 if (ret == USB_GADGET_DELAYED_STATUS) 1948 hsotg->delayed_status = true; 1949 1950 /* 1951 * the request is either unhandlable, or is not formatted correctly 1952 * so respond with a STALL for the status stage to indicate failure. 1953 */ 1954 1955 if (ret < 0) 1956 dwc2_hsotg_stall_ep0(hsotg); 1957 } 1958 1959 /** 1960 * dwc2_hsotg_complete_setup - completion of a setup transfer 1961 * @ep: The endpoint the request was on. 1962 * @req: The request completed. 1963 * 1964 * Called on completion of any requests the driver itself submitted for 1965 * EP0 setup packets 1966 */ 1967 static void dwc2_hsotg_complete_setup(struct usb_ep *ep, 1968 struct usb_request *req) 1969 { 1970 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1971 struct dwc2_hsotg *hsotg = hs_ep->parent; 1972 1973 if (req->status < 0) { 1974 dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status); 1975 return; 1976 } 1977 1978 spin_lock(&hsotg->lock); 1979 if (req->actual == 0) 1980 dwc2_hsotg_enqueue_setup(hsotg); 1981 else 1982 dwc2_hsotg_process_control(hsotg, req->buf); 1983 spin_unlock(&hsotg->lock); 1984 } 1985 1986 /** 1987 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets 1988 * @hsotg: The device state. 1989 * 1990 * Enqueue a request on EP0 if necessary to received any SETUP packets 1991 * received from the host. 1992 */ 1993 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg) 1994 { 1995 struct usb_request *req = hsotg->ctrl_req; 1996 struct dwc2_hsotg_req *hs_req = our_req(req); 1997 int ret; 1998 1999 dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__); 2000 2001 req->zero = 0; 2002 req->length = 8; 2003 req->buf = hsotg->ctrl_buff; 2004 req->complete = dwc2_hsotg_complete_setup; 2005 2006 if (!list_empty(&hs_req->queue)) { 2007 dev_dbg(hsotg->dev, "%s already queued???\n", __func__); 2008 return; 2009 } 2010 2011 hsotg->eps_out[0]->dir_in = 0; 2012 hsotg->eps_out[0]->send_zlp = 0; 2013 hsotg->ep0_state = DWC2_EP0_SETUP; 2014 2015 ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC); 2016 if (ret < 0) { 2017 dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret); 2018 /* 2019 * Don't think there's much we can do other than watch the 2020 * driver fail. 2021 */ 2022 } 2023 } 2024 2025 static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, 2026 struct dwc2_hsotg_ep *hs_ep) 2027 { 2028 u32 ctrl; 2029 u8 index = hs_ep->index; 2030 u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); 2031 u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index); 2032 2033 if (hs_ep->dir_in) 2034 dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n", 2035 index); 2036 else 2037 dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n", 2038 index); 2039 if (using_desc_dma(hsotg)) { 2040 if (!index) 2041 dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); 2042 2043 /* Not specific buffer needed for ep0 ZLP */ 2044 dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &hs_ep->desc_list, 2045 hs_ep->desc_list_dma, 0, true); 2046 } else { 2047 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 2048 DXEPTSIZ_XFERSIZE(0), 2049 epsiz_reg); 2050 } 2051 2052 ctrl = dwc2_readl(hsotg, epctl_reg); 2053 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ 2054 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ 2055 ctrl |= DXEPCTL_USBACTEP; 2056 dwc2_writel(hsotg, ctrl, epctl_reg); 2057 } 2058 2059 /** 2060 * dwc2_hsotg_complete_request - complete a request given to us 2061 * @hsotg: The device state. 2062 * @hs_ep: The endpoint the request was on. 2063 * @hs_req: The request to complete. 2064 * @result: The result code (0 => Ok, otherwise errno) 2065 * 2066 * The given request has finished, so call the necessary completion 2067 * if it has one and then look to see if we can start a new request 2068 * on the endpoint. 2069 * 2070 * Note, expects the ep to already be locked as appropriate. 2071 */ 2072 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, 2073 struct dwc2_hsotg_ep *hs_ep, 2074 struct dwc2_hsotg_req *hs_req, 2075 int result) 2076 { 2077 if (!hs_req) { 2078 dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__); 2079 return; 2080 } 2081 2082 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n", 2083 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete); 2084 2085 /* 2086 * only replace the status if we've not already set an error 2087 * from a previous transaction 2088 */ 2089 2090 if (hs_req->req.status == -EINPROGRESS) 2091 hs_req->req.status = result; 2092 2093 if (using_dma(hsotg)) 2094 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req); 2095 2096 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req); 2097 2098 hs_ep->req = NULL; 2099 list_del_init(&hs_req->queue); 2100 2101 /* 2102 * call the complete request with the locks off, just in case the 2103 * request tries to queue more work for this endpoint. 2104 */ 2105 2106 if (hs_req->req.complete) { 2107 spin_unlock(&hsotg->lock); 2108 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req); 2109 spin_lock(&hsotg->lock); 2110 } 2111 2112 /* In DDMA don't need to proceed to starting of next ISOC request */ 2113 if (using_desc_dma(hsotg) && hs_ep->isochronous) 2114 return; 2115 2116 /* 2117 * Look to see if there is anything else to do. Note, the completion 2118 * of the previous request may have caused a new request to be started 2119 * so be careful when doing this. 2120 */ 2121 2122 if (!hs_ep->req && result >= 0) 2123 dwc2_gadget_start_next_request(hs_ep); 2124 } 2125 2126 /* 2127 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA 2128 * @hs_ep: The endpoint the request was on. 2129 * 2130 * Get first request from the ep queue, determine descriptor on which complete 2131 * happened. SW discovers which descriptor currently in use by HW, adjusts 2132 * dma_address and calculates index of completed descriptor based on the value 2133 * of DEPDMA register. Update actual length of request, giveback to gadget. 2134 */ 2135 static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) 2136 { 2137 struct dwc2_hsotg *hsotg = hs_ep->parent; 2138 struct dwc2_hsotg_req *hs_req; 2139 struct usb_request *ureq; 2140 u32 desc_sts; 2141 u32 mask; 2142 2143 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2144 2145 /* Process only descriptors with buffer status set to DMA done */ 2146 while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >> 2147 DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) { 2148 2149 hs_req = get_ep_head(hs_ep); 2150 if (!hs_req) { 2151 dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); 2152 return; 2153 } 2154 ureq = &hs_req->req; 2155 2156 /* Check completion status */ 2157 if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT == 2158 DEV_DMA_STS_SUCC) { 2159 mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK : 2160 DEV_DMA_ISOC_RX_NBYTES_MASK; 2161 ureq->actual = ureq->length - ((desc_sts & mask) >> 2162 DEV_DMA_ISOC_NBYTES_SHIFT); 2163 2164 /* Adjust actual len for ISOC Out if len is 2165 * not align of 4 2166 */ 2167 if (!hs_ep->dir_in && ureq->length & 0x3) 2168 ureq->actual += 4 - (ureq->length & 0x3); 2169 2170 /* Set actual frame number for completed transfers */ 2171 ureq->frame_number = 2172 (desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >> 2173 DEV_DMA_ISOC_FRNUM_SHIFT; 2174 } 2175 2176 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2177 2178 hs_ep->compl_desc++; 2179 if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1)) 2180 hs_ep->compl_desc = 0; 2181 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2182 } 2183 } 2184 2185 /* 2186 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC. 2187 * @hs_ep: The isochronous endpoint. 2188 * 2189 * If EP ISOC OUT then need to flush RX FIFO to remove source of BNA 2190 * interrupt. Reset target frame and next_desc to allow to start 2191 * ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS 2192 * interrupt for OUT direction. 2193 */ 2194 static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep) 2195 { 2196 struct dwc2_hsotg *hsotg = hs_ep->parent; 2197 2198 if (!hs_ep->dir_in) 2199 dwc2_flush_rx_fifo(hsotg); 2200 dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0); 2201 2202 hs_ep->target_frame = TARGET_FRAME_INITIAL; 2203 hs_ep->next_desc = 0; 2204 hs_ep->compl_desc = 0; 2205 } 2206 2207 /** 2208 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint 2209 * @hsotg: The device state. 2210 * @ep_idx: The endpoint index for the data 2211 * @size: The size of data in the fifo, in bytes 2212 * 2213 * The FIFO status shows there is data to read from the FIFO for a given 2214 * endpoint, so sort out whether we need to read the data into a request 2215 * that has been made for that endpoint. 2216 */ 2217 static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size) 2218 { 2219 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx]; 2220 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2221 int to_read; 2222 int max_req; 2223 int read_ptr; 2224 2225 if (!hs_req) { 2226 u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx)); 2227 int ptr; 2228 2229 dev_dbg(hsotg->dev, 2230 "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n", 2231 __func__, size, ep_idx, epctl); 2232 2233 /* dump the data from the FIFO, we've nothing we can do */ 2234 for (ptr = 0; ptr < size; ptr += 4) 2235 (void)dwc2_readl(hsotg, EPFIFO(ep_idx)); 2236 2237 return; 2238 } 2239 2240 to_read = size; 2241 read_ptr = hs_req->req.actual; 2242 max_req = hs_req->req.length - read_ptr; 2243 2244 dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n", 2245 __func__, to_read, max_req, read_ptr, hs_req->req.length); 2246 2247 if (to_read > max_req) { 2248 /* 2249 * more data appeared than we where willing 2250 * to deal with in this request. 2251 */ 2252 2253 /* currently we don't deal this */ 2254 WARN_ON_ONCE(1); 2255 } 2256 2257 hs_ep->total_data += to_read; 2258 hs_req->req.actual += to_read; 2259 to_read = DIV_ROUND_UP(to_read, 4); 2260 2261 /* 2262 * note, we might over-write the buffer end by 3 bytes depending on 2263 * alignment of the data. 2264 */ 2265 dwc2_readl_rep(hsotg, EPFIFO(ep_idx), 2266 hs_req->req.buf + read_ptr, to_read); 2267 } 2268 2269 /** 2270 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint 2271 * @hsotg: The device instance 2272 * @dir_in: If IN zlp 2273 * 2274 * Generate a zero-length IN packet request for terminating a SETUP 2275 * transaction. 2276 * 2277 * Note, since we don't write any data to the TxFIFO, then it is 2278 * currently believed that we do not need to wait for any space in 2279 * the TxFIFO. 2280 */ 2281 static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in) 2282 { 2283 /* eps_out[0] is used in both directions */ 2284 hsotg->eps_out[0]->dir_in = dir_in; 2285 hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT; 2286 2287 dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]); 2288 } 2289 2290 static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, 2291 u32 epctl_reg) 2292 { 2293 u32 ctrl; 2294 2295 ctrl = dwc2_readl(hsotg, epctl_reg); 2296 if (ctrl & DXEPCTL_EOFRNUM) 2297 ctrl |= DXEPCTL_SETEVENFR; 2298 else 2299 ctrl |= DXEPCTL_SETODDFR; 2300 dwc2_writel(hsotg, ctrl, epctl_reg); 2301 } 2302 2303 /* 2304 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc 2305 * @hs_ep - The endpoint on which transfer went 2306 * 2307 * Iterate over endpoints descriptor chain and get info on bytes remained 2308 * in DMA descriptors after transfer has completed. Used for non isoc EPs. 2309 */ 2310 static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) 2311 { 2312 struct dwc2_hsotg *hsotg = hs_ep->parent; 2313 unsigned int bytes_rem = 0; 2314 struct dwc2_dma_desc *desc = hs_ep->desc_list; 2315 int i; 2316 u32 status; 2317 2318 if (!desc) 2319 return -EINVAL; 2320 2321 for (i = 0; i < hs_ep->desc_count; ++i) { 2322 status = desc->status; 2323 bytes_rem += status & DEV_DMA_NBYTES_MASK; 2324 2325 if (status & DEV_DMA_STS_MASK) 2326 dev_err(hsotg->dev, "descriptor %d closed with %x\n", 2327 i, status & DEV_DMA_STS_MASK); 2328 desc++; 2329 } 2330 2331 return bytes_rem; 2332 } 2333 2334 /** 2335 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO 2336 * @hsotg: The device instance 2337 * @epnum: The endpoint received from 2338 * 2339 * The RXFIFO has delivered an OutDone event, which means that the data 2340 * transfer for an OUT endpoint has been completed, either by a short 2341 * packet or by the finish of a transfer. 2342 */ 2343 static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum) 2344 { 2345 u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum)); 2346 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum]; 2347 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2348 struct usb_request *req = &hs_req->req; 2349 unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 2350 int result = 0; 2351 2352 if (!hs_req) { 2353 dev_dbg(hsotg->dev, "%s: no request active\n", __func__); 2354 return; 2355 } 2356 2357 if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) { 2358 dev_dbg(hsotg->dev, "zlp packet received\n"); 2359 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2360 dwc2_hsotg_enqueue_setup(hsotg); 2361 return; 2362 } 2363 2364 if (using_desc_dma(hsotg)) 2365 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); 2366 2367 if (using_dma(hsotg)) { 2368 unsigned int size_done; 2369 2370 /* 2371 * Calculate the size of the transfer by checking how much 2372 * is left in the endpoint size register and then working it 2373 * out from the amount we loaded for the transfer. 2374 * 2375 * We need to do this as DMA pointers are always 32bit aligned 2376 * so may overshoot/undershoot the transfer. 2377 */ 2378 2379 size_done = hs_ep->size_loaded - size_left; 2380 size_done += hs_ep->last_load; 2381 2382 req->actual = size_done; 2383 } 2384 2385 /* if there is more request to do, schedule new transfer */ 2386 if (req->actual < req->length && size_left == 0) { 2387 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true); 2388 return; 2389 } 2390 2391 if (req->actual < req->length && req->short_not_ok) { 2392 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n", 2393 __func__, req->actual, req->length); 2394 2395 /* 2396 * todo - what should we return here? there's no one else 2397 * even bothering to check the status. 2398 */ 2399 } 2400 2401 /* DDMA IN status phase will start from StsPhseRcvd interrupt */ 2402 if (!using_desc_dma(hsotg) && epnum == 0 && 2403 hsotg->ep0_state == DWC2_EP0_DATA_OUT) { 2404 /* Move to STATUS IN */ 2405 if (!hsotg->delayed_status) 2406 dwc2_hsotg_ep0_zlp(hsotg, true); 2407 } 2408 2409 /* 2410 * Slave mode OUT transfers do not go through XferComplete so 2411 * adjust the ISOC parity here. 2412 */ 2413 if (!using_dma(hsotg)) { 2414 if (hs_ep->isochronous && hs_ep->interval == 1) 2415 dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum)); 2416 else if (hs_ep->isochronous && hs_ep->interval > 1) 2417 dwc2_gadget_incr_frame_num(hs_ep); 2418 } 2419 2420 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result); 2421 } 2422 2423 /** 2424 * dwc2_hsotg_handle_rx - RX FIFO has data 2425 * @hsotg: The device instance 2426 * 2427 * The IRQ handler has detected that the RX FIFO has some data in it 2428 * that requires processing, so find out what is in there and do the 2429 * appropriate read. 2430 * 2431 * The RXFIFO is a true FIFO, the packets coming out are still in packet 2432 * chunks, so if you have x packets received on an endpoint you'll get x 2433 * FIFO events delivered, each with a packet's worth of data in it. 2434 * 2435 * When using DMA, we should not be processing events from the RXFIFO 2436 * as the actual data should be sent to the memory directly and we turn 2437 * on the completion interrupts to get notifications of transfer completion. 2438 */ 2439 static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg) 2440 { 2441 u32 grxstsr = dwc2_readl(hsotg, GRXSTSP); 2442 u32 epnum, status, size; 2443 2444 WARN_ON(using_dma(hsotg)); 2445 2446 epnum = grxstsr & GRXSTS_EPNUM_MASK; 2447 status = grxstsr & GRXSTS_PKTSTS_MASK; 2448 2449 size = grxstsr & GRXSTS_BYTECNT_MASK; 2450 size >>= GRXSTS_BYTECNT_SHIFT; 2451 2452 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n", 2453 __func__, grxstsr, size, epnum); 2454 2455 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) { 2456 case GRXSTS_PKTSTS_GLOBALOUTNAK: 2457 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n"); 2458 break; 2459 2460 case GRXSTS_PKTSTS_OUTDONE: 2461 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n", 2462 dwc2_hsotg_read_frameno(hsotg)); 2463 2464 if (!using_dma(hsotg)) 2465 dwc2_hsotg_handle_outdone(hsotg, epnum); 2466 break; 2467 2468 case GRXSTS_PKTSTS_SETUPDONE: 2469 dev_dbg(hsotg->dev, 2470 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 2471 dwc2_hsotg_read_frameno(hsotg), 2472 dwc2_readl(hsotg, DOEPCTL(0))); 2473 /* 2474 * Call dwc2_hsotg_handle_outdone here if it was not called from 2475 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't 2476 * generate GRXSTS_PKTSTS_OUTDONE for setup packet. 2477 */ 2478 if (hsotg->ep0_state == DWC2_EP0_SETUP) 2479 dwc2_hsotg_handle_outdone(hsotg, epnum); 2480 break; 2481 2482 case GRXSTS_PKTSTS_OUTRX: 2483 dwc2_hsotg_rx_data(hsotg, epnum, size); 2484 break; 2485 2486 case GRXSTS_PKTSTS_SETUPRX: 2487 dev_dbg(hsotg->dev, 2488 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 2489 dwc2_hsotg_read_frameno(hsotg), 2490 dwc2_readl(hsotg, DOEPCTL(0))); 2491 2492 WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP); 2493 2494 dwc2_hsotg_rx_data(hsotg, epnum, size); 2495 break; 2496 2497 default: 2498 dev_warn(hsotg->dev, "%s: unknown status %08x\n", 2499 __func__, grxstsr); 2500 2501 dwc2_hsotg_dump(hsotg); 2502 break; 2503 } 2504 } 2505 2506 /** 2507 * dwc2_hsotg_ep0_mps - turn max packet size into register setting 2508 * @mps: The maximum packet size in bytes. 2509 */ 2510 static u32 dwc2_hsotg_ep0_mps(unsigned int mps) 2511 { 2512 switch (mps) { 2513 case 64: 2514 return D0EPCTL_MPS_64; 2515 case 32: 2516 return D0EPCTL_MPS_32; 2517 case 16: 2518 return D0EPCTL_MPS_16; 2519 case 8: 2520 return D0EPCTL_MPS_8; 2521 } 2522 2523 /* bad max packet size, warn and return invalid result */ 2524 WARN_ON(1); 2525 return (u32)-1; 2526 } 2527 2528 /** 2529 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field 2530 * @hsotg: The driver state. 2531 * @ep: The index number of the endpoint 2532 * @mps: The maximum packet size in bytes 2533 * @mc: The multicount value 2534 * @dir_in: True if direction is in. 2535 * 2536 * Configure the maximum packet size for the given endpoint, updating 2537 * the hardware control registers to reflect this. 2538 */ 2539 static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, 2540 unsigned int ep, unsigned int mps, 2541 unsigned int mc, unsigned int dir_in) 2542 { 2543 struct dwc2_hsotg_ep *hs_ep; 2544 u32 reg; 2545 2546 hs_ep = index_to_ep(hsotg, ep, dir_in); 2547 if (!hs_ep) 2548 return; 2549 2550 if (ep == 0) { 2551 u32 mps_bytes = mps; 2552 2553 /* EP0 is a special case */ 2554 mps = dwc2_hsotg_ep0_mps(mps_bytes); 2555 if (mps > 3) 2556 goto bad_mps; 2557 hs_ep->ep.maxpacket = mps_bytes; 2558 hs_ep->mc = 1; 2559 } else { 2560 if (mps > 1024) 2561 goto bad_mps; 2562 hs_ep->mc = mc; 2563 if (mc > 3) 2564 goto bad_mps; 2565 hs_ep->ep.maxpacket = mps; 2566 } 2567 2568 if (dir_in) { 2569 reg = dwc2_readl(hsotg, DIEPCTL(ep)); 2570 reg &= ~DXEPCTL_MPS_MASK; 2571 reg |= mps; 2572 dwc2_writel(hsotg, reg, DIEPCTL(ep)); 2573 } else { 2574 reg = dwc2_readl(hsotg, DOEPCTL(ep)); 2575 reg &= ~DXEPCTL_MPS_MASK; 2576 reg |= mps; 2577 dwc2_writel(hsotg, reg, DOEPCTL(ep)); 2578 } 2579 2580 return; 2581 2582 bad_mps: 2583 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps); 2584 } 2585 2586 /** 2587 * dwc2_hsotg_txfifo_flush - flush Tx FIFO 2588 * @hsotg: The driver state 2589 * @idx: The index for the endpoint (0..15) 2590 */ 2591 static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx) 2592 { 2593 dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH, 2594 GRSTCTL); 2595 2596 /* wait until the fifo is flushed */ 2597 if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100)) 2598 dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n", 2599 __func__); 2600 } 2601 2602 /** 2603 * dwc2_hsotg_trytx - check to see if anything needs transmitting 2604 * @hsotg: The driver state 2605 * @hs_ep: The driver endpoint to check. 2606 * 2607 * Check to see if there is a request that has data to send, and if so 2608 * make an attempt to write data into the FIFO. 2609 */ 2610 static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg, 2611 struct dwc2_hsotg_ep *hs_ep) 2612 { 2613 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2614 2615 if (!hs_ep->dir_in || !hs_req) { 2616 /** 2617 * if request is not enqueued, we disable interrupts 2618 * for endpoints, excepting ep0 2619 */ 2620 if (hs_ep->index != 0) 2621 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, 2622 hs_ep->dir_in, 0); 2623 return 0; 2624 } 2625 2626 if (hs_req->req.actual < hs_req->req.length) { 2627 dev_dbg(hsotg->dev, "trying to write more for ep%d\n", 2628 hs_ep->index); 2629 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req); 2630 } 2631 2632 return 0; 2633 } 2634 2635 /** 2636 * dwc2_hsotg_complete_in - complete IN transfer 2637 * @hsotg: The device state. 2638 * @hs_ep: The endpoint that has just completed. 2639 * 2640 * An IN transfer has been completed, update the transfer's state and then 2641 * call the relevant completion routines. 2642 */ 2643 static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, 2644 struct dwc2_hsotg_ep *hs_ep) 2645 { 2646 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2647 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index)); 2648 int size_left, size_done; 2649 2650 if (!hs_req) { 2651 dev_dbg(hsotg->dev, "XferCompl but no req\n"); 2652 return; 2653 } 2654 2655 /* Finish ZLP handling for IN EP0 transactions */ 2656 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) { 2657 dev_dbg(hsotg->dev, "zlp packet sent\n"); 2658 2659 /* 2660 * While send zlp for DWC2_EP0_STATUS_IN EP direction was 2661 * changed to IN. Change back to complete OUT transfer request 2662 */ 2663 hs_ep->dir_in = 0; 2664 2665 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2666 if (hsotg->test_mode) { 2667 int ret; 2668 2669 ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode); 2670 if (ret < 0) { 2671 dev_dbg(hsotg->dev, "Invalid Test #%d\n", 2672 hsotg->test_mode); 2673 dwc2_hsotg_stall_ep0(hsotg); 2674 return; 2675 } 2676 } 2677 dwc2_hsotg_enqueue_setup(hsotg); 2678 return; 2679 } 2680 2681 /* 2682 * Calculate the size of the transfer by checking how much is left 2683 * in the endpoint size register and then working it out from 2684 * the amount we loaded for the transfer. 2685 * 2686 * We do this even for DMA, as the transfer may have incremented 2687 * past the end of the buffer (DMA transfers are always 32bit 2688 * aligned). 2689 */ 2690 if (using_desc_dma(hsotg)) { 2691 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); 2692 if (size_left < 0) 2693 dev_err(hsotg->dev, "error parsing DDMA results %d\n", 2694 size_left); 2695 } else { 2696 size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 2697 } 2698 2699 size_done = hs_ep->size_loaded - size_left; 2700 size_done += hs_ep->last_load; 2701 2702 if (hs_req->req.actual != size_done) 2703 dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n", 2704 __func__, hs_req->req.actual, size_done); 2705 2706 hs_req->req.actual = size_done; 2707 dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n", 2708 hs_req->req.length, hs_req->req.actual, hs_req->req.zero); 2709 2710 if (!size_left && hs_req->req.actual < hs_req->req.length) { 2711 dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__); 2712 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true); 2713 return; 2714 } 2715 2716 /* Zlp for all endpoints, for ep0 only in DATA IN stage */ 2717 if (hs_ep->send_zlp) { 2718 dwc2_hsotg_program_zlp(hsotg, hs_ep); 2719 hs_ep->send_zlp = 0; 2720 /* transfer will be completed on next complete interrupt */ 2721 return; 2722 } 2723 2724 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) { 2725 /* Move to STATUS OUT */ 2726 dwc2_hsotg_ep0_zlp(hsotg, false); 2727 return; 2728 } 2729 2730 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2731 } 2732 2733 /** 2734 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep 2735 * @hsotg: The device state. 2736 * @idx: Index of ep. 2737 * @dir_in: Endpoint direction 1-in 0-out. 2738 * 2739 * Reads for endpoint with given index and direction, by masking 2740 * epint_reg with coresponding mask. 2741 */ 2742 static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg, 2743 unsigned int idx, int dir_in) 2744 { 2745 u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK; 2746 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx); 2747 u32 ints; 2748 u32 mask; 2749 u32 diepempmsk; 2750 2751 mask = dwc2_readl(hsotg, epmsk_reg); 2752 diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK); 2753 mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0; 2754 mask |= DXEPINT_SETUP_RCVD; 2755 2756 ints = dwc2_readl(hsotg, epint_reg); 2757 ints &= mask; 2758 return ints; 2759 } 2760 2761 /** 2762 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD 2763 * @hs_ep: The endpoint on which interrupt is asserted. 2764 * 2765 * This interrupt indicates that the endpoint has been disabled per the 2766 * application's request. 2767 * 2768 * For IN endpoints flushes txfifo, in case of BULK clears DCTL_CGNPINNAK, 2769 * in case of ISOC completes current request. 2770 * 2771 * For ISOC-OUT endpoints completes expired requests. If there is remaining 2772 * request starts it. 2773 */ 2774 static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep) 2775 { 2776 struct dwc2_hsotg *hsotg = hs_ep->parent; 2777 struct dwc2_hsotg_req *hs_req; 2778 unsigned char idx = hs_ep->index; 2779 int dir_in = hs_ep->dir_in; 2780 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx); 2781 int dctl = dwc2_readl(hsotg, DCTL); 2782 2783 dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__); 2784 2785 if (dir_in) { 2786 int epctl = dwc2_readl(hsotg, epctl_reg); 2787 2788 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index); 2789 2790 if (hs_ep->isochronous) { 2791 dwc2_hsotg_complete_in(hsotg, hs_ep); 2792 return; 2793 } 2794 2795 if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) { 2796 int dctl = dwc2_readl(hsotg, DCTL); 2797 2798 dctl |= DCTL_CGNPINNAK; 2799 dwc2_writel(hsotg, dctl, DCTL); 2800 } 2801 return; 2802 } 2803 2804 if (dctl & DCTL_GOUTNAKSTS) { 2805 dctl |= DCTL_CGOUTNAK; 2806 dwc2_writel(hsotg, dctl, DCTL); 2807 } 2808 2809 if (!hs_ep->isochronous) 2810 return; 2811 2812 if (list_empty(&hs_ep->queue)) { 2813 dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n", 2814 __func__, hs_ep); 2815 return; 2816 } 2817 2818 do { 2819 hs_req = get_ep_head(hs_ep); 2820 if (hs_req) 2821 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 2822 -ENODATA); 2823 dwc2_gadget_incr_frame_num(hs_ep); 2824 /* Update current frame number value. */ 2825 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2826 } while (dwc2_gadget_target_frame_elapsed(hs_ep)); 2827 2828 dwc2_gadget_start_next_request(hs_ep); 2829 } 2830 2831 /** 2832 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS 2833 * @ep: The endpoint on which interrupt is asserted. 2834 * 2835 * This is starting point for ISOC-OUT transfer, synchronization done with 2836 * first out token received from host while corresponding EP is disabled. 2837 * 2838 * Device does not know initial frame in which out token will come. For this 2839 * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon 2840 * getting this interrupt SW starts calculation for next transfer frame. 2841 */ 2842 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) 2843 { 2844 struct dwc2_hsotg *hsotg = ep->parent; 2845 int dir_in = ep->dir_in; 2846 u32 doepmsk; 2847 2848 if (dir_in || !ep->isochronous) 2849 return; 2850 2851 if (using_desc_dma(hsotg)) { 2852 if (ep->target_frame == TARGET_FRAME_INITIAL) { 2853 /* Start first ISO Out */ 2854 ep->target_frame = hsotg->frame_number; 2855 dwc2_gadget_start_isoc_ddma(ep); 2856 } 2857 return; 2858 } 2859 2860 if (ep->interval > 1 && 2861 ep->target_frame == TARGET_FRAME_INITIAL) { 2862 u32 ctrl; 2863 2864 ep->target_frame = hsotg->frame_number; 2865 dwc2_gadget_incr_frame_num(ep); 2866 2867 ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index)); 2868 if (ep->target_frame & 0x1) 2869 ctrl |= DXEPCTL_SETODDFR; 2870 else 2871 ctrl |= DXEPCTL_SETEVENFR; 2872 2873 dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index)); 2874 } 2875 2876 dwc2_gadget_start_next_request(ep); 2877 doepmsk = dwc2_readl(hsotg, DOEPMSK); 2878 doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK; 2879 dwc2_writel(hsotg, doepmsk, DOEPMSK); 2880 } 2881 2882 /** 2883 * dwc2_gadget_handle_nak - handle NAK interrupt 2884 * @hs_ep: The endpoint on which interrupt is asserted. 2885 * 2886 * This is starting point for ISOC-IN transfer, synchronization done with 2887 * first IN token received from host while corresponding EP is disabled. 2888 * 2889 * Device does not know when first one token will arrive from host. On first 2890 * token arrival HW generates 2 interrupts: 'in token received while FIFO empty' 2891 * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was 2892 * sent in response to that as there was no data in FIFO. SW is basing on this 2893 * interrupt to obtain frame in which token has come and then based on the 2894 * interval calculates next frame for transfer. 2895 */ 2896 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) 2897 { 2898 struct dwc2_hsotg *hsotg = hs_ep->parent; 2899 int dir_in = hs_ep->dir_in; 2900 2901 if (!dir_in || !hs_ep->isochronous) 2902 return; 2903 2904 if (hs_ep->target_frame == TARGET_FRAME_INITIAL) { 2905 2906 if (using_desc_dma(hsotg)) { 2907 hs_ep->target_frame = hsotg->frame_number; 2908 dwc2_gadget_incr_frame_num(hs_ep); 2909 2910 /* In service interval mode target_frame must 2911 * be set to last (u)frame of the service interval. 2912 */ 2913 if (hsotg->params.service_interval) { 2914 /* Set target_frame to the first (u)frame of 2915 * the service interval 2916 */ 2917 hs_ep->target_frame &= ~hs_ep->interval + 1; 2918 2919 /* Set target_frame to the last (u)frame of 2920 * the service interval 2921 */ 2922 dwc2_gadget_incr_frame_num(hs_ep); 2923 dwc2_gadget_dec_frame_num_by_one(hs_ep); 2924 } 2925 2926 dwc2_gadget_start_isoc_ddma(hs_ep); 2927 return; 2928 } 2929 2930 hs_ep->target_frame = hsotg->frame_number; 2931 if (hs_ep->interval > 1) { 2932 u32 ctrl = dwc2_readl(hsotg, 2933 DIEPCTL(hs_ep->index)); 2934 if (hs_ep->target_frame & 0x1) 2935 ctrl |= DXEPCTL_SETODDFR; 2936 else 2937 ctrl |= DXEPCTL_SETEVENFR; 2938 2939 dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index)); 2940 } 2941 2942 dwc2_hsotg_complete_request(hsotg, hs_ep, 2943 get_ep_head(hs_ep), 0); 2944 } 2945 2946 if (!using_desc_dma(hsotg)) 2947 dwc2_gadget_incr_frame_num(hs_ep); 2948 } 2949 2950 /** 2951 * dwc2_hsotg_epint - handle an in/out endpoint interrupt 2952 * @hsotg: The driver state 2953 * @idx: The index for the endpoint (0..15) 2954 * @dir_in: Set if this is an IN endpoint 2955 * 2956 * Process and clear any interrupt pending for an individual endpoint 2957 */ 2958 static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, 2959 int dir_in) 2960 { 2961 struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in); 2962 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx); 2963 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx); 2964 u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx); 2965 u32 ints; 2966 u32 ctrl; 2967 2968 ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in); 2969 ctrl = dwc2_readl(hsotg, epctl_reg); 2970 2971 /* Clear endpoint interrupts */ 2972 dwc2_writel(hsotg, ints, epint_reg); 2973 2974 if (!hs_ep) { 2975 dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n", 2976 __func__, idx, dir_in ? "in" : "out"); 2977 return; 2978 } 2979 2980 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n", 2981 __func__, idx, dir_in ? "in" : "out", ints); 2982 2983 /* Don't process XferCompl interrupt if it is a setup packet */ 2984 if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD))) 2985 ints &= ~DXEPINT_XFERCOMPL; 2986 2987 /* 2988 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP 2989 * stage and xfercomplete was generated without SETUP phase done 2990 * interrupt. SW should parse received setup packet only after host's 2991 * exit from setup phase of control transfer. 2992 */ 2993 if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in && 2994 hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP)) 2995 ints &= ~DXEPINT_XFERCOMPL; 2996 2997 if (ints & DXEPINT_XFERCOMPL) { 2998 dev_dbg(hsotg->dev, 2999 "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n", 3000 __func__, dwc2_readl(hsotg, epctl_reg), 3001 dwc2_readl(hsotg, epsiz_reg)); 3002 3003 /* In DDMA handle isochronous requests separately */ 3004 if (using_desc_dma(hsotg) && hs_ep->isochronous) { 3005 /* XferCompl set along with BNA */ 3006 if (!(ints & DXEPINT_BNAINTR)) 3007 dwc2_gadget_complete_isoc_request_ddma(hs_ep); 3008 } else if (dir_in) { 3009 /* 3010 * We get OutDone from the FIFO, so we only 3011 * need to look at completing IN requests here 3012 * if operating slave mode 3013 */ 3014 if (hs_ep->isochronous && hs_ep->interval > 1) 3015 dwc2_gadget_incr_frame_num(hs_ep); 3016 3017 dwc2_hsotg_complete_in(hsotg, hs_ep); 3018 if (ints & DXEPINT_NAKINTRPT) 3019 ints &= ~DXEPINT_NAKINTRPT; 3020 3021 if (idx == 0 && !hs_ep->req) 3022 dwc2_hsotg_enqueue_setup(hsotg); 3023 } else if (using_dma(hsotg)) { 3024 /* 3025 * We're using DMA, we need to fire an OutDone here 3026 * as we ignore the RXFIFO. 3027 */ 3028 if (hs_ep->isochronous && hs_ep->interval > 1) 3029 dwc2_gadget_incr_frame_num(hs_ep); 3030 3031 dwc2_hsotg_handle_outdone(hsotg, idx); 3032 } 3033 } 3034 3035 if (ints & DXEPINT_EPDISBLD) 3036 dwc2_gadget_handle_ep_disabled(hs_ep); 3037 3038 if (ints & DXEPINT_OUTTKNEPDIS) 3039 dwc2_gadget_handle_out_token_ep_disabled(hs_ep); 3040 3041 if (ints & DXEPINT_NAKINTRPT) 3042 dwc2_gadget_handle_nak(hs_ep); 3043 3044 if (ints & DXEPINT_AHBERR) 3045 dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__); 3046 3047 if (ints & DXEPINT_SETUP) { /* Setup or Timeout */ 3048 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__); 3049 3050 if (using_dma(hsotg) && idx == 0) { 3051 /* 3052 * this is the notification we've received a 3053 * setup packet. In non-DMA mode we'd get this 3054 * from the RXFIFO, instead we need to process 3055 * the setup here. 3056 */ 3057 3058 if (dir_in) 3059 WARN_ON_ONCE(1); 3060 else 3061 dwc2_hsotg_handle_outdone(hsotg, 0); 3062 } 3063 } 3064 3065 if (ints & DXEPINT_STSPHSERCVD) { 3066 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); 3067 3068 /* Safety check EP0 state when STSPHSERCVD asserted */ 3069 if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) { 3070 /* Move to STATUS IN for DDMA */ 3071 if (using_desc_dma(hsotg)) { 3072 if (!hsotg->delayed_status) 3073 dwc2_hsotg_ep0_zlp(hsotg, true); 3074 else 3075 /* In case of 3 stage Control Write with delayed 3076 * status, when Status IN transfer started 3077 * before STSPHSERCVD asserted, NAKSTS bit not 3078 * cleared by CNAK in dwc2_hsotg_start_req() 3079 * function. Clear now NAKSTS to allow complete 3080 * transfer. 3081 */ 3082 dwc2_set_bit(hsotg, DIEPCTL(0), 3083 DXEPCTL_CNAK); 3084 } 3085 } 3086 3087 } 3088 3089 if (ints & DXEPINT_BACK2BACKSETUP) 3090 dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__); 3091 3092 if (ints & DXEPINT_BNAINTR) { 3093 dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__); 3094 if (hs_ep->isochronous) 3095 dwc2_gadget_handle_isoc_bna(hs_ep); 3096 } 3097 3098 if (dir_in && !hs_ep->isochronous) { 3099 /* not sure if this is important, but we'll clear it anyway */ 3100 if (ints & DXEPINT_INTKNTXFEMP) { 3101 dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n", 3102 __func__, idx); 3103 } 3104 3105 /* this probably means something bad is happening */ 3106 if (ints & DXEPINT_INTKNEPMIS) { 3107 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n", 3108 __func__, idx); 3109 } 3110 3111 /* FIFO has space or is empty (see GAHBCFG) */ 3112 if (hsotg->dedicated_fifos && 3113 ints & DXEPINT_TXFEMP) { 3114 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n", 3115 __func__, idx); 3116 if (!using_dma(hsotg)) 3117 dwc2_hsotg_trytx(hsotg, hs_ep); 3118 } 3119 } 3120 } 3121 3122 /** 3123 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done) 3124 * @hsotg: The device state. 3125 * 3126 * Handle updating the device settings after the enumeration phase has 3127 * been completed. 3128 */ 3129 static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) 3130 { 3131 u32 dsts = dwc2_readl(hsotg, DSTS); 3132 int ep0_mps = 0, ep_mps = 8; 3133 3134 /* 3135 * This should signal the finish of the enumeration phase 3136 * of the USB handshaking, so we should now know what rate 3137 * we connected at. 3138 */ 3139 3140 dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts); 3141 3142 /* 3143 * note, since we're limited by the size of transfer on EP0, and 3144 * it seems IN transfers must be a even number of packets we do 3145 * not advertise a 64byte MPS on EP0. 3146 */ 3147 3148 /* catch both EnumSpd_FS and EnumSpd_FS48 */ 3149 switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) { 3150 case DSTS_ENUMSPD_FS: 3151 case DSTS_ENUMSPD_FS48: 3152 hsotg->gadget.speed = USB_SPEED_FULL; 3153 ep0_mps = EP0_MPS_LIMIT; 3154 ep_mps = 1023; 3155 break; 3156 3157 case DSTS_ENUMSPD_HS: 3158 hsotg->gadget.speed = USB_SPEED_HIGH; 3159 ep0_mps = EP0_MPS_LIMIT; 3160 ep_mps = 1024; 3161 break; 3162 3163 case DSTS_ENUMSPD_LS: 3164 hsotg->gadget.speed = USB_SPEED_LOW; 3165 ep0_mps = 8; 3166 ep_mps = 8; 3167 /* 3168 * note, we don't actually support LS in this driver at the 3169 * moment, and the documentation seems to imply that it isn't 3170 * supported by the PHYs on some of the devices. 3171 */ 3172 break; 3173 } 3174 dev_info(hsotg->dev, "new device is %s\n", 3175 usb_speed_string(hsotg->gadget.speed)); 3176 3177 /* 3178 * we should now know the maximum packet size for an 3179 * endpoint, so set the endpoints to a default value. 3180 */ 3181 3182 if (ep0_mps) { 3183 int i; 3184 /* Initialize ep0 for both in and out directions */ 3185 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1); 3186 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0); 3187 for (i = 1; i < hsotg->num_of_eps; i++) { 3188 if (hsotg->eps_in[i]) 3189 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 3190 0, 1); 3191 if (hsotg->eps_out[i]) 3192 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 3193 0, 0); 3194 } 3195 } 3196 3197 /* ensure after enumeration our EP0 is active */ 3198 3199 dwc2_hsotg_enqueue_setup(hsotg); 3200 3201 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3202 dwc2_readl(hsotg, DIEPCTL0), 3203 dwc2_readl(hsotg, DOEPCTL0)); 3204 } 3205 3206 /** 3207 * kill_all_requests - remove all requests from the endpoint's queue 3208 * @hsotg: The device state. 3209 * @ep: The endpoint the requests may be on. 3210 * @result: The result code to use. 3211 * 3212 * Go through the requests on the given endpoint and mark them 3213 * completed with the given result code. 3214 */ 3215 static void kill_all_requests(struct dwc2_hsotg *hsotg, 3216 struct dwc2_hsotg_ep *ep, 3217 int result) 3218 { 3219 struct dwc2_hsotg_req *req, *treq; 3220 unsigned int size; 3221 3222 ep->req = NULL; 3223 3224 list_for_each_entry_safe(req, treq, &ep->queue, queue) 3225 dwc2_hsotg_complete_request(hsotg, ep, req, 3226 result); 3227 3228 if (!hsotg->dedicated_fifos) 3229 return; 3230 size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4; 3231 if (size < ep->fifo_size) 3232 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index); 3233 } 3234 3235 /** 3236 * dwc2_hsotg_disconnect - disconnect service 3237 * @hsotg: The device state. 3238 * 3239 * The device has been disconnected. Remove all current 3240 * transactions and signal the gadget driver that this 3241 * has happened. 3242 */ 3243 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg) 3244 { 3245 unsigned int ep; 3246 3247 if (!hsotg->connected) 3248 return; 3249 3250 hsotg->connected = 0; 3251 hsotg->test_mode = 0; 3252 3253 /* all endpoints should be shutdown */ 3254 for (ep = 0; ep < hsotg->num_of_eps; ep++) { 3255 if (hsotg->eps_in[ep]) 3256 kill_all_requests(hsotg, hsotg->eps_in[ep], 3257 -ESHUTDOWN); 3258 if (hsotg->eps_out[ep]) 3259 kill_all_requests(hsotg, hsotg->eps_out[ep], 3260 -ESHUTDOWN); 3261 } 3262 3263 call_gadget(hsotg, disconnect); 3264 hsotg->lx_state = DWC2_L3; 3265 3266 usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED); 3267 } 3268 3269 /** 3270 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler 3271 * @hsotg: The device state: 3272 * @periodic: True if this is a periodic FIFO interrupt 3273 */ 3274 static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic) 3275 { 3276 struct dwc2_hsotg_ep *ep; 3277 int epno, ret; 3278 3279 /* look through for any more data to transmit */ 3280 for (epno = 0; epno < hsotg->num_of_eps; epno++) { 3281 ep = index_to_ep(hsotg, epno, 1); 3282 3283 if (!ep) 3284 continue; 3285 3286 if (!ep->dir_in) 3287 continue; 3288 3289 if ((periodic && !ep->periodic) || 3290 (!periodic && ep->periodic)) 3291 continue; 3292 3293 ret = dwc2_hsotg_trytx(hsotg, ep); 3294 if (ret < 0) 3295 break; 3296 } 3297 } 3298 3299 /* IRQ flags which will trigger a retry around the IRQ loop */ 3300 #define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \ 3301 GINTSTS_PTXFEMP | \ 3302 GINTSTS_RXFLVL) 3303 3304 static int dwc2_hsotg_ep_disable(struct usb_ep *ep); 3305 /** 3306 * dwc2_hsotg_core_init - issue softreset to the core 3307 * @hsotg: The device state 3308 * @is_usb_reset: Usb resetting flag 3309 * 3310 * Issue a soft reset to the core, and await the core finishing it. 3311 */ 3312 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, 3313 bool is_usb_reset) 3314 { 3315 u32 intmsk; 3316 u32 val; 3317 u32 usbcfg; 3318 u32 dcfg = 0; 3319 int ep; 3320 3321 /* Kill any ep0 requests as controller will be reinitialized */ 3322 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); 3323 3324 if (!is_usb_reset) { 3325 if (dwc2_core_reset(hsotg, true)) 3326 return; 3327 } else { 3328 /* all endpoints should be shutdown */ 3329 for (ep = 1; ep < hsotg->num_of_eps; ep++) { 3330 if (hsotg->eps_in[ep]) 3331 dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); 3332 if (hsotg->eps_out[ep]) 3333 dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); 3334 } 3335 } 3336 3337 /* 3338 * we must now enable ep0 ready for host detection and then 3339 * set configuration. 3340 */ 3341 3342 /* keep other bits untouched (so e.g. forced modes are not lost) */ 3343 usbcfg = dwc2_readl(hsotg, GUSBCFG); 3344 usbcfg &= ~GUSBCFG_TOUTCAL_MASK; 3345 usbcfg |= GUSBCFG_TOUTCAL(7); 3346 3347 /* remove the HNP/SRP and set the PHY */ 3348 usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP); 3349 dwc2_writel(hsotg, usbcfg, GUSBCFG); 3350 3351 dwc2_phy_init(hsotg, true); 3352 3353 dwc2_hsotg_init_fifo(hsotg); 3354 3355 if (!is_usb_reset) 3356 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 3357 3358 dcfg |= DCFG_EPMISCNT(1); 3359 3360 switch (hsotg->params.speed) { 3361 case DWC2_SPEED_PARAM_LOW: 3362 dcfg |= DCFG_DEVSPD_LS; 3363 break; 3364 case DWC2_SPEED_PARAM_FULL: 3365 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) 3366 dcfg |= DCFG_DEVSPD_FS48; 3367 else 3368 dcfg |= DCFG_DEVSPD_FS; 3369 break; 3370 default: 3371 dcfg |= DCFG_DEVSPD_HS; 3372 } 3373 3374 if (hsotg->params.ipg_isoc_en) 3375 dcfg |= DCFG_IPG_ISOC_SUPPORDED; 3376 3377 dwc2_writel(hsotg, dcfg, DCFG); 3378 3379 /* Clear any pending OTG interrupts */ 3380 dwc2_writel(hsotg, 0xffffffff, GOTGINT); 3381 3382 /* Clear any pending interrupts */ 3383 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 3384 intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT | 3385 GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF | 3386 GINTSTS_USBRST | GINTSTS_RESETDET | 3387 GINTSTS_ENUMDONE | GINTSTS_OTGINT | 3388 GINTSTS_USBSUSP | GINTSTS_WKUPINT | 3389 GINTSTS_LPMTRANRCVD; 3390 3391 if (!using_desc_dma(hsotg)) 3392 intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; 3393 3394 if (!hsotg->params.external_id_pin_ctl) 3395 intmsk |= GINTSTS_CONIDSTSCHNG; 3396 3397 dwc2_writel(hsotg, intmsk, GINTMSK); 3398 3399 if (using_dma(hsotg)) { 3400 dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN | 3401 hsotg->params.ahbcfg, 3402 GAHBCFG); 3403 3404 /* Set DDMA mode support in the core if needed */ 3405 if (using_desc_dma(hsotg)) 3406 dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN); 3407 3408 } else { 3409 dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ? 3410 (GAHBCFG_NP_TXF_EMP_LVL | 3411 GAHBCFG_P_TXF_EMP_LVL) : 0) | 3412 GAHBCFG_GLBL_INTR_EN, GAHBCFG); 3413 } 3414 3415 /* 3416 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts 3417 * when we have no data to transfer. Otherwise we get being flooded by 3418 * interrupts. 3419 */ 3420 3421 dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ? 3422 DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) | 3423 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK | 3424 DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK, 3425 DIEPMSK); 3426 3427 /* 3428 * don't need XferCompl, we get that from RXFIFO in slave mode. In 3429 * DMA mode we may need this and StsPhseRcvd. 3430 */ 3431 dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK | 3432 DOEPMSK_STSPHSERCVDMSK) : 0) | 3433 DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK | 3434 DOEPMSK_SETUPMSK, 3435 DOEPMSK); 3436 3437 /* Enable BNA interrupt for DDMA */ 3438 if (using_desc_dma(hsotg)) { 3439 dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK); 3440 dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK); 3441 } 3442 3443 /* Enable Service Interval mode if supported */ 3444 if (using_desc_dma(hsotg) && hsotg->params.service_interval) 3445 dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED); 3446 3447 dwc2_writel(hsotg, 0, DAINTMSK); 3448 3449 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3450 dwc2_readl(hsotg, DIEPCTL0), 3451 dwc2_readl(hsotg, DOEPCTL0)); 3452 3453 /* enable in and out endpoint interrupts */ 3454 dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT); 3455 3456 /* 3457 * Enable the RXFIFO when in slave mode, as this is how we collect 3458 * the data. In DMA mode, we get events from the FIFO but also 3459 * things we cannot process, so do not use it. 3460 */ 3461 if (!using_dma(hsotg)) 3462 dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL); 3463 3464 /* Enable interrupts for EP0 in and out */ 3465 dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1); 3466 dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1); 3467 3468 if (!is_usb_reset) { 3469 dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE); 3470 udelay(10); /* see openiboot */ 3471 dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE); 3472 } 3473 3474 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL)); 3475 3476 /* 3477 * DxEPCTL_USBActEp says RO in manual, but seems to be set by 3478 * writing to the EPCTL register.. 3479 */ 3480 3481 /* set to read 1 8byte packet */ 3482 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 3483 DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0); 3484 3485 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3486 DXEPCTL_CNAK | DXEPCTL_EPENA | 3487 DXEPCTL_USBACTEP, 3488 DOEPCTL0); 3489 3490 /* enable, but don't activate EP0in */ 3491 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3492 DXEPCTL_USBACTEP, DIEPCTL0); 3493 3494 /* clear global NAKs */ 3495 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; 3496 if (!is_usb_reset) 3497 val |= DCTL_SFTDISCON; 3498 dwc2_set_bit(hsotg, DCTL, val); 3499 3500 /* configure the core to support LPM */ 3501 dwc2_gadget_init_lpm(hsotg); 3502 3503 /* program GREFCLK register if needed */ 3504 if (using_desc_dma(hsotg) && hsotg->params.service_interval) 3505 dwc2_gadget_program_ref_clk(hsotg); 3506 3507 /* must be at-least 3ms to allow bus to see disconnect */ 3508 mdelay(3); 3509 3510 hsotg->lx_state = DWC2_L0; 3511 3512 dwc2_hsotg_enqueue_setup(hsotg); 3513 3514 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3515 dwc2_readl(hsotg, DIEPCTL0), 3516 dwc2_readl(hsotg, DOEPCTL0)); 3517 } 3518 3519 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) 3520 { 3521 /* set the soft-disconnect bit */ 3522 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 3523 } 3524 3525 void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) 3526 { 3527 /* remove the soft-disconnect and let's go */ 3528 dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON); 3529 } 3530 3531 /** 3532 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt. 3533 * @hsotg: The device state: 3534 * 3535 * This interrupt indicates one of the following conditions occurred while 3536 * transmitting an ISOC transaction. 3537 * - Corrupted IN Token for ISOC EP. 3538 * - Packet not complete in FIFO. 3539 * 3540 * The following actions will be taken: 3541 * - Determine the EP 3542 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO 3543 */ 3544 static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg) 3545 { 3546 struct dwc2_hsotg_ep *hs_ep; 3547 u32 epctrl; 3548 u32 daintmsk; 3549 u32 idx; 3550 3551 dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n"); 3552 3553 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3554 3555 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3556 hs_ep = hsotg->eps_in[idx]; 3557 /* Proceed only unmasked ISOC EPs */ 3558 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3559 continue; 3560 3561 epctrl = dwc2_readl(hsotg, DIEPCTL(idx)); 3562 if ((epctrl & DXEPCTL_EPENA) && 3563 dwc2_gadget_target_frame_elapsed(hs_ep)) { 3564 epctrl |= DXEPCTL_SNAK; 3565 epctrl |= DXEPCTL_EPDIS; 3566 dwc2_writel(hsotg, epctrl, DIEPCTL(idx)); 3567 } 3568 } 3569 3570 /* Clear interrupt */ 3571 dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS); 3572 } 3573 3574 /** 3575 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt 3576 * @hsotg: The device state: 3577 * 3578 * This interrupt indicates one of the following conditions occurred while 3579 * transmitting an ISOC transaction. 3580 * - Corrupted OUT Token for ISOC EP. 3581 * - Packet not complete in FIFO. 3582 * 3583 * The following actions will be taken: 3584 * - Determine the EP 3585 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed. 3586 */ 3587 static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg) 3588 { 3589 u32 gintsts; 3590 u32 gintmsk; 3591 u32 daintmsk; 3592 u32 epctrl; 3593 struct dwc2_hsotg_ep *hs_ep; 3594 int idx; 3595 3596 dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__); 3597 3598 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3599 daintmsk >>= DAINT_OUTEP_SHIFT; 3600 3601 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3602 hs_ep = hsotg->eps_out[idx]; 3603 /* Proceed only unmasked ISOC EPs */ 3604 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3605 continue; 3606 3607 epctrl = dwc2_readl(hsotg, DOEPCTL(idx)); 3608 if ((epctrl & DXEPCTL_EPENA) && 3609 dwc2_gadget_target_frame_elapsed(hs_ep)) { 3610 /* Unmask GOUTNAKEFF interrupt */ 3611 gintmsk = dwc2_readl(hsotg, GINTMSK); 3612 gintmsk |= GINTSTS_GOUTNAKEFF; 3613 dwc2_writel(hsotg, gintmsk, GINTMSK); 3614 3615 gintsts = dwc2_readl(hsotg, GINTSTS); 3616 if (!(gintsts & GINTSTS_GOUTNAKEFF)) { 3617 dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK); 3618 break; 3619 } 3620 } 3621 } 3622 3623 /* Clear interrupt */ 3624 dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS); 3625 } 3626 3627 /** 3628 * dwc2_hsotg_irq - handle device interrupt 3629 * @irq: The IRQ number triggered 3630 * @pw: The pw value when registered the handler. 3631 */ 3632 static irqreturn_t dwc2_hsotg_irq(int irq, void *pw) 3633 { 3634 struct dwc2_hsotg *hsotg = pw; 3635 int retry_count = 8; 3636 u32 gintsts; 3637 u32 gintmsk; 3638 3639 if (!dwc2_is_device_mode(hsotg)) 3640 return IRQ_NONE; 3641 3642 spin_lock(&hsotg->lock); 3643 irq_retry: 3644 gintsts = dwc2_readl(hsotg, GINTSTS); 3645 gintmsk = dwc2_readl(hsotg, GINTMSK); 3646 3647 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n", 3648 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count); 3649 3650 gintsts &= gintmsk; 3651 3652 if (gintsts & GINTSTS_RESETDET) { 3653 dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__); 3654 3655 dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS); 3656 3657 /* This event must be used only if controller is suspended */ 3658 if (hsotg->lx_state == DWC2_L2) { 3659 dwc2_exit_partial_power_down(hsotg, true); 3660 hsotg->lx_state = DWC2_L0; 3661 } 3662 } 3663 3664 if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) { 3665 u32 usb_status = dwc2_readl(hsotg, GOTGCTL); 3666 u32 connected = hsotg->connected; 3667 3668 dev_dbg(hsotg->dev, "%s: USBRst\n", __func__); 3669 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n", 3670 dwc2_readl(hsotg, GNPTXSTS)); 3671 3672 dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS); 3673 3674 /* Report disconnection if it is not already done. */ 3675 dwc2_hsotg_disconnect(hsotg); 3676 3677 /* Reset device address to zero */ 3678 dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK); 3679 3680 if (usb_status & GOTGCTL_BSESVLD && connected) 3681 dwc2_hsotg_core_init_disconnected(hsotg, true); 3682 } 3683 3684 if (gintsts & GINTSTS_ENUMDONE) { 3685 dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS); 3686 3687 dwc2_hsotg_irq_enumdone(hsotg); 3688 } 3689 3690 if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) { 3691 u32 daint = dwc2_readl(hsotg, DAINT); 3692 u32 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3693 u32 daint_out, daint_in; 3694 int ep; 3695 3696 daint &= daintmsk; 3697 daint_out = daint >> DAINT_OUTEP_SHIFT; 3698 daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT); 3699 3700 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint); 3701 3702 for (ep = 0; ep < hsotg->num_of_eps && daint_out; 3703 ep++, daint_out >>= 1) { 3704 if (daint_out & 1) 3705 dwc2_hsotg_epint(hsotg, ep, 0); 3706 } 3707 3708 for (ep = 0; ep < hsotg->num_of_eps && daint_in; 3709 ep++, daint_in >>= 1) { 3710 if (daint_in & 1) 3711 dwc2_hsotg_epint(hsotg, ep, 1); 3712 } 3713 } 3714 3715 /* check both FIFOs */ 3716 3717 if (gintsts & GINTSTS_NPTXFEMP) { 3718 dev_dbg(hsotg->dev, "NPTxFEmp\n"); 3719 3720 /* 3721 * Disable the interrupt to stop it happening again 3722 * unless one of these endpoint routines decides that 3723 * it needs re-enabling 3724 */ 3725 3726 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP); 3727 dwc2_hsotg_irq_fifoempty(hsotg, false); 3728 } 3729 3730 if (gintsts & GINTSTS_PTXFEMP) { 3731 dev_dbg(hsotg->dev, "PTxFEmp\n"); 3732 3733 /* See note in GINTSTS_NPTxFEmp */ 3734 3735 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP); 3736 dwc2_hsotg_irq_fifoempty(hsotg, true); 3737 } 3738 3739 if (gintsts & GINTSTS_RXFLVL) { 3740 /* 3741 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty, 3742 * we need to retry dwc2_hsotg_handle_rx if this is still 3743 * set. 3744 */ 3745 3746 dwc2_hsotg_handle_rx(hsotg); 3747 } 3748 3749 if (gintsts & GINTSTS_ERLYSUSP) { 3750 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n"); 3751 dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS); 3752 } 3753 3754 /* 3755 * these next two seem to crop-up occasionally causing the core 3756 * to shutdown the USB transfer, so try clearing them and logging 3757 * the occurrence. 3758 */ 3759 3760 if (gintsts & GINTSTS_GOUTNAKEFF) { 3761 u8 idx; 3762 u32 epctrl; 3763 u32 gintmsk; 3764 u32 daintmsk; 3765 struct dwc2_hsotg_ep *hs_ep; 3766 3767 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3768 daintmsk >>= DAINT_OUTEP_SHIFT; 3769 /* Mask this interrupt */ 3770 gintmsk = dwc2_readl(hsotg, GINTMSK); 3771 gintmsk &= ~GINTSTS_GOUTNAKEFF; 3772 dwc2_writel(hsotg, gintmsk, GINTMSK); 3773 3774 dev_dbg(hsotg->dev, "GOUTNakEff triggered\n"); 3775 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3776 hs_ep = hsotg->eps_out[idx]; 3777 /* Proceed only unmasked ISOC EPs */ 3778 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3779 continue; 3780 3781 epctrl = dwc2_readl(hsotg, DOEPCTL(idx)); 3782 3783 if (epctrl & DXEPCTL_EPENA) { 3784 epctrl |= DXEPCTL_SNAK; 3785 epctrl |= DXEPCTL_EPDIS; 3786 dwc2_writel(hsotg, epctrl, DOEPCTL(idx)); 3787 } 3788 } 3789 3790 /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */ 3791 } 3792 3793 if (gintsts & GINTSTS_GINNAKEFF) { 3794 dev_info(hsotg->dev, "GINNakEff triggered\n"); 3795 3796 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK); 3797 3798 dwc2_hsotg_dump(hsotg); 3799 } 3800 3801 if (gintsts & GINTSTS_INCOMPL_SOIN) 3802 dwc2_gadget_handle_incomplete_isoc_in(hsotg); 3803 3804 if (gintsts & GINTSTS_INCOMPL_SOOUT) 3805 dwc2_gadget_handle_incomplete_isoc_out(hsotg); 3806 3807 /* 3808 * if we've had fifo events, we should try and go around the 3809 * loop again to see if there's any point in returning yet. 3810 */ 3811 3812 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0) 3813 goto irq_retry; 3814 3815 /* Check WKUP_ALERT interrupt*/ 3816 if (hsotg->params.service_interval) 3817 dwc2_gadget_wkup_alert_handler(hsotg); 3818 3819 spin_unlock(&hsotg->lock); 3820 3821 return IRQ_HANDLED; 3822 } 3823 3824 static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, 3825 struct dwc2_hsotg_ep *hs_ep) 3826 { 3827 u32 epctrl_reg; 3828 u32 epint_reg; 3829 3830 epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : 3831 DOEPCTL(hs_ep->index); 3832 epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) : 3833 DOEPINT(hs_ep->index); 3834 3835 dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__, 3836 hs_ep->name); 3837 3838 if (hs_ep->dir_in) { 3839 if (hsotg->dedicated_fifos || hs_ep->periodic) { 3840 dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK); 3841 /* Wait for Nak effect */ 3842 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, 3843 DXEPINT_INEPNAKEFF, 100)) 3844 dev_warn(hsotg->dev, 3845 "%s: timeout DIEPINT.NAKEFF\n", 3846 __func__); 3847 } else { 3848 dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK); 3849 /* Wait for Nak effect */ 3850 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, 3851 GINTSTS_GINNAKEFF, 100)) 3852 dev_warn(hsotg->dev, 3853 "%s: timeout GINTSTS.GINNAKEFF\n", 3854 __func__); 3855 } 3856 } else { 3857 if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF)) 3858 dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK); 3859 3860 /* Wait for global nak to take effect */ 3861 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, 3862 GINTSTS_GOUTNAKEFF, 100)) 3863 dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n", 3864 __func__); 3865 } 3866 3867 /* Disable ep */ 3868 dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK); 3869 3870 /* Wait for ep to be disabled */ 3871 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100)) 3872 dev_warn(hsotg->dev, 3873 "%s: timeout DOEPCTL.EPDisable\n", __func__); 3874 3875 /* Clear EPDISBLD interrupt */ 3876 dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD); 3877 3878 if (hs_ep->dir_in) { 3879 unsigned short fifo_index; 3880 3881 if (hsotg->dedicated_fifos || hs_ep->periodic) 3882 fifo_index = hs_ep->fifo_index; 3883 else 3884 fifo_index = 0; 3885 3886 /* Flush TX FIFO */ 3887 dwc2_flush_tx_fifo(hsotg, fifo_index); 3888 3889 /* Clear Global In NP NAK in Shared FIFO for non periodic ep */ 3890 if (!hsotg->dedicated_fifos && !hs_ep->periodic) 3891 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK); 3892 3893 } else { 3894 /* Remove global NAKs */ 3895 dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK); 3896 } 3897 } 3898 3899 /** 3900 * dwc2_hsotg_ep_enable - enable the given endpoint 3901 * @ep: The USB endpint to configure 3902 * @desc: The USB endpoint descriptor to configure with. 3903 * 3904 * This is called from the USB gadget code's usb_ep_enable(). 3905 */ 3906 static int dwc2_hsotg_ep_enable(struct usb_ep *ep, 3907 const struct usb_endpoint_descriptor *desc) 3908 { 3909 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 3910 struct dwc2_hsotg *hsotg = hs_ep->parent; 3911 unsigned long flags; 3912 unsigned int index = hs_ep->index; 3913 u32 epctrl_reg; 3914 u32 epctrl; 3915 u32 mps; 3916 u32 mc; 3917 u32 mask; 3918 unsigned int dir_in; 3919 unsigned int i, val, size; 3920 int ret = 0; 3921 unsigned char ep_type; 3922 int desc_num; 3923 3924 dev_dbg(hsotg->dev, 3925 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", 3926 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes, 3927 desc->wMaxPacketSize, desc->bInterval); 3928 3929 /* not to be called for EP0 */ 3930 if (index == 0) { 3931 dev_err(hsotg->dev, "%s: called for EP 0\n", __func__); 3932 return -EINVAL; 3933 } 3934 3935 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0; 3936 if (dir_in != hs_ep->dir_in) { 3937 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__); 3938 return -EINVAL; 3939 } 3940 3941 ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 3942 mps = usb_endpoint_maxp(desc); 3943 mc = usb_endpoint_maxp_mult(desc); 3944 3945 /* ISOC IN in DDMA supported bInterval up to 10 */ 3946 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3947 dir_in && desc->bInterval > 10) { 3948 dev_err(hsotg->dev, 3949 "%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__); 3950 return -EINVAL; 3951 } 3952 3953 /* High bandwidth ISOC OUT in DDMA not supported */ 3954 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3955 !dir_in && mc > 1) { 3956 dev_err(hsotg->dev, 3957 "%s: ISOC OUT, DDMA: HB not supported!\n", __func__); 3958 return -EINVAL; 3959 } 3960 3961 /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */ 3962 3963 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 3964 epctrl = dwc2_readl(hsotg, epctrl_reg); 3965 3966 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", 3967 __func__, epctrl, epctrl_reg); 3968 3969 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC) 3970 desc_num = MAX_DMA_DESC_NUM_HS_ISOC; 3971 else 3972 desc_num = MAX_DMA_DESC_NUM_GENERIC; 3973 3974 /* Allocate DMA descriptor chain for non-ctrl endpoints */ 3975 if (using_desc_dma(hsotg) && !hs_ep->desc_list) { 3976 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, 3977 desc_num * sizeof(struct dwc2_dma_desc), 3978 &hs_ep->desc_list_dma, GFP_ATOMIC); 3979 if (!hs_ep->desc_list) { 3980 ret = -ENOMEM; 3981 goto error2; 3982 } 3983 } 3984 3985 spin_lock_irqsave(&hsotg->lock, flags); 3986 3987 epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK); 3988 epctrl |= DXEPCTL_MPS(mps); 3989 3990 /* 3991 * mark the endpoint as active, otherwise the core may ignore 3992 * transactions entirely for this endpoint 3993 */ 3994 epctrl |= DXEPCTL_USBACTEP; 3995 3996 /* update the endpoint state */ 3997 dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in); 3998 3999 /* default, set to non-periodic */ 4000 hs_ep->isochronous = 0; 4001 hs_ep->periodic = 0; 4002 hs_ep->halted = 0; 4003 hs_ep->interval = desc->bInterval; 4004 4005 switch (ep_type) { 4006 case USB_ENDPOINT_XFER_ISOC: 4007 epctrl |= DXEPCTL_EPTYPE_ISO; 4008 epctrl |= DXEPCTL_SETEVENFR; 4009 hs_ep->isochronous = 1; 4010 hs_ep->interval = 1 << (desc->bInterval - 1); 4011 hs_ep->target_frame = TARGET_FRAME_INITIAL; 4012 hs_ep->next_desc = 0; 4013 hs_ep->compl_desc = 0; 4014 if (dir_in) { 4015 hs_ep->periodic = 1; 4016 mask = dwc2_readl(hsotg, DIEPMSK); 4017 mask |= DIEPMSK_NAKMSK; 4018 dwc2_writel(hsotg, mask, DIEPMSK); 4019 } else { 4020 mask = dwc2_readl(hsotg, DOEPMSK); 4021 mask |= DOEPMSK_OUTTKNEPDISMSK; 4022 dwc2_writel(hsotg, mask, DOEPMSK); 4023 } 4024 break; 4025 4026 case USB_ENDPOINT_XFER_BULK: 4027 epctrl |= DXEPCTL_EPTYPE_BULK; 4028 break; 4029 4030 case USB_ENDPOINT_XFER_INT: 4031 if (dir_in) 4032 hs_ep->periodic = 1; 4033 4034 if (hsotg->gadget.speed == USB_SPEED_HIGH) 4035 hs_ep->interval = 1 << (desc->bInterval - 1); 4036 4037 epctrl |= DXEPCTL_EPTYPE_INTERRUPT; 4038 break; 4039 4040 case USB_ENDPOINT_XFER_CONTROL: 4041 epctrl |= DXEPCTL_EPTYPE_CONTROL; 4042 break; 4043 } 4044 4045 /* 4046 * if the hardware has dedicated fifos, we must give each IN EP 4047 * a unique tx-fifo even if it is non-periodic. 4048 */ 4049 if (dir_in && hsotg->dedicated_fifos) { 4050 u32 fifo_index = 0; 4051 u32 fifo_size = UINT_MAX; 4052 4053 size = hs_ep->ep.maxpacket * hs_ep->mc; 4054 for (i = 1; i < hsotg->num_of_eps; ++i) { 4055 if (hsotg->fifo_map & (1 << i)) 4056 continue; 4057 val = dwc2_readl(hsotg, DPTXFSIZN(i)); 4058 val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4; 4059 if (val < size) 4060 continue; 4061 /* Search for smallest acceptable fifo */ 4062 if (val < fifo_size) { 4063 fifo_size = val; 4064 fifo_index = i; 4065 } 4066 } 4067 if (!fifo_index) { 4068 dev_err(hsotg->dev, 4069 "%s: No suitable fifo found\n", __func__); 4070 ret = -ENOMEM; 4071 goto error1; 4072 } 4073 epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT); 4074 hsotg->fifo_map |= 1 << fifo_index; 4075 epctrl |= DXEPCTL_TXFNUM(fifo_index); 4076 hs_ep->fifo_index = fifo_index; 4077 hs_ep->fifo_size = fifo_size; 4078 } 4079 4080 /* for non control endpoints, set PID to D0 */ 4081 if (index && !hs_ep->isochronous) 4082 epctrl |= DXEPCTL_SETD0PID; 4083 4084 /* WA for Full speed ISOC IN in DDMA mode. 4085 * By Clear NAK status of EP, core will send ZLP 4086 * to IN token and assert NAK interrupt relying 4087 * on TxFIFO status only 4088 */ 4089 4090 if (hsotg->gadget.speed == USB_SPEED_FULL && 4091 hs_ep->isochronous && dir_in) { 4092 /* The WA applies only to core versions from 2.72a 4093 * to 4.00a (including both). Also for FS_IOT_1.00a 4094 * and HS_IOT_1.00a. 4095 */ 4096 u32 gsnpsid = dwc2_readl(hsotg, GSNPSID); 4097 4098 if ((gsnpsid >= DWC2_CORE_REV_2_72a && 4099 gsnpsid <= DWC2_CORE_REV_4_00a) || 4100 gsnpsid == DWC2_FS_IOT_REV_1_00a || 4101 gsnpsid == DWC2_HS_IOT_REV_1_00a) 4102 epctrl |= DXEPCTL_CNAK; 4103 } 4104 4105 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n", 4106 __func__, epctrl); 4107 4108 dwc2_writel(hsotg, epctrl, epctrl_reg); 4109 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n", 4110 __func__, dwc2_readl(hsotg, epctrl_reg)); 4111 4112 /* enable the endpoint interrupt */ 4113 dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1); 4114 4115 error1: 4116 spin_unlock_irqrestore(&hsotg->lock, flags); 4117 4118 error2: 4119 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { 4120 dmam_free_coherent(hsotg->dev, desc_num * 4121 sizeof(struct dwc2_dma_desc), 4122 hs_ep->desc_list, hs_ep->desc_list_dma); 4123 hs_ep->desc_list = NULL; 4124 } 4125 4126 return ret; 4127 } 4128 4129 /** 4130 * dwc2_hsotg_ep_disable - disable given endpoint 4131 * @ep: The endpoint to disable. 4132 */ 4133 static int dwc2_hsotg_ep_disable(struct usb_ep *ep) 4134 { 4135 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4136 struct dwc2_hsotg *hsotg = hs_ep->parent; 4137 int dir_in = hs_ep->dir_in; 4138 int index = hs_ep->index; 4139 u32 epctrl_reg; 4140 u32 ctrl; 4141 4142 dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep); 4143 4144 if (ep == &hsotg->eps_out[0]->ep) { 4145 dev_err(hsotg->dev, "%s: called for ep0\n", __func__); 4146 return -EINVAL; 4147 } 4148 4149 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) { 4150 dev_err(hsotg->dev, "%s: called in host mode?\n", __func__); 4151 return -EINVAL; 4152 } 4153 4154 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 4155 4156 ctrl = dwc2_readl(hsotg, epctrl_reg); 4157 4158 if (ctrl & DXEPCTL_EPENA) 4159 dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep); 4160 4161 ctrl &= ~DXEPCTL_EPENA; 4162 ctrl &= ~DXEPCTL_USBACTEP; 4163 ctrl |= DXEPCTL_SNAK; 4164 4165 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl); 4166 dwc2_writel(hsotg, ctrl, epctrl_reg); 4167 4168 /* disable endpoint interrupts */ 4169 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0); 4170 4171 /* terminate all requests with shutdown */ 4172 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN); 4173 4174 hsotg->fifo_map &= ~(1 << hs_ep->fifo_index); 4175 hs_ep->fifo_index = 0; 4176 hs_ep->fifo_size = 0; 4177 4178 return 0; 4179 } 4180 4181 static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep) 4182 { 4183 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4184 struct dwc2_hsotg *hsotg = hs_ep->parent; 4185 unsigned long flags; 4186 int ret; 4187 4188 spin_lock_irqsave(&hsotg->lock, flags); 4189 ret = dwc2_hsotg_ep_disable(ep); 4190 spin_unlock_irqrestore(&hsotg->lock, flags); 4191 return ret; 4192 } 4193 4194 /** 4195 * on_list - check request is on the given endpoint 4196 * @ep: The endpoint to check. 4197 * @test: The request to test if it is on the endpoint. 4198 */ 4199 static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test) 4200 { 4201 struct dwc2_hsotg_req *req, *treq; 4202 4203 list_for_each_entry_safe(req, treq, &ep->queue, queue) { 4204 if (req == test) 4205 return true; 4206 } 4207 4208 return false; 4209 } 4210 4211 /** 4212 * dwc2_hsotg_ep_dequeue - dequeue given endpoint 4213 * @ep: The endpoint to dequeue. 4214 * @req: The request to be removed from a queue. 4215 */ 4216 static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) 4217 { 4218 struct dwc2_hsotg_req *hs_req = our_req(req); 4219 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4220 struct dwc2_hsotg *hs = hs_ep->parent; 4221 unsigned long flags; 4222 4223 dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req); 4224 4225 spin_lock_irqsave(&hs->lock, flags); 4226 4227 if (!on_list(hs_ep, hs_req)) { 4228 spin_unlock_irqrestore(&hs->lock, flags); 4229 return -EINVAL; 4230 } 4231 4232 /* Dequeue already started request */ 4233 if (req == &hs_ep->req->req) 4234 dwc2_hsotg_ep_stop_xfr(hs, hs_ep); 4235 4236 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET); 4237 spin_unlock_irqrestore(&hs->lock, flags); 4238 4239 return 0; 4240 } 4241 4242 /** 4243 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint 4244 * @ep: The endpoint to set halt. 4245 * @value: Set or unset the halt. 4246 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if 4247 * the endpoint is busy processing requests. 4248 * 4249 * We need to stall the endpoint immediately if request comes from set_feature 4250 * protocol command handler. 4251 */ 4252 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now) 4253 { 4254 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4255 struct dwc2_hsotg *hs = hs_ep->parent; 4256 int index = hs_ep->index; 4257 u32 epreg; 4258 u32 epctl; 4259 u32 xfertype; 4260 4261 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value); 4262 4263 if (index == 0) { 4264 if (value) 4265 dwc2_hsotg_stall_ep0(hs); 4266 else 4267 dev_warn(hs->dev, 4268 "%s: can't clear halt on ep0\n", __func__); 4269 return 0; 4270 } 4271 4272 if (hs_ep->isochronous) { 4273 dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name); 4274 return -EINVAL; 4275 } 4276 4277 if (!now && value && !list_empty(&hs_ep->queue)) { 4278 dev_dbg(hs->dev, "%s request is pending, cannot halt\n", 4279 ep->name); 4280 return -EAGAIN; 4281 } 4282 4283 if (hs_ep->dir_in) { 4284 epreg = DIEPCTL(index); 4285 epctl = dwc2_readl(hs, epreg); 4286 4287 if (value) { 4288 epctl |= DXEPCTL_STALL | DXEPCTL_SNAK; 4289 if (epctl & DXEPCTL_EPENA) 4290 epctl |= DXEPCTL_EPDIS; 4291 } else { 4292 epctl &= ~DXEPCTL_STALL; 4293 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 4294 if (xfertype == DXEPCTL_EPTYPE_BULK || 4295 xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4296 epctl |= DXEPCTL_SETD0PID; 4297 } 4298 dwc2_writel(hs, epctl, epreg); 4299 } else { 4300 epreg = DOEPCTL(index); 4301 epctl = dwc2_readl(hs, epreg); 4302 4303 if (value) { 4304 epctl |= DXEPCTL_STALL; 4305 } else { 4306 epctl &= ~DXEPCTL_STALL; 4307 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 4308 if (xfertype == DXEPCTL_EPTYPE_BULK || 4309 xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4310 epctl |= DXEPCTL_SETD0PID; 4311 } 4312 dwc2_writel(hs, epctl, epreg); 4313 } 4314 4315 hs_ep->halted = value; 4316 4317 return 0; 4318 } 4319 4320 /** 4321 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held 4322 * @ep: The endpoint to set halt. 4323 * @value: Set or unset the halt. 4324 */ 4325 static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) 4326 { 4327 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4328 struct dwc2_hsotg *hs = hs_ep->parent; 4329 unsigned long flags = 0; 4330 int ret = 0; 4331 4332 spin_lock_irqsave(&hs->lock, flags); 4333 ret = dwc2_hsotg_ep_sethalt(ep, value, false); 4334 spin_unlock_irqrestore(&hs->lock, flags); 4335 4336 return ret; 4337 } 4338 4339 static const struct usb_ep_ops dwc2_hsotg_ep_ops = { 4340 .enable = dwc2_hsotg_ep_enable, 4341 .disable = dwc2_hsotg_ep_disable_lock, 4342 .alloc_request = dwc2_hsotg_ep_alloc_request, 4343 .free_request = dwc2_hsotg_ep_free_request, 4344 .queue = dwc2_hsotg_ep_queue_lock, 4345 .dequeue = dwc2_hsotg_ep_dequeue, 4346 .set_halt = dwc2_hsotg_ep_sethalt_lock, 4347 /* note, don't believe we have any call for the fifo routines */ 4348 }; 4349 4350 /** 4351 * dwc2_hsotg_init - initialize the usb core 4352 * @hsotg: The driver state 4353 */ 4354 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) 4355 { 4356 /* unmask subset of endpoint interrupts */ 4357 4358 dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | 4359 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK, 4360 DIEPMSK); 4361 4362 dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK | 4363 DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK, 4364 DOEPMSK); 4365 4366 dwc2_writel(hsotg, 0, DAINTMSK); 4367 4368 /* Be in disconnected state until gadget is registered */ 4369 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 4370 4371 /* setup fifos */ 4372 4373 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 4374 dwc2_readl(hsotg, GRXFSIZ), 4375 dwc2_readl(hsotg, GNPTXFSIZ)); 4376 4377 dwc2_hsotg_init_fifo(hsotg); 4378 4379 if (using_dma(hsotg)) 4380 dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN); 4381 } 4382 4383 /** 4384 * dwc2_hsotg_udc_start - prepare the udc for work 4385 * @gadget: The usb gadget state 4386 * @driver: The usb gadget driver 4387 * 4388 * Perform initialization to prepare udc device and driver 4389 * to work. 4390 */ 4391 static int dwc2_hsotg_udc_start(struct usb_gadget *gadget, 4392 struct usb_gadget_driver *driver) 4393 { 4394 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4395 unsigned long flags; 4396 int ret; 4397 4398 if (!hsotg) { 4399 pr_err("%s: called with no device\n", __func__); 4400 return -ENODEV; 4401 } 4402 4403 if (!driver) { 4404 dev_err(hsotg->dev, "%s: no driver\n", __func__); 4405 return -EINVAL; 4406 } 4407 4408 if (driver->max_speed < USB_SPEED_FULL) 4409 dev_err(hsotg->dev, "%s: bad speed\n", __func__); 4410 4411 if (!driver->setup) { 4412 dev_err(hsotg->dev, "%s: missing entry points\n", __func__); 4413 return -EINVAL; 4414 } 4415 4416 WARN_ON(hsotg->driver); 4417 4418 driver->driver.bus = NULL; 4419 hsotg->driver = driver; 4420 hsotg->gadget.dev.of_node = hsotg->dev->of_node; 4421 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4422 4423 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { 4424 ret = dwc2_lowlevel_hw_enable(hsotg); 4425 if (ret) 4426 goto err; 4427 } 4428 4429 if (!IS_ERR_OR_NULL(hsotg->uphy)) 4430 otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget); 4431 4432 spin_lock_irqsave(&hsotg->lock, flags); 4433 if (dwc2_hw_is_device(hsotg)) { 4434 dwc2_hsotg_init(hsotg); 4435 dwc2_hsotg_core_init_disconnected(hsotg, false); 4436 } 4437 4438 hsotg->enabled = 0; 4439 spin_unlock_irqrestore(&hsotg->lock, flags); 4440 4441 gadget->sg_supported = using_desc_dma(hsotg); 4442 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name); 4443 4444 return 0; 4445 4446 err: 4447 hsotg->driver = NULL; 4448 return ret; 4449 } 4450 4451 /** 4452 * dwc2_hsotg_udc_stop - stop the udc 4453 * @gadget: The usb gadget state 4454 * 4455 * Stop udc hw block and stay tunned for future transmissions 4456 */ 4457 static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) 4458 { 4459 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4460 unsigned long flags = 0; 4461 int ep; 4462 4463 if (!hsotg) 4464 return -ENODEV; 4465 4466 /* all endpoints should be shutdown */ 4467 for (ep = 1; ep < hsotg->num_of_eps; ep++) { 4468 if (hsotg->eps_in[ep]) 4469 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); 4470 if (hsotg->eps_out[ep]) 4471 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep); 4472 } 4473 4474 spin_lock_irqsave(&hsotg->lock, flags); 4475 4476 hsotg->driver = NULL; 4477 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4478 hsotg->enabled = 0; 4479 4480 spin_unlock_irqrestore(&hsotg->lock, flags); 4481 4482 if (!IS_ERR_OR_NULL(hsotg->uphy)) 4483 otg_set_peripheral(hsotg->uphy->otg, NULL); 4484 4485 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) 4486 dwc2_lowlevel_hw_disable(hsotg); 4487 4488 return 0; 4489 } 4490 4491 /** 4492 * dwc2_hsotg_gadget_getframe - read the frame number 4493 * @gadget: The usb gadget state 4494 * 4495 * Read the {micro} frame number 4496 */ 4497 static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget) 4498 { 4499 return dwc2_hsotg_read_frameno(to_hsotg(gadget)); 4500 } 4501 4502 /** 4503 * dwc2_hsotg_pullup - connect/disconnect the USB PHY 4504 * @gadget: The usb gadget state 4505 * @is_on: Current state of the USB PHY 4506 * 4507 * Connect/Disconnect the USB PHY pullup 4508 */ 4509 static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on) 4510 { 4511 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4512 unsigned long flags = 0; 4513 4514 dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on, 4515 hsotg->op_state); 4516 4517 /* Don't modify pullup state while in host mode */ 4518 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) { 4519 hsotg->enabled = is_on; 4520 return 0; 4521 } 4522 4523 spin_lock_irqsave(&hsotg->lock, flags); 4524 if (is_on) { 4525 hsotg->enabled = 1; 4526 dwc2_hsotg_core_init_disconnected(hsotg, false); 4527 /* Enable ACG feature in device mode,if supported */ 4528 dwc2_enable_acg(hsotg); 4529 dwc2_hsotg_core_connect(hsotg); 4530 } else { 4531 dwc2_hsotg_core_disconnect(hsotg); 4532 dwc2_hsotg_disconnect(hsotg); 4533 hsotg->enabled = 0; 4534 } 4535 4536 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4537 spin_unlock_irqrestore(&hsotg->lock, flags); 4538 4539 return 0; 4540 } 4541 4542 static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active) 4543 { 4544 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4545 unsigned long flags; 4546 4547 dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active); 4548 spin_lock_irqsave(&hsotg->lock, flags); 4549 4550 /* 4551 * If controller is hibernated, it must exit from power_down 4552 * before being initialized / de-initialized 4553 */ 4554 if (hsotg->lx_state == DWC2_L2) 4555 dwc2_exit_partial_power_down(hsotg, false); 4556 4557 if (is_active) { 4558 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 4559 4560 dwc2_hsotg_core_init_disconnected(hsotg, false); 4561 if (hsotg->enabled) { 4562 /* Enable ACG feature in device mode,if supported */ 4563 dwc2_enable_acg(hsotg); 4564 dwc2_hsotg_core_connect(hsotg); 4565 } 4566 } else { 4567 dwc2_hsotg_core_disconnect(hsotg); 4568 dwc2_hsotg_disconnect(hsotg); 4569 } 4570 4571 spin_unlock_irqrestore(&hsotg->lock, flags); 4572 return 0; 4573 } 4574 4575 /** 4576 * dwc2_hsotg_vbus_draw - report bMaxPower field 4577 * @gadget: The usb gadget state 4578 * @mA: Amount of current 4579 * 4580 * Report how much power the device may consume to the phy. 4581 */ 4582 static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA) 4583 { 4584 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4585 4586 if (IS_ERR_OR_NULL(hsotg->uphy)) 4587 return -ENOTSUPP; 4588 return usb_phy_set_power(hsotg->uphy, mA); 4589 } 4590 4591 static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = { 4592 .get_frame = dwc2_hsotg_gadget_getframe, 4593 .udc_start = dwc2_hsotg_udc_start, 4594 .udc_stop = dwc2_hsotg_udc_stop, 4595 .pullup = dwc2_hsotg_pullup, 4596 .vbus_session = dwc2_hsotg_vbus_session, 4597 .vbus_draw = dwc2_hsotg_vbus_draw, 4598 }; 4599 4600 /** 4601 * dwc2_hsotg_initep - initialise a single endpoint 4602 * @hsotg: The device state. 4603 * @hs_ep: The endpoint to be initialised. 4604 * @epnum: The endpoint number 4605 * @dir_in: True if direction is in. 4606 * 4607 * Initialise the given endpoint (as part of the probe and device state 4608 * creation) to give to the gadget driver. Setup the endpoint name, any 4609 * direction information and other state that may be required. 4610 */ 4611 static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg, 4612 struct dwc2_hsotg_ep *hs_ep, 4613 int epnum, 4614 bool dir_in) 4615 { 4616 char *dir; 4617 4618 if (epnum == 0) 4619 dir = ""; 4620 else if (dir_in) 4621 dir = "in"; 4622 else 4623 dir = "out"; 4624 4625 hs_ep->dir_in = dir_in; 4626 hs_ep->index = epnum; 4627 4628 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir); 4629 4630 INIT_LIST_HEAD(&hs_ep->queue); 4631 INIT_LIST_HEAD(&hs_ep->ep.ep_list); 4632 4633 /* add to the list of endpoints known by the gadget driver */ 4634 if (epnum) 4635 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list); 4636 4637 hs_ep->parent = hsotg; 4638 hs_ep->ep.name = hs_ep->name; 4639 4640 if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW) 4641 usb_ep_set_maxpacket_limit(&hs_ep->ep, 8); 4642 else 4643 usb_ep_set_maxpacket_limit(&hs_ep->ep, 4644 epnum ? 1024 : EP0_MPS_LIMIT); 4645 hs_ep->ep.ops = &dwc2_hsotg_ep_ops; 4646 4647 if (epnum == 0) { 4648 hs_ep->ep.caps.type_control = true; 4649 } else { 4650 if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) { 4651 hs_ep->ep.caps.type_iso = true; 4652 hs_ep->ep.caps.type_bulk = true; 4653 } 4654 hs_ep->ep.caps.type_int = true; 4655 } 4656 4657 if (dir_in) 4658 hs_ep->ep.caps.dir_in = true; 4659 else 4660 hs_ep->ep.caps.dir_out = true; 4661 4662 /* 4663 * if we're using dma, we need to set the next-endpoint pointer 4664 * to be something valid. 4665 */ 4666 4667 if (using_dma(hsotg)) { 4668 u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15); 4669 4670 if (dir_in) 4671 dwc2_writel(hsotg, next, DIEPCTL(epnum)); 4672 else 4673 dwc2_writel(hsotg, next, DOEPCTL(epnum)); 4674 } 4675 } 4676 4677 /** 4678 * dwc2_hsotg_hw_cfg - read HW configuration registers 4679 * @hsotg: Programming view of the DWC_otg controller 4680 * 4681 * Read the USB core HW configuration registers 4682 */ 4683 static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) 4684 { 4685 u32 cfg; 4686 u32 ep_type; 4687 u32 i; 4688 4689 /* check hardware configuration */ 4690 4691 hsotg->num_of_eps = hsotg->hw_params.num_dev_ep; 4692 4693 /* Add ep0 */ 4694 hsotg->num_of_eps++; 4695 4696 hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, 4697 sizeof(struct dwc2_hsotg_ep), 4698 GFP_KERNEL); 4699 if (!hsotg->eps_in[0]) 4700 return -ENOMEM; 4701 /* Same dwc2_hsotg_ep is used in both directions for ep0 */ 4702 hsotg->eps_out[0] = hsotg->eps_in[0]; 4703 4704 cfg = hsotg->hw_params.dev_ep_dirs; 4705 for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) { 4706 ep_type = cfg & 3; 4707 /* Direction in or both */ 4708 if (!(ep_type & 2)) { 4709 hsotg->eps_in[i] = devm_kzalloc(hsotg->dev, 4710 sizeof(struct dwc2_hsotg_ep), GFP_KERNEL); 4711 if (!hsotg->eps_in[i]) 4712 return -ENOMEM; 4713 } 4714 /* Direction out or both */ 4715 if (!(ep_type & 1)) { 4716 hsotg->eps_out[i] = devm_kzalloc(hsotg->dev, 4717 sizeof(struct dwc2_hsotg_ep), GFP_KERNEL); 4718 if (!hsotg->eps_out[i]) 4719 return -ENOMEM; 4720 } 4721 } 4722 4723 hsotg->fifo_mem = hsotg->hw_params.total_fifo_size; 4724 hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo; 4725 4726 dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n", 4727 hsotg->num_of_eps, 4728 hsotg->dedicated_fifos ? "dedicated" : "shared", 4729 hsotg->fifo_mem); 4730 return 0; 4731 } 4732 4733 /** 4734 * dwc2_hsotg_dump - dump state of the udc 4735 * @hsotg: Programming view of the DWC_otg controller 4736 * 4737 */ 4738 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) 4739 { 4740 #ifdef DEBUG 4741 struct device *dev = hsotg->dev; 4742 u32 val; 4743 int idx; 4744 4745 dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n", 4746 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL), 4747 dwc2_readl(hsotg, DIEPMSK)); 4748 4749 dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n", 4750 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1)); 4751 4752 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 4753 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ)); 4754 4755 /* show periodic fifo settings */ 4756 4757 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 4758 val = dwc2_readl(hsotg, DPTXFSIZN(idx)); 4759 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx, 4760 val >> FIFOSIZE_DEPTH_SHIFT, 4761 val & FIFOSIZE_STARTADDR_MASK); 4762 } 4763 4764 for (idx = 0; idx < hsotg->num_of_eps; idx++) { 4765 dev_info(dev, 4766 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, 4767 dwc2_readl(hsotg, DIEPCTL(idx)), 4768 dwc2_readl(hsotg, DIEPTSIZ(idx)), 4769 dwc2_readl(hsotg, DIEPDMA(idx))); 4770 4771 val = dwc2_readl(hsotg, DOEPCTL(idx)); 4772 dev_info(dev, 4773 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", 4774 idx, dwc2_readl(hsotg, DOEPCTL(idx)), 4775 dwc2_readl(hsotg, DOEPTSIZ(idx)), 4776 dwc2_readl(hsotg, DOEPDMA(idx))); 4777 } 4778 4779 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", 4780 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE)); 4781 #endif 4782 } 4783 4784 /** 4785 * dwc2_gadget_init - init function for gadget 4786 * @hsotg: Programming view of the DWC_otg controller 4787 * 4788 */ 4789 int dwc2_gadget_init(struct dwc2_hsotg *hsotg) 4790 { 4791 struct device *dev = hsotg->dev; 4792 int epnum; 4793 int ret; 4794 4795 /* Dump fifo information */ 4796 dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", 4797 hsotg->params.g_np_tx_fifo_size); 4798 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size); 4799 4800 hsotg->gadget.max_speed = USB_SPEED_HIGH; 4801 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops; 4802 hsotg->gadget.name = dev_name(dev); 4803 hsotg->remote_wakeup_allowed = 0; 4804 4805 if (hsotg->params.lpm) 4806 hsotg->gadget.lpm_capable = true; 4807 4808 if (hsotg->dr_mode == USB_DR_MODE_OTG) 4809 hsotg->gadget.is_otg = 1; 4810 else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) 4811 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 4812 4813 ret = dwc2_hsotg_hw_cfg(hsotg); 4814 if (ret) { 4815 dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret); 4816 return ret; 4817 } 4818 4819 hsotg->ctrl_buff = devm_kzalloc(hsotg->dev, 4820 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); 4821 if (!hsotg->ctrl_buff) 4822 return -ENOMEM; 4823 4824 hsotg->ep0_buff = devm_kzalloc(hsotg->dev, 4825 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); 4826 if (!hsotg->ep0_buff) 4827 return -ENOMEM; 4828 4829 if (using_desc_dma(hsotg)) { 4830 ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg); 4831 if (ret < 0) 4832 return ret; 4833 } 4834 4835 ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq, 4836 IRQF_SHARED, dev_name(hsotg->dev), hsotg); 4837 if (ret < 0) { 4838 dev_err(dev, "cannot claim IRQ for gadget\n"); 4839 return ret; 4840 } 4841 4842 /* hsotg->num_of_eps holds number of EPs other than ep0 */ 4843 4844 if (hsotg->num_of_eps == 0) { 4845 dev_err(dev, "wrong number of EPs (zero)\n"); 4846 return -EINVAL; 4847 } 4848 4849 /* setup endpoint information */ 4850 4851 INIT_LIST_HEAD(&hsotg->gadget.ep_list); 4852 hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep; 4853 4854 /* allocate EP0 request */ 4855 4856 hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep, 4857 GFP_KERNEL); 4858 if (!hsotg->ctrl_req) { 4859 dev_err(dev, "failed to allocate ctrl req\n"); 4860 return -ENOMEM; 4861 } 4862 4863 /* initialise the endpoints now the core has been initialised */ 4864 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) { 4865 if (hsotg->eps_in[epnum]) 4866 dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum], 4867 epnum, 1); 4868 if (hsotg->eps_out[epnum]) 4869 dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum], 4870 epnum, 0); 4871 } 4872 4873 ret = usb_add_gadget_udc(dev, &hsotg->gadget); 4874 if (ret) { 4875 dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, 4876 hsotg->ctrl_req); 4877 return ret; 4878 } 4879 dwc2_hsotg_dump(hsotg); 4880 4881 return 0; 4882 } 4883 4884 /** 4885 * dwc2_hsotg_remove - remove function for hsotg driver 4886 * @hsotg: Programming view of the DWC_otg controller 4887 * 4888 */ 4889 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg) 4890 { 4891 usb_del_gadget_udc(&hsotg->gadget); 4892 dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req); 4893 4894 return 0; 4895 } 4896 4897 int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg) 4898 { 4899 unsigned long flags; 4900 4901 if (hsotg->lx_state != DWC2_L0) 4902 return 0; 4903 4904 if (hsotg->driver) { 4905 int ep; 4906 4907 dev_info(hsotg->dev, "suspending usb gadget %s\n", 4908 hsotg->driver->driver.name); 4909 4910 spin_lock_irqsave(&hsotg->lock, flags); 4911 if (hsotg->enabled) 4912 dwc2_hsotg_core_disconnect(hsotg); 4913 dwc2_hsotg_disconnect(hsotg); 4914 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4915 spin_unlock_irqrestore(&hsotg->lock, flags); 4916 4917 for (ep = 0; ep < hsotg->num_of_eps; ep++) { 4918 if (hsotg->eps_in[ep]) 4919 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); 4920 if (hsotg->eps_out[ep]) 4921 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep); 4922 } 4923 } 4924 4925 return 0; 4926 } 4927 4928 int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg) 4929 { 4930 unsigned long flags; 4931 4932 if (hsotg->lx_state == DWC2_L2) 4933 return 0; 4934 4935 if (hsotg->driver) { 4936 dev_info(hsotg->dev, "resuming usb gadget %s\n", 4937 hsotg->driver->driver.name); 4938 4939 spin_lock_irqsave(&hsotg->lock, flags); 4940 dwc2_hsotg_core_init_disconnected(hsotg, false); 4941 if (hsotg->enabled) { 4942 /* Enable ACG feature in device mode,if supported */ 4943 dwc2_enable_acg(hsotg); 4944 dwc2_hsotg_core_connect(hsotg); 4945 } 4946 spin_unlock_irqrestore(&hsotg->lock, flags); 4947 } 4948 4949 return 0; 4950 } 4951 4952 /** 4953 * dwc2_backup_device_registers() - Backup controller device registers. 4954 * When suspending usb bus, registers needs to be backuped 4955 * if controller power is disabled once suspended. 4956 * 4957 * @hsotg: Programming view of the DWC_otg controller 4958 */ 4959 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 4960 { 4961 struct dwc2_dregs_backup *dr; 4962 int i; 4963 4964 dev_dbg(hsotg->dev, "%s\n", __func__); 4965 4966 /* Backup dev regs */ 4967 dr = &hsotg->dr_backup; 4968 4969 dr->dcfg = dwc2_readl(hsotg, DCFG); 4970 dr->dctl = dwc2_readl(hsotg, DCTL); 4971 dr->daintmsk = dwc2_readl(hsotg, DAINTMSK); 4972 dr->diepmsk = dwc2_readl(hsotg, DIEPMSK); 4973 dr->doepmsk = dwc2_readl(hsotg, DOEPMSK); 4974 4975 for (i = 0; i < hsotg->num_of_eps; i++) { 4976 /* Backup IN EPs */ 4977 dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i)); 4978 4979 /* Ensure DATA PID is correctly configured */ 4980 if (dr->diepctl[i] & DXEPCTL_DPID) 4981 dr->diepctl[i] |= DXEPCTL_SETD1PID; 4982 else 4983 dr->diepctl[i] |= DXEPCTL_SETD0PID; 4984 4985 dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i)); 4986 dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i)); 4987 4988 /* Backup OUT EPs */ 4989 dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i)); 4990 4991 /* Ensure DATA PID is correctly configured */ 4992 if (dr->doepctl[i] & DXEPCTL_DPID) 4993 dr->doepctl[i] |= DXEPCTL_SETD1PID; 4994 else 4995 dr->doepctl[i] |= DXEPCTL_SETD0PID; 4996 4997 dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i)); 4998 dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i)); 4999 dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i)); 5000 } 5001 dr->valid = true; 5002 return 0; 5003 } 5004 5005 /** 5006 * dwc2_restore_device_registers() - Restore controller device registers. 5007 * When resuming usb bus, device registers needs to be restored 5008 * if controller power were disabled. 5009 * 5010 * @hsotg: Programming view of the DWC_otg controller 5011 * @remote_wakeup: Indicates whether resume is initiated by Device or Host. 5012 * 5013 * Return: 0 if successful, negative error code otherwise 5014 */ 5015 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup) 5016 { 5017 struct dwc2_dregs_backup *dr; 5018 int i; 5019 5020 dev_dbg(hsotg->dev, "%s\n", __func__); 5021 5022 /* Restore dev regs */ 5023 dr = &hsotg->dr_backup; 5024 if (!dr->valid) { 5025 dev_err(hsotg->dev, "%s: no device registers to restore\n", 5026 __func__); 5027 return -EINVAL; 5028 } 5029 dr->valid = false; 5030 5031 if (!remote_wakeup) 5032 dwc2_writel(hsotg, dr->dctl, DCTL); 5033 5034 dwc2_writel(hsotg, dr->daintmsk, DAINTMSK); 5035 dwc2_writel(hsotg, dr->diepmsk, DIEPMSK); 5036 dwc2_writel(hsotg, dr->doepmsk, DOEPMSK); 5037 5038 for (i = 0; i < hsotg->num_of_eps; i++) { 5039 /* Restore IN EPs */ 5040 dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i)); 5041 dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i)); 5042 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i)); 5043 /** WA for enabled EPx's IN in DDMA mode. On entering to 5044 * hibernation wrong value read and saved from DIEPDMAx, 5045 * as result BNA interrupt asserted on hibernation exit 5046 * by restoring from saved area. 5047 */ 5048 if (hsotg->params.g_dma_desc && 5049 (dr->diepctl[i] & DXEPCTL_EPENA)) 5050 dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma; 5051 dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i)); 5052 dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i)); 5053 /* Restore OUT EPs */ 5054 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i)); 5055 /* WA for enabled EPx's OUT in DDMA mode. On entering to 5056 * hibernation wrong value read and saved from DOEPDMAx, 5057 * as result BNA interrupt asserted on hibernation exit 5058 * by restoring from saved area. 5059 */ 5060 if (hsotg->params.g_dma_desc && 5061 (dr->doepctl[i] & DXEPCTL_EPENA)) 5062 dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma; 5063 dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i)); 5064 dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i)); 5065 } 5066 5067 return 0; 5068 } 5069 5070 /** 5071 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode 5072 * 5073 * @hsotg: Programming view of DWC_otg controller 5074 * 5075 */ 5076 void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) 5077 { 5078 u32 val; 5079 5080 if (!hsotg->params.lpm) 5081 return; 5082 5083 val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES; 5084 val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0; 5085 val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0; 5086 val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT; 5087 val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0; 5088 val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL; 5089 val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC; 5090 dwc2_writel(hsotg, val, GLPMCFG); 5091 dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG)); 5092 5093 /* Unmask WKUP_ALERT Interrupt */ 5094 if (hsotg->params.service_interval) 5095 dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK); 5096 } 5097 5098 /** 5099 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode 5100 * 5101 * @hsotg: Programming view of DWC_otg controller 5102 * 5103 */ 5104 void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) 5105 { 5106 u32 val = 0; 5107 5108 val |= GREFCLK_REF_CLK_MODE; 5109 val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT; 5110 val |= hsotg->params.sof_cnt_wkup_alert << 5111 GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT; 5112 5113 dwc2_writel(hsotg, val, GREFCLK); 5114 dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK)); 5115 } 5116 5117 /** 5118 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation. 5119 * 5120 * @hsotg: Programming view of the DWC_otg controller 5121 * 5122 * Return non-zero if failed to enter to hibernation. 5123 */ 5124 int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg) 5125 { 5126 u32 gpwrdn; 5127 int ret = 0; 5128 5129 /* Change to L2(suspend) state */ 5130 hsotg->lx_state = DWC2_L2; 5131 dev_dbg(hsotg->dev, "Start of hibernation completed\n"); 5132 ret = dwc2_backup_global_registers(hsotg); 5133 if (ret) { 5134 dev_err(hsotg->dev, "%s: failed to backup global registers\n", 5135 __func__); 5136 return ret; 5137 } 5138 ret = dwc2_backup_device_registers(hsotg); 5139 if (ret) { 5140 dev_err(hsotg->dev, "%s: failed to backup device registers\n", 5141 __func__); 5142 return ret; 5143 } 5144 5145 gpwrdn = GPWRDN_PWRDNRSTN; 5146 gpwrdn |= GPWRDN_PMUACTV; 5147 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5148 udelay(10); 5149 5150 /* Set flag to indicate that we are in hibernation */ 5151 hsotg->hibernated = 1; 5152 5153 /* Enable interrupts from wake up logic */ 5154 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5155 gpwrdn |= GPWRDN_PMUINTSEL; 5156 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5157 udelay(10); 5158 5159 /* Unmask device mode interrupts in GPWRDN */ 5160 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5161 gpwrdn |= GPWRDN_RST_DET_MSK; 5162 gpwrdn |= GPWRDN_LNSTSCHG_MSK; 5163 gpwrdn |= GPWRDN_STS_CHGINT_MSK; 5164 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5165 udelay(10); 5166 5167 /* Enable Power Down Clamp */ 5168 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5169 gpwrdn |= GPWRDN_PWRDNCLMP; 5170 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5171 udelay(10); 5172 5173 /* Switch off VDD */ 5174 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5175 gpwrdn |= GPWRDN_PWRDNSWTCH; 5176 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5177 udelay(10); 5178 5179 /* Save gpwrdn register for further usage if stschng interrupt */ 5180 hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN); 5181 dev_dbg(hsotg->dev, "Hibernation completed\n"); 5182 5183 return ret; 5184 } 5185 5186 /** 5187 * dwc2_gadget_exit_hibernation() 5188 * This function is for exiting from Device mode hibernation by host initiated 5189 * resume/reset and device initiated remote-wakeup. 5190 * 5191 * @hsotg: Programming view of the DWC_otg controller 5192 * @rem_wakeup: indicates whether resume is initiated by Device or Host. 5193 * @reset: indicates whether resume is initiated by Reset. 5194 * 5195 * Return non-zero if failed to exit from hibernation. 5196 */ 5197 int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg, 5198 int rem_wakeup, int reset) 5199 { 5200 u32 pcgcctl; 5201 u32 gpwrdn; 5202 u32 dctl; 5203 int ret = 0; 5204 struct dwc2_gregs_backup *gr; 5205 struct dwc2_dregs_backup *dr; 5206 5207 gr = &hsotg->gr_backup; 5208 dr = &hsotg->dr_backup; 5209 5210 if (!hsotg->hibernated) { 5211 dev_dbg(hsotg->dev, "Already exited from Hibernation\n"); 5212 return 1; 5213 } 5214 dev_dbg(hsotg->dev, 5215 "%s: called with rem_wakeup = %d reset = %d\n", 5216 __func__, rem_wakeup, reset); 5217 5218 dwc2_hib_restore_common(hsotg, rem_wakeup, 0); 5219 5220 if (!reset) { 5221 /* Clear all pending interupts */ 5222 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 5223 } 5224 5225 /* De-assert Restore */ 5226 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5227 gpwrdn &= ~GPWRDN_RESTORE; 5228 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5229 udelay(10); 5230 5231 if (!rem_wakeup) { 5232 pcgcctl = dwc2_readl(hsotg, PCGCTL); 5233 pcgcctl &= ~PCGCTL_RSTPDWNMODULE; 5234 dwc2_writel(hsotg, pcgcctl, PCGCTL); 5235 } 5236 5237 /* Restore GUSBCFG, DCFG and DCTL */ 5238 dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG); 5239 dwc2_writel(hsotg, dr->dcfg, DCFG); 5240 dwc2_writel(hsotg, dr->dctl, DCTL); 5241 5242 /* De-assert Wakeup Logic */ 5243 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5244 gpwrdn &= ~GPWRDN_PMUACTV; 5245 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5246 5247 if (rem_wakeup) { 5248 udelay(10); 5249 /* Start Remote Wakeup Signaling */ 5250 dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL); 5251 } else { 5252 udelay(50); 5253 /* Set Device programming done bit */ 5254 dctl = dwc2_readl(hsotg, DCTL); 5255 dctl |= DCTL_PWRONPRGDONE; 5256 dwc2_writel(hsotg, dctl, DCTL); 5257 } 5258 /* Wait for interrupts which must be cleared */ 5259 mdelay(2); 5260 /* Clear all pending interupts */ 5261 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 5262 5263 /* Restore global registers */ 5264 ret = dwc2_restore_global_registers(hsotg); 5265 if (ret) { 5266 dev_err(hsotg->dev, "%s: failed to restore registers\n", 5267 __func__); 5268 return ret; 5269 } 5270 5271 /* Restore device registers */ 5272 ret = dwc2_restore_device_registers(hsotg, rem_wakeup); 5273 if (ret) { 5274 dev_err(hsotg->dev, "%s: failed to restore device registers\n", 5275 __func__); 5276 return ret; 5277 } 5278 5279 if (rem_wakeup) { 5280 mdelay(10); 5281 dctl = dwc2_readl(hsotg, DCTL); 5282 dctl &= ~DCTL_RMTWKUPSIG; 5283 dwc2_writel(hsotg, dctl, DCTL); 5284 } 5285 5286 hsotg->hibernated = 0; 5287 hsotg->lx_state = DWC2_L0; 5288 dev_dbg(hsotg->dev, "Hibernation recovery completes here\n"); 5289 5290 return ret; 5291 } 5292