/**
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *      Ben Dooks <ben@simtec.co.uk>
 *      http://armlinux.simtec.co.uk/
 *
 * S3C USB2.0 High-speed / OTG driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_platform.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>

#include "core.h"
#include "hw.h"

/* conversion functions */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
        return container_of(req, struct dwc2_hsotg_req, req);
}

static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
{
        return container_of(ep, struct dwc2_hsotg_ep, ep);
}

static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
        return container_of(gadget, struct dwc2_hsotg, gadget);
}

static inline void __orr32(void __iomem *ptr, u32 val)
{
        dwc2_writel(dwc2_readl(ptr) | val, ptr);
}

static inline void __bic32(void __iomem *ptr, u32 val)
{
        dwc2_writel(dwc2_readl(ptr) & ~val, ptr);
}

static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
                                                u32 ep_index, u32 dir_in)
{
        if (dir_in)
                return hsotg->eps_in[ep_index];
        else
                return hsotg->eps_out[ep_index];
}

/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * g_using_dma is set depending on dts flag.
 */
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
        return hsotg->params.g_dma;
}

/*
 * using_desc_dma - return the descriptor DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using descriptor DMA.
 */
static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
{
        return hsotg->params.g_dma_desc;
}
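/*
 * Example (illustrative, not part of the driver): the g_dma/g_dma_desc
 * parameters tested above are typically populated from device-tree
 * properties of the snps,dwc2 binding. A board might enable buffer DMA
 * with a fragment like the following; node name and FIFO sizes here are
 * hypothetical, property names as in the dwc2 binding:
 *
 *      usbotg: usb@12480000 {
 *              compatible = "snps,dwc2";
 *              ...
 *              g-use-dma;
 *              g-rx-fifo-size = <512>;
 *              g-np-tx-fifo-size = <32>;
 *              g-tx-fifo-size = <256 256 256>;
 *      };
 */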
/**
 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 * @hs_ep: The endpoint
 *
 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 */
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
        hs_ep->target_frame += hs_ep->interval;
        if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
                hs_ep->frame_overrun = 1;
                hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
        } else {
                hs_ep->frame_overrun = 0;
        }
}

/**
 * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to enable
 */
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
        u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
        u32 new_gsintmsk;

        new_gsintmsk = gsintmsk | ints;

        if (new_gsintmsk != gsintmsk) {
                dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
                dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
        }
}

/**
 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
 */
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
        u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
        u32 new_gsintmsk;

        new_gsintmsk = gsintmsk & ~ints;

        if (new_gsintmsk != gsintmsk)
                dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 * @hsotg: The device state
 * @ep: The endpoint index
 * @dir_in: True if direction is in.
 * @en: The enable value, true to enable
 *
 * Set or clear the mask for an individual endpoint's interrupt
 * request.
 */
static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
                                  unsigned int ep, unsigned int dir_in,
                                  unsigned int en)
{
        unsigned long flags;
        u32 bit = 1 << ep;
        u32 daint;

        if (!dir_in)
                bit <<= 16;

        local_irq_save(flags);
        daint = dwc2_readl(hsotg->regs + DAINTMSK);
        if (en)
                daint |= bit;
        else
                daint &= ~bit;
        dwc2_writel(daint, hsotg->regs + DAINTMSK);
        local_irq_restore(flags);
}
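/*
 * Example: DAINTMSK packs IN endpoint bits in [15:0] and OUT endpoint
 * bits in [31:16], which is why dwc2_hsotg_ctrl_epint() shifts the bit
 * left by 16 for OUT endpoints. Enabling interrupts for EP1 IN and
 * EP2 OUT therefore sets bits 1 and 18 respectively:
 *
 *      dwc2_hsotg_ctrl_epint(hsotg, 1, 1, 1);  // DAINTMSK bit 1
 *      dwc2_hsotg_ctrl_epint(hsotg, 2, 0, 1);  // DAINTMSK bit 18
 */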
/**
 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 */
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
        if (hsotg->hw_params.en_multiple_tx_fifo)
                /* In dedicated FIFO mode we need count of IN EPs */
                return (dwc2_readl(hsotg->regs + GHWCFG4) &
                        GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
        else
                /* In shared FIFO mode we need count of Periodic IN EPs */
                return hsotg->hw_params.num_dev_perio_in_ep;
}

/**
 * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
 */
static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
{
        int val = 0;
        int i;
        u32 ep_dirs;

        /*
         * Don't need additional space for ep info control registers in
         * slave mode.
         */
        if (!using_dma(hsotg)) {
                dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
                return 0;
        }

        /*
         * Buffer DMA mode - 1 location per endpoint
         * Descriptor DMA mode - 4 locations per endpoint
         */
        ep_dirs = hsotg->hw_params.dev_ep_dirs;

        for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
                val += ep_dirs & 3 ? 1 : 2;
                ep_dirs >>= 2;
        }

        if (using_desc_dma(hsotg))
                val = val * 4;

        return val;
}

/**
 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 * device mode TX FIFOs
 */
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
        int ep_info_size;
        int addr;
        int tx_addr_max;
        u32 np_tx_fifo_size;

        np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
                                hsotg->params.g_np_tx_fifo_size);

        /* Get Endpoint Info Control block size in DWORDs. */
        ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
        tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;

        addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
        if (tx_addr_max <= addr)
                return 0;

        return tx_addr_max - addr;
}

/**
 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 * TX FIFOs
 */
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
        int tx_fifo_count;
        int tx_fifo_depth;

        tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);

        tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);

        if (!tx_fifo_count)
                return tx_fifo_depth;
        else
                return tx_fifo_depth / tx_fifo_count;
}
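/*
 * Worked example (hypothetical numbers): with total_fifo_size = 2012
 * words, no ep info block (slave mode => ep_info_size = 0),
 * g_rx_fifo_size = 512 and a non-periodic TX FIFO of 128 words, the
 * space left for dedicated TX FIFOs is 2012 - 0 - (512 + 128) = 1372
 * words, so with 4 IN endpoints dwc2_hsotg_tx_fifo_average_depth()
 * yields 1372 / 4 = 343 words per FIFO.
 */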
/**
 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 * @hsotg: The device instance.
 */
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
        unsigned int ep;
        unsigned int addr;
        int timeout;
        u32 val;
        u32 *txfsz = hsotg->params.g_tx_fifo_size;

        /* Reset fifo map if not correctly cleared during previous session */
        WARN_ON(hsotg->fifo_map);
        hsotg->fifo_map = 0;

        /* set RX/NPTX FIFO sizes */
        dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
        dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
                    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
                    hsotg->regs + GNPTXFSIZ);

        /*
         * arrange all the rest of the TX FIFOs, as some versions of this
         * block have overlapping default addresses. This also ensures
         * that if the settings have been changed, then they are set to
         * known values.
         */

        /* start at the end of the GNPTXFSIZ, rounded up */
        addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;

        /*
         * Configure fifos sizes from provided configuration and assign
         * them to endpoints dynamically according to maxpacket size value of
         * given endpoint.
         */
        for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
                if (!txfsz[ep])
                        continue;
                val = addr;
                val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
                WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
                          "insufficient fifo memory");
                addr += txfsz[ep];

                dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
                val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
        }

        dwc2_writel(hsotg->hw_params.total_fifo_size |
                    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
                    hsotg->regs + GDFIFOCFG);
        /*
         * according to p428 of the design guide, we need to ensure that
         * all fifos are flushed before continuing
         */

        dwc2_writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
                    GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL);

        /* wait until the fifos are both flushed */
        timeout = 100;
        while (1) {
                val = dwc2_readl(hsotg->regs + GRSTCTL);

                if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
                        break;

                if (--timeout == 0) {
                        dev_err(hsotg->dev,
                                "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
                                __func__, val);
                        break;
                }

                udelay(1);
        }

        dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}

/**
 * dwc2_hsotg_ep_alloc_request - allocate a USB request for an endpoint
 * @ep: USB endpoint to allocate request for.
 * @flags: Allocation flags
 *
 * Allocate a new USB request structure appropriate for the specified endpoint
 */
static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
                                                       gfp_t flags)
{
        struct dwc2_hsotg_req *req;

        req = kzalloc(sizeof(*req), flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}

/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer.
 */
static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
{
        return hs_ep->periodic;
}

/**
 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint for the request
 * @hs_req: The request being processed.
 *
 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
 */
static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_hsotg_ep *hs_ep,
                                 struct dwc2_hsotg_req *hs_req)
{
        struct usb_request *req = &hs_req->req;

        usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
}
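/*
 * Example (sketch): these ops back the generic gadget API, so a
 * function driver allocates and frees requests through
 * usb_ep_alloc_request()/usb_ep_free_request(), which dispatch here.
 * The buffer, length and callback below are hypothetical:
 *
 *      struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *      if (!req)
 *              return -ENOMEM;
 *      req->buf = buf;
 *      req->length = len;
 *      req->complete = my_complete;    // caller-supplied callback
 *      ...
 *      usb_ep_free_request(ep, req);
 */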
/*
 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 * for Control endpoint
 * @hsotg: The device state.
 *
 * This function will allocate 4 descriptor chains for EP 0: 2 for the
 * Setup stage, and one each for the IN and OUT data/status transactions.
 */
static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
{
        hsotg->setup_desc[0] =
                dmam_alloc_coherent(hsotg->dev,
                                    sizeof(struct dwc2_dma_desc),
                                    &hsotg->setup_desc_dma[0],
                                    GFP_KERNEL);
        if (!hsotg->setup_desc[0])
                goto fail;

        hsotg->setup_desc[1] =
                dmam_alloc_coherent(hsotg->dev,
                                    sizeof(struct dwc2_dma_desc),
                                    &hsotg->setup_desc_dma[1],
                                    GFP_KERNEL);
        if (!hsotg->setup_desc[1])
                goto fail;

        hsotg->ctrl_in_desc =
                dmam_alloc_coherent(hsotg->dev,
                                    sizeof(struct dwc2_dma_desc),
                                    &hsotg->ctrl_in_desc_dma,
                                    GFP_KERNEL);
        if (!hsotg->ctrl_in_desc)
                goto fail;

        hsotg->ctrl_out_desc =
                dmam_alloc_coherent(hsotg->dev,
                                    sizeof(struct dwc2_dma_desc),
                                    &hsotg->ctrl_out_desc_dma,
                                    GFP_KERNEL);
        if (!hsotg->ctrl_out_desc)
                goto fail;

        return 0;

fail:
        return -ENOMEM;
}
/**
 * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO
 */
static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
                                 struct dwc2_hsotg_ep *hs_ep,
                                 struct dwc2_hsotg_req *hs_req)
{
        bool periodic = is_ep_periodic(hs_ep);
        u32 gnptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
        int buf_pos = hs_req->req.actual;
        int to_write = hs_ep->size_loaded;
        void *data;
        int can_write;
        int pkt_round;
        int max_transfer;

        to_write -= (buf_pos - hs_ep->last_load);

        /* if there's nothing to write, get out early */
        if (to_write == 0)
                return 0;

        if (periodic && !hsotg->dedicated_fifos) {
                u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
                int size_left;
                int size_done;

                /*
                 * work out how much data was loaded so we can calculate
                 * how much data is left in the fifo.
                 */

                size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

                /*
                 * if shared fifo, we cannot write anything until the
                 * previous data has been completely sent.
                 */
                if (hs_ep->fifo_load != 0) {
                        dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
                        return -ENOSPC;
                }

                dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
                        __func__, size_left,
                        hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

                /* how much of the data has moved */
                size_done = hs_ep->size_loaded - size_left;

                /* how much data is left in the fifo */
                can_write = hs_ep->fifo_load - size_done;
                dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
                        __func__, can_write);

                can_write = hs_ep->fifo_size - can_write;
                dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
                        __func__, can_write);

                if (can_write <= 0) {
                        dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
                        return -ENOSPC;
                }
        } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
                can_write = dwc2_readl(hsotg->regs +
                                       DTXFSTS(hs_ep->fifo_index));

                can_write &= 0xffff;
                can_write *= 4;
        } else {
                if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
                        dev_dbg(hsotg->dev,
                                "%s: no queue slots available (0x%08x)\n",
                                __func__, gnptxsts);

                        dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
                        return -ENOSPC;
                }

                can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
                can_write *= 4;         /* fifo size is in 32bit quantities. */
        }

        max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;

        dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
                __func__, gnptxsts, can_write, to_write, max_transfer);

        /*
         * limit to 512 bytes of data, it seems at least on the non-periodic
         * FIFO, requests of >512 cause the endpoint to get stuck with a
         * fragment of the end of the transfer in it.
         */
        if (can_write > 512 && !periodic)
                can_write = 512;

        /*
         * limit the write to one max-packet size worth of data, but allow
         * the transfer to return that it did not run out of fifo space
         * doing it.
         */
        if (to_write > max_transfer) {
                to_write = max_transfer;

                /* it's needed only when we do not use dedicated fifos */
                if (!hsotg->dedicated_fifos)
                        dwc2_hsotg_en_gsint(hsotg,
                                            periodic ? GINTSTS_PTXFEMP :
                                            GINTSTS_NPTXFEMP);
        }

        /* see if we can write data */

        if (to_write > can_write) {
                to_write = can_write;
                pkt_round = to_write % max_transfer;

                /*
                 * Round the write down to an
                 * exact number of packets.
                 *
                 * Note, we do not currently check to see if we can ever
                 * write a full packet or not to the FIFO.
                 */

                if (pkt_round)
                        to_write -= pkt_round;

                /*
                 * enable correct FIFO interrupt to alert us when there
                 * is more room left.
                 */

                /* it's needed only when we do not use dedicated fifos */
                if (!hsotg->dedicated_fifos)
                        dwc2_hsotg_en_gsint(hsotg,
                                            periodic ? GINTSTS_PTXFEMP :
                                            GINTSTS_NPTXFEMP);
        }

        dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
                to_write, hs_req->req.length, can_write, buf_pos);

        if (to_write <= 0)
                return -ENOSPC;

        hs_req->req.actual = buf_pos + to_write;
        hs_ep->total_data += to_write;

        if (periodic)
                hs_ep->fifo_load += to_write;

        to_write = DIV_ROUND_UP(to_write, 4);
        data = hs_req->req.buf + buf_pos;

        iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);

        return (to_write >= can_write) ? -ENOSPC : 0;
}
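/*
 * Worked example of the rounding above: with maxpacket = 512 and
 * mc = 1 (max_transfer = 512), to_write = 512 and can_write = 256,
 * to_write is clamped to 256 and then rounded down by
 * pkt_round = 256 % 512 = 256, leaving nothing to write. The routine
 * returns -ENOSPC, and (when not using dedicated FIFOs) the FIFO-empty
 * interrupt enabled in that branch re-triggers the write later, once a
 * full packet fits.
 */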
/**
 * get_ep_limit - get the maximum data length for this endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * so that transfers that are too long can be split.
 */
static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
{
        int index = hs_ep->index;
        unsigned int maxsize;
        unsigned int maxpkt;

        if (index != 0) {
                maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
                maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
        } else {
                maxsize = 64 + 64;
                if (hs_ep->dir_in)
                        maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
                else
                        maxpkt = 2;
        }

        /* we made the constant loading easier above by using +1 */
        maxpkt--;
        maxsize--;

        /*
         * constrain by packet count if maxpkts*pktsize is greater
         * than the length register size.
         */

        if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
                maxsize = maxpkt * hs_ep->ep.maxpacket;

        return maxsize;
}

/**
 * dwc2_hsotg_read_frameno - read current frame number
 * @hsotg: The device instance
 *
 * Return the current frame number
 */
static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
        u32 dsts;

        dsts = dwc2_readl(hsotg->regs + DSTS);
        dsts &= DSTS_SOFFN_MASK;
        dsts >>= DSTS_SOFFN_SHIFT;

        return dsts;
}

/**
 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
 * DMA descriptor chain prepared for specific endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * depending on its descriptor chain capacity so that transfers that
 * are too long can be split.
 */
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
        int is_isoc = hs_ep->isochronous;
        unsigned int maxsize;

        if (is_isoc)
                maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
                                          DEV_DMA_ISOC_RX_NBYTES_LIMIT;
        else
                maxsize = DEV_DMA_NBYTES_LIMIT;

        /*
         * Above, the size of one descriptor was chosen; multiply it by
         * the number of descriptors in the chain.
         */
        maxsize *= MAX_DMA_DESC_NUM_GENERIC;

        return maxsize;
}
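/*
 * Worked example for get_ep_limit() (assuming the usual 19-bit XferSize
 * and 10-bit PktCnt register fields, i.e. DXEPTSIZ_XFERSIZE_LIMIT =
 * 0x7ffff and DXEPTSIZ_PKTCNT_LIMIT = 0x3ff): for a bulk endpoint with
 * maxpacket = 512, maxsize = 0x7ffff and maxpkt = 0x3ff; since
 * 0x3ff * 512 = 0x7fe00 < 0x7ffff, the packet count is the binding
 * constraint and the limit returned is 0x7fe00 bytes.
 */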
/*
 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 * @hs_ep: The endpoint
 * @mask: RX/TX bytes mask to be defined
 *
 * Returns maximum data payload for one descriptor after analyzing endpoint
 * characteristics.
 * DMA descriptor transfer bytes limit depends on EP type:
 * Control out - MPS,
 * Isochronous - descriptor rx/tx bytes bitfield limit,
 * Control In/Bulk/Interrupt - multiple of mps. This ensures that no
 * packet is ever split across descriptors.
 *
 * Selects corresponding mask for RX/TX bytes as well.
 */
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
        u32 mps = hs_ep->ep.maxpacket;
        int dir_in = hs_ep->dir_in;
        u32 desc_size = 0;

        if (!hs_ep->index && !dir_in) {
                desc_size = mps;
                *mask = DEV_DMA_NBYTES_MASK;
        } else if (hs_ep->isochronous) {
                if (dir_in) {
                        desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
                        *mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
                } else {
                        desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
                        *mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
                }
        } else {
                desc_size = DEV_DMA_NBYTES_LIMIT;
                *mask = DEV_DMA_NBYTES_MASK;

                /* Round down desc_size to be mps multiple */
                desc_size -= desc_size % mps;
        }

        return desc_size;
}

/*
 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 * @hs_ep: The endpoint
 * @dma_buff: DMA address to use
 * @len: Length of the transfer
 *
 * This function will iterate over descriptor chain and fill its entries
 * with corresponding information based on transfer data.
 */
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
                                                 dma_addr_t dma_buff,
                                                 unsigned int len)
{
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        int dir_in = hs_ep->dir_in;
        struct dwc2_dma_desc *desc = hs_ep->desc_list;
        u32 mps = hs_ep->ep.maxpacket;
        u32 maxsize = 0;
        u32 offset = 0;
        u32 mask = 0;
        int i;

        maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

        hs_ep->desc_count = (len / maxsize) +
                            ((len % maxsize) ? 1 : 0);
        if (len == 0)
                hs_ep->desc_count = 1;

        for (i = 0; i < hs_ep->desc_count; ++i) {
                desc->status = 0;
                desc->status |= (DEV_DMA_BUFF_STS_HBUSY
                                 << DEV_DMA_BUFF_STS_SHIFT);

                if (len > maxsize) {
                        if (!hs_ep->index && !dir_in)
                                desc->status |= (DEV_DMA_L | DEV_DMA_IOC);

                        desc->status |= (maxsize <<
                                         DEV_DMA_NBYTES_SHIFT & mask);
                        desc->buf = dma_buff + offset;

                        len -= maxsize;
                        offset += maxsize;
                } else {
                        desc->status |= (DEV_DMA_L | DEV_DMA_IOC);

                        if (dir_in)
                                desc->status |= (len % mps) ? DEV_DMA_SHORT :
                                        ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
                        if (len > maxsize)
                                dev_err(hsotg->dev, "wrong len %d\n", len);

                        desc->status |=
                                len << DEV_DMA_NBYTES_SHIFT & mask;
                        desc->buf = dma_buff + offset;
                }

                desc->status &= ~DEV_DMA_BUFF_STS_MASK;
                desc->status |= (DEV_DMA_BUFF_STS_HREADY
                                 << DEV_DMA_BUFF_STS_SHIFT);
                desc++;
        }
}
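/*
 * Example: for a bulk IN transfer of len = maxsize + 100 bytes the
 * function above builds a two-entry chain: descriptor 0 carries maxsize
 * bytes with only the buffer-status and byte-count fields set, while
 * descriptor 1 carries the final 100 bytes with DEV_DMA_L and
 * DEV_DMA_IOC set (plus DEV_DMA_SHORT if 100 is not a multiple of
 * maxpacket), so the core raises a single completion interrupt at the
 * end of the chain.
 */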
/*
 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 * @hs_ep: The isochronous endpoint.
 * @dma_buff: usb requests dma buffer.
 * @len: usb request transfer length.
 *
 * Finds the index of the first free entry in either the bottom or top
 * half of the descriptor chain, depending on which is under SW control
 * and not processed by HW. Then fills that descriptor with the data of
 * the arrived usb request and frame info, sets the Last and IOC bits,
 * and increments next_desc. If the filled descriptor is not the first
 * one, removes the L bit from the previous descriptor's status.
 */
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
                                      dma_addr_t dma_buff, unsigned int len)
{
        struct dwc2_dma_desc *desc;
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        u32 index;
        u32 maxsize = 0;
        u32 mask = 0;

        maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
        if (len > maxsize) {
                dev_err(hsotg->dev, "wrong len %d\n", len);
                return -EINVAL;
        }

        /*
         * If SW has already filled half of chain, then return and wait for
         * the other chain to be processed by HW.
         */
        if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
                return -EBUSY;

        /* Increment frame number by interval for IN */
        if (hs_ep->dir_in)
                dwc2_gadget_incr_frame_num(hs_ep);

        index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
                hs_ep->next_desc;

        /* Sanity check of calculated index */
        if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
            (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
                dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
                return -EINVAL;
        }

        desc = &hs_ep->desc_list[index];

        /* Clear L bit of previous desc if more than one entries in the chain */
        if (hs_ep->next_desc)
                hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;

        dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
                __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);

        desc->status = 0;
        desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);

        desc->buf = dma_buff;
        desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
                         ((len << DEV_DMA_NBYTES_SHIFT) & mask));

        if (hs_ep->dir_in) {
                desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
                                 DEV_DMA_ISOC_PID_MASK) |
                                ((len % hs_ep->ep.maxpacket) ?
                                 DEV_DMA_SHORT : 0) |
                                ((hs_ep->target_frame <<
                                  DEV_DMA_ISOC_FRNUM_SHIFT) &
                                 DEV_DMA_ISOC_FRNUM_MASK);
        }

        desc->status &= ~DEV_DMA_BUFF_STS_MASK;
        desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);

        /* Update index of last configured entry in the chain */
        hs_ep->next_desc++;

        return 0;
}
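/*
 * Example (assuming MAX_DMA_DESC_NUM_GENERIC = 64): the chain is split
 * into two halves of 32 entries. While HW owns chain 0 (entries 0..31),
 * SW fills chain 1, so with isoc_chain_num = 1 and next_desc = 5 the
 * descriptor written above is entry 32 * 1 + 5 = 37, and entry 36 has
 * its DEV_DMA_L bit cleared so the chain keeps going.
 */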
/*
 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 * @hs_ep: The isochronous endpoint.
 *
 * Prepare first descriptor chain for isochronous endpoints. Afterwards
 * write DMA address to HW and enable the endpoint.
 *
 * Switch between descriptor chains via isoc_chain_num to give SW opportunity
 * to prepare second descriptor chain while first one is being processed by HW.
 */
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        struct dwc2_hsotg_req *hs_req, *treq;
        int index = hs_ep->index;
        int ret;
        u32 dma_reg;
        u32 depctl;
        u32 ctrl;

        if (list_empty(&hs_ep->queue)) {
                dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
                return;
        }

        list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
                ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
                                                 hs_req->req.length);
                if (ret) {
                        dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
                        break;
                }
        }

        depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
        dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);

        /* write descriptor chain address to control register */
        dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);

        ctrl = dwc2_readl(hsotg->regs + depctl);
        ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
        dwc2_writel(ctrl, hsotg->regs + depctl);

        /* Switch ISOC descriptor chain number being processed by SW */
        hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
        hs_ep->next_desc = 0;
}
/**
 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
 * @hsotg: The controller state.
 * @hs_ep: The endpoint to process a request for
 * @hs_req: The request to start.
 * @continuing: True if we are doing more for the current request.
 *
 * Start the given request running by setting the endpoint registers
 * appropriately, and writing any data to the FIFOs.
 */
static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                                 struct dwc2_hsotg_ep *hs_ep,
                                 struct dwc2_hsotg_req *hs_req,
                                 bool continuing)
{
        struct usb_request *ureq = &hs_req->req;
        int index = hs_ep->index;
        int dir_in = hs_ep->dir_in;
        u32 epctrl_reg;
        u32 epsize_reg;
        u32 epsize;
        u32 ctrl;
        unsigned int length;
        unsigned int packets;
        unsigned int maxreq;
        unsigned int dma_reg;

        if (index != 0) {
                if (hs_ep->req && !continuing) {
                        dev_err(hsotg->dev, "%s: active request\n", __func__);
                        WARN_ON(1);
                        return;
                } else if (hs_ep->req != hs_req && continuing) {
                        dev_err(hsotg->dev,
                                "%s: continue different req\n", __func__);
                        WARN_ON(1);
                        return;
                }
        }

        dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
        epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

        dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
                __func__, dwc2_readl(hsotg->regs + epctrl_reg), index,
                hs_ep->dir_in ? "in" : "out");

        /* If endpoint is stalled, we will restart request later */
        ctrl = dwc2_readl(hsotg->regs + epctrl_reg);

        if (index && ctrl & DXEPCTL_STALL) {
                dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
                return;
        }

        length = ureq->length - ureq->actual;
        dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
                ureq->length, ureq->actual);

        if (!using_desc_dma(hsotg))
                maxreq = get_ep_limit(hs_ep);
        else
                maxreq = dwc2_gadget_get_chain_limit(hs_ep);

        if (length > maxreq) {
                int round = maxreq % hs_ep->ep.maxpacket;

                dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
                        __func__, length, maxreq, round);

                /* round down to multiple of packets */
                if (round)
                        maxreq -= round;

                length = maxreq;
        }

        if (length)
                packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
        else
                packets = 1;    /* send one packet if length is zero. */

        if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
                dev_err(hsotg->dev, "req length > maxpacket*mc\n");
                return;
        }

        if (dir_in && index != 0) {
                if (hs_ep->isochronous)
                        epsize = DXEPTSIZ_MC(packets);
                else
                        epsize = DXEPTSIZ_MC(1);
        } else {
                epsize = 0;
        }

        /*
         * zero length packet should be programmed on its own and should not
         * be counted in DIEPTSIZ.PktCnt with other packets.
         */
        if (dir_in && ureq->zero && !continuing) {
                /* Test if zlp is actually required. */
                if ((ureq->length >= hs_ep->ep.maxpacket) &&
                    !(ureq->length % hs_ep->ep.maxpacket))
                        hs_ep->send_zlp = 1;
        }

        epsize |= DXEPTSIZ_PKTCNT(packets);
        epsize |= DXEPTSIZ_XFERSIZE(length);

        dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
                __func__, packets, length, ureq->length, epsize, epsize_reg);

        /* store the request as the current one we're doing */
        hs_ep->req = hs_req;

        if (using_desc_dma(hsotg)) {
                u32 offset = 0;
                u32 mps = hs_ep->ep.maxpacket;

                /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
                if (!dir_in) {
                        if (!index)
                                length = mps;
                        else if (length % mps)
                                length += (mps - (length % mps));
                }

                /*
                 * If more data to send, adjust DMA for EP0 out data stage.
                 * ureq->dma stays unchanged, hence increment it by already
                 * passed data count before starting new transaction.
                 */
                if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
                    continuing)
                        offset = ureq->actual;

                /* Fill DDMA chain entries */
                dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
                                                     length);

                /* write descriptor chain address to control register */
                dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);

                dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
                        __func__, (u32)hs_ep->desc_list_dma, dma_reg);
        } else {
                /* write size / packets */
                dwc2_writel(epsize, hsotg->regs + epsize_reg);

                if (using_dma(hsotg) && !continuing && (length != 0)) {
                        /*
                         * write DMA address to control register, buffer
                         * already synced by dwc2_hsotg_ep_queue().
                         */

                        dwc2_writel(ureq->dma, hsotg->regs + dma_reg);

                        dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
                                __func__, &ureq->dma, dma_reg);
                }
        }

        if (hs_ep->isochronous && hs_ep->interval == 1) {
                hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
                dwc2_gadget_incr_frame_num(hs_ep);

                if (hs_ep->target_frame & 0x1)
                        ctrl |= DXEPCTL_SETODDFR;
                else
                        ctrl |= DXEPCTL_SETEVENFR;
        }

        ctrl |= DXEPCTL_EPENA;  /* ensure ep enabled */

        dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);

        /* For Setup request do not clear NAK */
        if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
                ctrl |= DXEPCTL_CNAK;   /* clear NAK set by core */

        dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
        dwc2_writel(ctrl, hsotg->regs + epctrl_reg);

        /*
         * set these, it seems that DMA support increments past the end
         * of the packet buffer so we need to calculate the length from
         * this information.
         */
        hs_ep->size_loaded = length;
        hs_ep->last_load = ureq->actual;

        if (dir_in && !using_dma(hsotg)) {
                /* set these anyway, we may need them for non-periodic in */
                hs_ep->fifo_load = 0;

                dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
        }

        /*
         * Note, trying to clear the NAK here causes problems with transmit
         * on the S3C6400 ending up with the TXFIFO becoming full.
         */
        /* check ep is enabled */
        if (!(dwc2_readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA))
                dev_dbg(hsotg->dev,
                        "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
                        index, dwc2_readl(hsotg->regs + epctrl_reg));

        dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
                __func__, dwc2_readl(hsotg->regs + epctrl_reg));

        /* enable ep interrupts */
        dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}

/**
 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly set up for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
 */
static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
                              struct dwc2_hsotg_ep *hs_ep,
                              struct usb_request *req)
{
        int ret;

        ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
        if (ret)
                goto dma_error;

        return 0;

dma_error:
        dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
                __func__, req->buf, req->length);

        return -EIO;
}

static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
                                                 struct dwc2_hsotg_ep *hs_ep,
                                                 struct dwc2_hsotg_req *hs_req)
{
        void *req_buf = hs_req->req.buf;

        /* If dma is not being used or buffer is aligned */
        if (!using_dma(hsotg) || !((long)req_buf & 3))
                return 0;

        WARN_ON(hs_req->saved_req_buf);

        dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
                hs_ep->ep.name, req_buf, hs_req->req.length);

        hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
        if (!hs_req->req.buf) {
                hs_req->req.buf = req_buf;
                dev_err(hsotg->dev,
                        "%s: unable to allocate memory for bounce buffer\n",
                        __func__);
                return -ENOMEM;
        }

        /* Save actual buffer */
        hs_req->saved_req_buf = req_buf;

        if (hs_ep->dir_in)
                memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
        return 0;
}

static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
                                         struct dwc2_hsotg_ep *hs_ep,
                                         struct dwc2_hsotg_req *hs_req)
{
        /* If dma is not being used or buffer was aligned */
        if (!using_dma(hsotg) || !hs_req->saved_req_buf)
                return;

        dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
                hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);

        /* Copy data from bounce buffer on successful out transfer */
        if (!hs_ep->dir_in && !hs_req->req.status)
                memcpy(hs_req->saved_req_buf, hs_req->req.buf,
                       hs_req->req.actual);

        /* Free bounce buffer */
        kfree(hs_req->req.buf);

        hs_req->req.buf = hs_req->saved_req_buf;
        hs_req->saved_req_buf = NULL;
}
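/*
 * Example: the AMBA DMA engine requires 32-bit-aligned buffers (see
 * using_dma() above), so a request whose buf ends up at, say, an
 * address ending in 0x2 has ((long)req_buf & 3) != 0 and is
 * transparently bounced: an aligned buffer is kmalloc'd, data is copied
 * in before start for IN transfers and copied back on completion for
 * OUT transfers, and the caller's buffer pointer is restored afterwards.
 */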
/**
 * dwc2_gadget_target_frame_elapsed - Checks target frame
 * @hs_ep: The driver endpoint to check
 *
 * Returns true if the targeted frame has elapsed; if so, we need to drop
 * the corresponding transfer.
 */
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        u32 target_frame = hs_ep->target_frame;
        u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
        bool frame_overrun = hs_ep->frame_overrun;

        if (!frame_overrun && current_frame >= target_frame)
                return true;

        if (frame_overrun && current_frame >= target_frame &&
            ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
                return true;

        return false;
}

/*
 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
 * @hsotg: The driver state
 * @hs_ep: the ep descriptor chain is for
 *
 * Called to update EP0 structure's pointers depending on the stage of
 * the control transfer.
 */
static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
                                          struct dwc2_hsotg_ep *hs_ep)
{
        switch (hsotg->ep0_state) {
        case DWC2_EP0_SETUP:
        case DWC2_EP0_STATUS_OUT:
                hs_ep->desc_list = hsotg->setup_desc[0];
                hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
                break;
        case DWC2_EP0_DATA_IN:
        case DWC2_EP0_STATUS_IN:
                hs_ep->desc_list = hsotg->ctrl_in_desc;
                hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
                break;
        case DWC2_EP0_DATA_OUT:
                hs_ep->desc_list = hsotg->ctrl_out_desc;
                hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
                break;
        default:
                dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
                        hsotg->ep0_state);
                return -EINVAL;
        }

        return 0;
}
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
                               gfp_t gfp_flags)
{
        struct dwc2_hsotg_req *hs_req = our_req(req);
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hs = hs_ep->parent;
        bool first;
        int ret;

        dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
                ep->name, req, req->length, req->buf, req->no_interrupt,
                req->zero, req->short_not_ok);

        /* Prevent new request submission when controller is suspended */
        if (hs->lx_state == DWC2_L2) {
                dev_dbg(hs->dev, "%s: don't submit request while suspended\n",
                        __func__);
                return -EAGAIN;
        }

        /* initialise status of the request */
        INIT_LIST_HEAD(&hs_req->queue);
        req->actual = 0;
        req->status = -EINPROGRESS;

        ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
        if (ret)
                return ret;

        /* if we're using DMA, sync the buffers as necessary */
        if (using_dma(hs)) {
                ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
                if (ret)
                        return ret;
        }
        /* If using descriptor DMA configure EP0 descriptor chain pointers */
        if (using_desc_dma(hs) && !hs_ep->index) {
                ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
                if (ret)
                        return ret;
        }

        first = list_empty(&hs_ep->queue);
        list_add_tail(&hs_req->queue, &hs_ep->queue);

        /*
         * Handle DDMA isochronous transfers separately - just add new entry
         * to the half of descriptor chain that is not processed by HW.
         * Transfer will be started once SW gets either one of NAK or
         * OutTknEpDis interrupts.
         */
        if (using_desc_dma(hs) && hs_ep->isochronous &&
            hs_ep->target_frame != TARGET_FRAME_INITIAL) {
                ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
                                                 hs_req->req.length);
                if (ret)
                        dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);

                return 0;
        }

        if (first) {
                if (!hs_ep->isochronous) {
                        dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
                        return 0;
                }

                while (dwc2_gadget_target_frame_elapsed(hs_ep))
                        dwc2_gadget_incr_frame_num(hs_ep);

                if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
                        dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
        }
        return 0;
}

static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
                                    gfp_t gfp_flags)
{
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hs = hs_ep->parent;
        unsigned long flags = 0;
        int ret = 0;

        spin_lock_irqsave(&hs->lock, flags);
        ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
        spin_unlock_irqrestore(&hs->lock, flags);

        return ret;
}

static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
                                       struct usb_request *req)
{
        struct dwc2_hsotg_req *hs_req = our_req(req);

        kfree(hs_req);
}

/**
 * dwc2_hsotg_complete_oursetup - setup completion callback
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself
 * submitted that need cleaning up.
 */
static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
                                         struct usb_request *req)
{
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hsotg = hs_ep->parent;

        dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);

        dwc2_hsotg_ep_free_request(ep, req);
}

/**
 * ep_from_windex - convert control wIndex value to endpoint
 * @hsotg: The driver state.
 * @windex: The control request wIndex field (in host order).
 *
 * Convert the given wIndex into a pointer to a driver endpoint
 * structure, or return NULL if it is not a valid endpoint.
 */
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
                                            u32 windex)
{
        struct dwc2_hsotg_ep *ep;
        int dir = (windex & USB_DIR_IN) ? 1 : 0;
        int idx = windex & 0x7F;

        if (windex >= 0x100)
                return NULL;

        if (idx > hsotg->num_of_eps)
                return NULL;

        ep = index_to_ep(hsotg, idx, dir);

        if (idx && ep->dir_in != dir)
                return NULL;

        return ep;
}

/**
 * dwc2_hsotg_set_test_mode - Enable usb Test Modes
 * @hsotg: The driver state.
 * @testmode: requested usb test mode
 * Enable usb Test Mode requested by the Host.
 */
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
{
        int dctl = dwc2_readl(hsotg->regs + DCTL);

        dctl &= ~DCTL_TSTCTL_MASK;
        switch (testmode) {
        case TEST_J:
        case TEST_K:
        case TEST_SE0_NAK:
        case TEST_PACKET:
        case TEST_FORCE_EN:
                dctl |= testmode << DCTL_TSTCTL_SHIFT;
                break;
        default:
                return -EINVAL;
        }
        dwc2_writel(dctl, hsotg->regs + DCTL);
        return 0;
}
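/*
 * Example: per the USB 2.0 spec the host requests a test mode with
 * SET_FEATURE(TEST_MODE), the selector travelling in the high byte of
 * wIndex; e.g. wIndex = 0x0400 selects TEST_PACKET.
 * dwc2_hsotg_process_req_feature() below stores wIndex >> 8 in
 * hsotg->test_mode, which this helper later programs into the DCTL
 * test-control field.
 */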
/**
 * dwc2_hsotg_send_reply - send reply to control request
 * @hsotg: The device state
 * @ep: Endpoint 0
 * @buff: Buffer for request
 * @length: Length of reply.
 *
 * Create a request and queue it on the given endpoint. This is useful as
 * an internal method of sending replies to certain control requests, etc.
 */
static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
                                 struct dwc2_hsotg_ep *ep,
                                 void *buff,
                                 int length)
{
        struct usb_request *req;
        int ret;

        dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);

        req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
        hsotg->ep0_reply = req;
        if (!req) {
                dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
                return -ENOMEM;
        }

        req->buf = hsotg->ep0_buff;
        req->length = length;
        /*
         * zero flag is for sending zlp in DATA IN stage. It has no impact on
         * STATUS stage.
         */
        req->zero = 0;
        req->complete = dwc2_hsotg_complete_oursetup;

        if (length)
                memcpy(req->buf, buff, length);

        ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
        if (ret) {
                dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
                return ret;
        }

        return 0;
}

/**
 * dwc2_hsotg_process_req_status - process request GET_STATUS
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
                                         struct usb_ctrlrequest *ctrl)
{
        struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
        struct dwc2_hsotg_ep *ep;
        __le16 reply;
        int ret;

        dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

        if (!ep0->dir_in) {
                dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
                return -EINVAL;
        }

        switch (ctrl->bRequestType & USB_RECIP_MASK) {
        case USB_RECIP_DEVICE:
                /*
                 * bit 0 => self powered
                 * bit 1 => remote wakeup
                 */
                reply = cpu_to_le16(0);
                break;

        case USB_RECIP_INTERFACE:
                /* currently, the data result should be zero */
                reply = cpu_to_le16(0);
                break;

        case USB_RECIP_ENDPOINT:
                ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
                if (!ep)
                        return -ENOENT;

                reply = cpu_to_le16(ep->halted ? 1 : 0);
                break;

        default:
                return 0;
        }

        if (le16_to_cpu(ctrl->wLength) != 2)
                return -EINVAL;

        ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
        if (ret) {
                dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
                return ret;
        }

        return 1;
}

static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);

/**
 * get_ep_head - return the first request on the endpoint
 * @hs_ep: The controller endpoint to get
 *
 * Get the first request on the endpoint.
 */
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
        return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
                                        queue);
}
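/*
 * Example: a GET_STATUS directed at a halted endpoint yields the
 * two-byte little-endian reply 0x0001 (bit 0 = ENDPOINT_HALT), while
 * the device-recipient reply above is always 0x0000 since neither
 * self-powered nor remote-wakeup status is reported yet.
 */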
/**
 * dwc2_gadget_start_next_request - Starts next request from ep queue
 * @hs_ep: Endpoint structure
 *
 * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
 * in its handler. Hence we need to unmask it here to be able to do
 * resynchronization.
 */
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
        u32 mask;
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        int dir_in = hs_ep->dir_in;
        struct dwc2_hsotg_req *hs_req;
        u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;

        if (!list_empty(&hs_ep->queue)) {
                hs_req = get_ep_head(hs_ep);
                dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
                return;
        }
        if (!hs_ep->isochronous)
                return;

        if (dir_in) {
                dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
                        __func__);
        } else {
                dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
                        __func__);
                mask = dwc2_readl(hsotg->regs + epmsk_reg);
                mask |= DOEPMSK_OUTTKNEPDISMSK;
                dwc2_writel(mask, hsotg->regs + epmsk_reg);
        }
}

/**
 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
                                          struct usb_ctrlrequest *ctrl)
{
        struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
        struct dwc2_hsotg_req *hs_req;
        bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
        struct dwc2_hsotg_ep *ep;
        int ret;
        bool halted;
        u32 recip;
        u32 wValue;
        u32 wIndex;

        dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
                __func__, set ? "SET" : "CLEAR");

        wValue = le16_to_cpu(ctrl->wValue);
        wIndex = le16_to_cpu(ctrl->wIndex);
        recip = ctrl->bRequestType & USB_RECIP_MASK;

        switch (recip) {
        case USB_RECIP_DEVICE:
                switch (wValue) {
                case USB_DEVICE_TEST_MODE:
                        if ((wIndex & 0xff) != 0)
                                return -EINVAL;
                        if (!set)
                                return -EINVAL;

                        hsotg->test_mode = wIndex >> 8;
                        ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
                        if (ret) {
                                dev_err(hsotg->dev,
                                        "%s: failed to send reply\n", __func__);
                                return ret;
                        }
                        break;
                default:
                        return -ENOENT;
                }
                break;

        case USB_RECIP_ENDPOINT:
                ep = ep_from_windex(hsotg, wIndex);
                if (!ep) {
                        dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
                                __func__, wIndex);
                        return -ENOENT;
                }

                switch (wValue) {
                case USB_ENDPOINT_HALT:
                        halted = ep->halted;

                        dwc2_hsotg_ep_sethalt(&ep->ep, set, true);

                        ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
                        if (ret) {
                                dev_err(hsotg->dev,
                                        "%s: failed to send reply\n", __func__);
                                return ret;
                        }

                        /*
                         * we have to complete all requests for ep if it was
                         * halted, and the halt was cleared by CLEAR_FEATURE
                         */

                        if (!set && halted) {
                                /*
                                 * If we have request in progress,
                                 * then complete it
                                 */
                                if (ep->req) {
                                        hs_req = ep->req;
                                        ep->req = NULL;
                                        list_del_init(&hs_req->queue);
                                        if (hs_req->req.complete) {
                                                spin_unlock(&hsotg->lock);
                                                usb_gadget_giveback_request(
                                                        &ep->ep, &hs_req->req);
                                                spin_lock(&hsotg->lock);
                                        }
                                }

                                /* If we have pending request, then start it */
                                if (!ep->req)
                                        dwc2_gadget_start_next_request(ep);
                        }

                        break;

                default:
                        return -ENOENT;
                }
                break;
        default:
                return -ENOENT;
        }
        return 1;
}

static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
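/*
 * Example: a ClearFeature(ENDPOINT_HALT) on a previously halted
 * endpoint runs through dwc2_hsotg_process_req_feature() above as:
 * clear the halt via dwc2_hsotg_ep_sethalt(), send the zero-length
 * status reply, give back any request that was in flight when the halt
 * hit, then restart the queue via dwc2_gadget_start_next_request().
 */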
/**
 * dwc2_hsotg_stall_ep0 - stall ep0
 * @hsotg: The device state
 *
 * Set stall for ep0 as response for setup request.
 */
static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
        struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
        u32 reg;
        u32 ctrl;

        dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
        reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;

        /*
         * DxEPCTL_Stall will be cleared by EP once it has
         * taken effect, so no need to clear later.
         */

        ctrl = dwc2_readl(hsotg->regs + reg);
        ctrl |= DXEPCTL_STALL;
        ctrl |= DXEPCTL_CNAK;
        dwc2_writel(ctrl, hsotg->regs + reg);

        dev_dbg(hsotg->dev,
                "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
                ctrl, reg, dwc2_readl(hsotg->regs + reg));

        /*
         * complete won't be called, so we enqueue
         * setup request here
         */
        dwc2_hsotg_enqueue_setup(hsotg);
}

/**
 * dwc2_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
 */
static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
                                       struct usb_ctrlrequest *ctrl)
{
        struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
        int ret = 0;
        u32 dcfg;

        dev_dbg(hsotg->dev,
                "ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
                ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
                ctrl->wIndex, ctrl->wLength);

        if (ctrl->wLength == 0) {
                ep0->dir_in = 1;
                hsotg->ep0_state = DWC2_EP0_STATUS_IN;
        } else if (ctrl->bRequestType & USB_DIR_IN) {
                ep0->dir_in = 1;
                hsotg->ep0_state = DWC2_EP0_DATA_IN;
        } else {
                ep0->dir_in = 0;
                hsotg->ep0_state = DWC2_EP0_DATA_OUT;
        }

        if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (ctrl->bRequest) {
                case USB_REQ_SET_ADDRESS:
                        hsotg->connected = 1;
                        dcfg = dwc2_readl(hsotg->regs + DCFG);
                        dcfg &= ~DCFG_DEVADDR_MASK;
                        dcfg |= (le16_to_cpu(ctrl->wValue) <<
                                 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
                        dwc2_writel(dcfg, hsotg->regs + DCFG);

                        dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

                        ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
                        return;

                case USB_REQ_GET_STATUS:
                        ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
                        break;

                case USB_REQ_CLEAR_FEATURE:
                case USB_REQ_SET_FEATURE:
                        ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
                        break;
                }
        }

        /* as a fallback, try delivering it to the driver to deal with */

        if (ret == 0 && hsotg->driver) {
                spin_unlock(&hsotg->lock);
                ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
                spin_lock(&hsotg->lock);
                if (ret < 0)
                        dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
        }

        /*
         * the request is either unhandled or malformed, so respond with a
         * STALL for the status stage to indicate failure.
         */

        if (ret < 0)
                dwc2_hsotg_stall_ep0(hsotg);
}
/**
 * dwc2_hsotg_complete_setup - completion of a setup transfer
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself submitted for
 * EP0 setup packets
 */
static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
                                      struct usb_request *req)
{
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hsotg = hs_ep->parent;

        if (req->status < 0) {
                dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
                return;
        }

        spin_lock(&hsotg->lock);
        if (req->actual == 0)
                dwc2_hsotg_enqueue_setup(hsotg);
        else
                dwc2_hsotg_process_control(hsotg, req->buf);
        spin_unlock(&hsotg->lock);
}

/**
 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 * from the host.
 */
static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
{
        struct usb_request *req = hsotg->ctrl_req;
        struct dwc2_hsotg_req *hs_req = our_req(req);
        int ret;

        dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

        req->zero = 0;
        req->length = 8;
        req->buf = hsotg->ctrl_buff;
        req->complete = dwc2_hsotg_complete_setup;

        if (!list_empty(&hs_req->queue)) {
                dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
                return;
        }

        hsotg->eps_out[0]->dir_in = 0;
        hsotg->eps_out[0]->send_zlp = 0;
        hsotg->ep0_state = DWC2_EP0_SETUP;

        ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
        if (ret < 0) {
                dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
                /*
                 * Don't think there's much we can do other than watch the
                 * driver fail.
                 */
        }
}

static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
                                   struct dwc2_hsotg_ep *hs_ep)
{
        u32 ctrl;
        u8 index = hs_ep->index;
        u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
        u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

        if (hs_ep->dir_in)
                dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
                        index);
        else
                dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
                        index);
        if (using_desc_dma(hsotg)) {
                /* No specific buffer needed for ep0 ZLP */
                dma_addr_t dma = hs_ep->desc_list_dma;

                dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
                dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
        } else {
                dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
                            DXEPTSIZ_XFERSIZE(0), hsotg->regs +
                            epsiz_reg);
        }

        ctrl = dwc2_readl(hsotg->regs + epctl_reg);
        ctrl |= DXEPCTL_CNAK;   /* clear NAK set by core */
        ctrl |= DXEPCTL_EPENA;  /* ensure ep enabled */
        ctrl |= DXEPCTL_USBACTEP;
        dwc2_writel(ctrl, hsotg->regs + epctl_reg);
}
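/*
 * Example: in non-descriptor-DMA mode a zero-length packet is
 * programmed above as PKTCNT = 1 with XFERSIZE = 0, i.e. the core
 * sends (or accepts) exactly one empty packet; the data buffer
 * registers are left untouched since there is nothing to transfer.
 */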
1988 */
1989 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
1990 struct dwc2_hsotg_ep *hs_ep,
1991 struct dwc2_hsotg_req *hs_req,
1992 int result)
1993 {
1994 if (!hs_req) {
1995 dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
1996 return;
1997 }
1998
1999 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
2000 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2001
2002 /*
2003 * only replace the status if we've not already set an error
2004 * from a previous transaction
2005 */
2006
2007 if (hs_req->req.status == -EINPROGRESS)
2008 hs_req->req.status = result;
2009
2010 if (using_dma(hsotg))
2011 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2012
2013 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2014
2015 hs_ep->req = NULL;
2016 list_del_init(&hs_req->queue);
2017
2018 /*
2019 * call the complete request with the locks off, just in case the
2020 * request tries to queue more work for this endpoint.
2021 */
2022
2023 if (hs_req->req.complete) {
2024 spin_unlock(&hsotg->lock);
2025 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2026 spin_lock(&hsotg->lock);
2027 }
2028
2029 /* In DDMA we don't need to proceed to starting the next ISOC request */
2030 if (using_desc_dma(hsotg) && hs_ep->isochronous)
2031 return;
2032
2033 /*
2034 * Look to see if there is anything else to do. Note, the completion
2035 * of the previous request may have caused a new request to be started
2036 * so be careful when doing this.
2037 */
2038
2039 if (!hs_ep->req && result >= 0)
2040 dwc2_gadget_start_next_request(hs_ep);
2041 }
2042
2043 /*
2044 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2045 * @hs_ep: The endpoint the request was on.
2046 *
2047 * Get the first request from the ep queue and determine the descriptor on
2048 * which the completion happened. Based on isoc_chain_num, SW discovers which
2049 * half of the descriptor chain is currently in use by HW, adjusts dma_address
2050 * and calculates the index of the completed descriptor from the value of the
2051 * DEPDMA register. Update the actual length of the request and give it back
 * to the gadget.
2052 */
2053 static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
2054 {
2055 struct dwc2_hsotg *hsotg = hs_ep->parent;
2056 struct dwc2_hsotg_req *hs_req;
2057 struct usb_request *ureq;
2058 int index;
2059 dma_addr_t dma_addr;
2060 u32 dma_reg;
2061 u32 depdma;
2062 u32 desc_sts;
2063 u32 mask;
2064
2065 hs_req = get_ep_head(hs_ep);
2066 if (!hs_req) {
2067 dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
2068 return;
2069 }
2070 ureq = &hs_req->req;
2071
2072 dma_addr = hs_ep->desc_list_dma;
2073
2074 /*
2075 * If the lower half of the descriptor chain is currently in use by
2076 * SW, that means the higher half is being processed by HW, so shift
2077 * the DMA address to the higher half of the descriptor chain.
2078 */
2079 if (!hs_ep->isoc_chain_num)
2080 dma_addr += sizeof(struct dwc2_dma_desc) *
2081 (MAX_DMA_DESC_NUM_GENERIC / 2);
2082
2083 dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
2084 depdma = dwc2_readl(hsotg->regs + dma_reg);
2085
2086 index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
2087 desc_sts = hs_ep->desc_list[index].status;
2088
2089 mask = hs_ep->dir_in ?
DEV_DMA_ISOC_TX_NBYTES_MASK :
2090 DEV_DMA_ISOC_RX_NBYTES_MASK;
2091 ureq->actual = ureq->length -
2092 ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);
2093
2094 /* Adjust the actual length for ISOC OUT if the length is not aligned to 4 */
2095 if (!hs_ep->dir_in && ureq->length & 0x3)
2096 ureq->actual += 4 - (ureq->length & 0x3);
2097
2098 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2099 }
2100
2101 /*
2102 * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
2103 * @hs_ep: The isochronous endpoint to be re-enabled.
2104 *
2105 * If the ep has been disabled due to last descriptor servicing (IN endpoint)
2106 * or BNA (OUT endpoint), check the status of the other half of the descriptor
2107 * chain that was under SW control while HW was busy, and restart the endpoint
 * if needed.
2108 */
2109 static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
2110 {
2111 struct dwc2_hsotg *hsotg = hs_ep->parent;
2112 u32 depctl;
2113 u32 dma_reg;
2114 u32 ctrl;
2115 u32 dma_addr = hs_ep->desc_list_dma;
2116 unsigned char index = hs_ep->index;
2117
2118 dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
2119 depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
2120
2121 ctrl = dwc2_readl(hsotg->regs + depctl);
2122
2123 /*
2124 * EP was disabled if HW has processed last descriptor or BNA was set.
2125 * So restart ep if SW has prepared new descriptor chain in ep_queue
2126 * routine while HW was busy.
2127 */
2128 if (!(ctrl & DXEPCTL_EPENA)) {
2129 if (!hs_ep->next_desc) {
2130 dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
2131 __func__);
2132 return;
2133 }
2134
2135 dma_addr += sizeof(struct dwc2_dma_desc) *
2136 (MAX_DMA_DESC_NUM_GENERIC / 2) *
2137 hs_ep->isoc_chain_num;
2138 dwc2_writel(dma_addr, hsotg->regs + dma_reg);
2139
2140 ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
2141 dwc2_writel(ctrl, hsotg->regs + depctl);
2142
2143 /* Switch ISOC descriptor chain number being processed by SW */
2144 hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
2145 hs_ep->next_desc = 0;
2146
2147 dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
2148 __func__);
2149 }
2150 }
2151
2152 /**
2153 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2154 * @hsotg: The device state.
2155 * @ep_idx: The endpoint index for the data
2156 * @size: The size of data in the fifo, in bytes
2157 *
2158 * The FIFO status shows there is data to read from the FIFO for a given
2159 * endpoint, so sort out whether we need to read the data into a request
2160 * that has been made for that endpoint.
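 *
 * Note: the FIFO is drained in 32-bit words (see the ioread32_rep()
 * below), so when the received length is not word-aligned the request
 * buffer may be written up to 3 bytes past the data actually received.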
2161 */
2162 static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
2163 {
2164 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2165 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2166 void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
2167 int to_read;
2168 int max_req;
2169 int read_ptr;
2170
2171 if (!hs_req) {
2172 u32 epctl = dwc2_readl(hsotg->regs + DOEPCTL(ep_idx));
2173 int ptr;
2174
2175 dev_dbg(hsotg->dev,
2176 "%s: FIFO %d bytes on ep%d but no req (DXEPCTL=0x%08x)\n",
2177 __func__, size, ep_idx, epctl);
2178
2179 /* dump the data from the FIFO, we've nothing we can do */
2180 for (ptr = 0; ptr < size; ptr += 4)
2181 (void)dwc2_readl(fifo);
2182
2183 return;
2184 }
2185
2186 to_read = size;
2187 read_ptr = hs_req->req.actual;
2188 max_req = hs_req->req.length - read_ptr;
2189
2190 dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2191 __func__, to_read, max_req, read_ptr, hs_req->req.length);
2192
2193 if (to_read > max_req) {
2194 /*
2195 * more data appeared than we were willing
2196 * to deal with in this request.
2197 */
2198
2199 /* currently we don't handle this */
2200 WARN_ON_ONCE(1);
2201 }
2202
2203 hs_ep->total_data += to_read;
2204 hs_req->req.actual += to_read;
2205 to_read = DIV_ROUND_UP(to_read, 4);
2206
2207 /*
2208 * note, we might over-write the buffer end by 3 bytes depending on
2209 * alignment of the data.
2210 */
2211 ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
2212 }
2213
2214 /**
2215 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2216 * @hsotg: The device instance
2217 * @dir_in: If IN zlp
2218 *
2219 * Generate a zero-length IN packet request for terminating a SETUP
2220 * transaction.
2221 *
2222 * Note, since we don't write any data to the TxFIFO, it is
2223 * currently believed that we do not need to wait for any space in
2224 * the TxFIFO.
2225 */
2226 static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
2227 {
2228 /* eps_out[0] is used in both directions */
2229 hsotg->eps_out[0]->dir_in = dir_in;
2230 hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2231
2232 dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2233 }
2234
2235 static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
2236 u32 epctl_reg)
2237 {
2238 u32 ctrl;
2239
2240 ctrl = dwc2_readl(hsotg->regs + epctl_reg);
2241 if (ctrl & DXEPCTL_EOFRNUM)
2242 ctrl |= DXEPCTL_SETEVENFR;
2243 else
2244 ctrl |= DXEPCTL_SETODDFR;
2245 dwc2_writel(ctrl, hsotg->regs + epctl_reg);
2246 }
2247
2248 /*
2249 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
2250 * @hs_ep: The endpoint on which the transfer went
2251 *
2252 * Iterate over the endpoint's descriptor chain and get info on the bytes
2253 * remaining in the DMA descriptors after the transfer has completed. Used
 * for non-isoc EPs.
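 *
 * As a sketch of the accounting performed below (names as in this
 * driver): each descriptor's status word holds the bytes the hardware
 * did not transfer, so the remainder for the whole chain is the sum of
 *
 *   desc->status & DEV_DMA_NBYTES_MASK
 *
 * taken over all desc_count descriptors.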
2254 */
2255 static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
2256 {
2257 struct dwc2_hsotg *hsotg = hs_ep->parent;
2258 unsigned int bytes_rem = 0;
2259 struct dwc2_dma_desc *desc = hs_ep->desc_list;
2260 int i;
2261 u32 status;
2262
2263 if (!desc)
2264 return -EINVAL;
2265
2266 for (i = 0; i < hs_ep->desc_count; ++i) {
2267 status = desc->status;
2268 bytes_rem += status & DEV_DMA_NBYTES_MASK;
2269
2270 if (status & DEV_DMA_STS_MASK)
2271 dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2272 i, status & DEV_DMA_STS_MASK);
 
 /* advance to the next descriptor; without this every
  * iteration would re-read the first descriptor's status */
 desc++;
2273 }
2274
2275 return bytes_rem;
2276 }
2277
2278 /**
2279 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2280 * @hsotg: The device instance
2281 * @epnum: The endpoint received from
2282 *
2283 * The RXFIFO has delivered an OutDone event, which means that the data
2284 * transfer for an OUT endpoint has been completed, either by a short
2285 * packet or by the finish of a transfer.
2286 */
2287 static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
2288 {
2289 u32 epsize = dwc2_readl(hsotg->regs + DOEPTSIZ(epnum));
2290 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2291 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2292 struct usb_request *req = &hs_req->req;
2293 unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2294 int result = 0;
2295
2296 if (!hs_req) {
2297 dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
2298 return;
2299 }
2300
2301 if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2302 dev_dbg(hsotg->dev, "zlp packet received\n");
2303 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2304 dwc2_hsotg_enqueue_setup(hsotg);
2305 return;
2306 }
2307
2308 if (using_desc_dma(hsotg))
2309 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2310
2311 if (using_dma(hsotg)) {
2312 unsigned int size_done;
2313
2314 /*
2315 * Calculate the size of the transfer by checking how much
2316 * is left in the endpoint size register and then working it
2317 * out from the amount we loaded for the transfer.
2318 *
2319 * We need to do this as DMA pointers are always 32bit
2320 * aligned so may overshoot/undershoot the transfer.
2321 */
2322
2323 size_done = hs_ep->size_loaded - size_left;
2324 size_done += hs_ep->last_load;
2325
2326 req->actual = size_done;
2327 }
2328
2329 /* if there is more of the request to do, schedule a new transfer */
2330 if (req->actual < req->length && size_left == 0) {
2331 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2332 return;
2333 }
2334
2335 if (req->actual < req->length && req->short_not_ok) {
2336 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2337 __func__, req->actual, req->length);
2338
2339 /*
2340 * todo - what should we return here? there's no one else
2341 * even bothering to check the status.
2342 */
2343 }
2344
2345 /* DDMA IN status phase will start from StsPhseRcvd interrupt */
2346 if (!using_desc_dma(hsotg) && epnum == 0 &&
2347 hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2348 /* Move to STATUS IN */
2349 dwc2_hsotg_ep0_zlp(hsotg, true);
2350 return;
2351 }
2352
2353 /*
2354 * Slave mode OUT transfers do not go through XferComplete so
2355 * adjust the ISOC parity here.
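 *
 * (dwc2_hsotg_change_ep_iso_parity() above does this by checking
 * DXEPCTL_EOFRNUM and then setting DXEPCTL_SETEVENFR or
 * DXEPCTL_SETODDFR, toggling which frame parity the endpoint will
 * service next.)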
2356 */
2357 if (!using_dma(hsotg)) {
2358 if (hs_ep->isochronous && hs_ep->interval == 1)
2359 dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
2360 else if (hs_ep->isochronous && hs_ep->interval > 1)
2361 dwc2_gadget_incr_frame_num(hs_ep);
2362 }
2363
2364 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2365 }
2366
2367 /**
2368 * dwc2_hsotg_handle_rx - RX FIFO has data
2369 * @hsotg: The device instance
2370 *
2371 * The IRQ handler has detected that the RX FIFO has some data in it
2372 * that requires processing, so find out what is in there and do the
2373 * appropriate read.
2374 *
2375 * The RXFIFO is a true FIFO, the packets coming out are still in packet
2376 * chunks, so if you have x packets received on an endpoint you'll get x
2377 * FIFO events delivered, each with a packet's worth of data in it.
2378 *
2379 * When using DMA, we should not be processing events from the RXFIFO
2380 * as the actual data should be sent to the memory directly and we turn
2381 * on the completion interrupts to get notifications of transfer completion.
2382 */
2383 static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
2384 {
2385 u32 grxstsr = dwc2_readl(hsotg->regs + GRXSTSP);
2386 u32 epnum, status, size;
2387
2388 WARN_ON(using_dma(hsotg));
2389
2390 epnum = grxstsr & GRXSTS_EPNUM_MASK;
2391 status = grxstsr & GRXSTS_PKTSTS_MASK;
2392
2393 size = grxstsr & GRXSTS_BYTECNT_MASK;
2394 size >>= GRXSTS_BYTECNT_SHIFT;
2395
2396 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2397 __func__, grxstsr, size, epnum);
2398
2399 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
2400 case GRXSTS_PKTSTS_GLOBALOUTNAK:
2401 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2402 break;
2403
2404 case GRXSTS_PKTSTS_OUTDONE:
2405 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2406 dwc2_hsotg_read_frameno(hsotg));
2407
2408 if (!using_dma(hsotg))
2409 dwc2_hsotg_handle_outdone(hsotg, epnum);
2410 break;
2411
2412 case GRXSTS_PKTSTS_SETUPDONE:
2413 dev_dbg(hsotg->dev,
2414 "SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2415 dwc2_hsotg_read_frameno(hsotg),
2416 dwc2_readl(hsotg->regs + DOEPCTL(0)));
2417 /*
2418 * Call dwc2_hsotg_handle_outdone here if it was not called from
2419 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
2420 * generate GRXSTS_PKTSTS_OUTDONE for the setup packet.
2421 */
2422 if (hsotg->ep0_state == DWC2_EP0_SETUP)
2423 dwc2_hsotg_handle_outdone(hsotg, epnum);
2424 break;
2425
2426 case GRXSTS_PKTSTS_OUTRX:
2427 dwc2_hsotg_rx_data(hsotg, epnum, size);
2428 break;
2429
2430 case GRXSTS_PKTSTS_SETUPRX:
2431 dev_dbg(hsotg->dev,
2432 "SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2433 dwc2_hsotg_read_frameno(hsotg),
2434 dwc2_readl(hsotg->regs + DOEPCTL(0)));
2435
2436 WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
2437
2438 dwc2_hsotg_rx_data(hsotg, epnum, size);
2439 break;
2440
2441 default:
2442 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
2443 __func__, grxstsr);
2444
2445 dwc2_hsotg_dump(hsotg);
2446 break;
2447 }
2448 }
2449
2450 /**
2451 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
2452 * @mps: The maximum packet size in bytes.
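 *
 * For example, a 64 byte maxpacket maps to D0EPCTL_MPS_64; only 8, 16,
 * 32 and 64 bytes are valid control endpoint packet sizes, so anything
 * else is rejected below with a WARN.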
2453 */ 2454 static u32 dwc2_hsotg_ep0_mps(unsigned int mps) 2455 { 2456 switch (mps) { 2457 case 64: 2458 return D0EPCTL_MPS_64; 2459 case 32: 2460 return D0EPCTL_MPS_32; 2461 case 16: 2462 return D0EPCTL_MPS_16; 2463 case 8: 2464 return D0EPCTL_MPS_8; 2465 } 2466 2467 /* bad max packet size, warn and return invalid result */ 2468 WARN_ON(1); 2469 return (u32)-1; 2470 } 2471 2472 /** 2473 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field 2474 * @hsotg: The driver state. 2475 * @ep: The index number of the endpoint 2476 * @mps: The maximum packet size in bytes 2477 * @mc: The multicount value 2478 * 2479 * Configure the maximum packet size for the given endpoint, updating 2480 * the hardware control registers to reflect this. 2481 */ 2482 static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, 2483 unsigned int ep, unsigned int mps, 2484 unsigned int mc, unsigned int dir_in) 2485 { 2486 struct dwc2_hsotg_ep *hs_ep; 2487 void __iomem *regs = hsotg->regs; 2488 u32 reg; 2489 2490 hs_ep = index_to_ep(hsotg, ep, dir_in); 2491 if (!hs_ep) 2492 return; 2493 2494 if (ep == 0) { 2495 u32 mps_bytes = mps; 2496 2497 /* EP0 is a special case */ 2498 mps = dwc2_hsotg_ep0_mps(mps_bytes); 2499 if (mps > 3) 2500 goto bad_mps; 2501 hs_ep->ep.maxpacket = mps_bytes; 2502 hs_ep->mc = 1; 2503 } else { 2504 if (mps > 1024) 2505 goto bad_mps; 2506 hs_ep->mc = mc; 2507 if (mc > 3) 2508 goto bad_mps; 2509 hs_ep->ep.maxpacket = mps; 2510 } 2511 2512 if (dir_in) { 2513 reg = dwc2_readl(regs + DIEPCTL(ep)); 2514 reg &= ~DXEPCTL_MPS_MASK; 2515 reg |= mps; 2516 dwc2_writel(reg, regs + DIEPCTL(ep)); 2517 } else { 2518 reg = dwc2_readl(regs + DOEPCTL(ep)); 2519 reg &= ~DXEPCTL_MPS_MASK; 2520 reg |= mps; 2521 dwc2_writel(reg, regs + DOEPCTL(ep)); 2522 } 2523 2524 return; 2525 2526 bad_mps: 2527 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps); 2528 } 2529 2530 /** 2531 * dwc2_hsotg_txfifo_flush - flush Tx FIFO 2532 * @hsotg: The driver state 2533 * @idx: The index for the endpoint (0..15) 2534 */ 2535 static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx) 2536 { 2537 int timeout; 2538 int val; 2539 2540 dwc2_writel(GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH, 2541 hsotg->regs + GRSTCTL); 2542 2543 /* wait until the fifo is flushed */ 2544 timeout = 100; 2545 2546 while (1) { 2547 val = dwc2_readl(hsotg->regs + GRSTCTL); 2548 2549 if ((val & (GRSTCTL_TXFFLSH)) == 0) 2550 break; 2551 2552 if (--timeout == 0) { 2553 dev_err(hsotg->dev, 2554 "%s: timeout flushing fifo (GRSTCTL=%08x)\n", 2555 __func__, val); 2556 break; 2557 } 2558 2559 udelay(1); 2560 } 2561 } 2562 2563 /** 2564 * dwc2_hsotg_trytx - check to see if anything needs transmitting 2565 * @hsotg: The driver state 2566 * @hs_ep: The driver endpoint to check. 2567 * 2568 * Check to see if there is a request that has data to send, and if so 2569 * make an attempt to write data into the FIFO. 
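 *
 * Note: if nothing is queued for a non-EP0 endpoint, its interrupt is
 * masked below so an empty TxFIFO does not keep raising events until a
 * new request is queued.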
2570 */
2571 static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
2572 struct dwc2_hsotg_ep *hs_ep)
2573 {
2574 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2575
2576 if (!hs_ep->dir_in || !hs_req) {
2577 /*
2578 * if no request is enqueued, we disable interrupts
2579 * for the endpoints, except for ep0
2580 */
2581 if (hs_ep->index != 0)
2582 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
2583 hs_ep->dir_in, 0);
2584 return 0;
2585 }
2586
2587 if (hs_req->req.actual < hs_req->req.length) {
2588 dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
2589 hs_ep->index);
2590 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2591 }
2592
2593 return 0;
2594 }
2595
2596 /**
2597 * dwc2_hsotg_complete_in - complete IN transfer
2598 * @hsotg: The device state.
2599 * @hs_ep: The endpoint that has just completed.
2600 *
2601 * An IN transfer has been completed, update the transfer's state and then
2602 * call the relevant completion routines.
2603 */
2604 static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
2605 struct dwc2_hsotg_ep *hs_ep)
2606 {
2607 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2608 u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
2609 int size_left, size_done;
2610
2611 if (!hs_req) {
2612 dev_dbg(hsotg->dev, "XferCompl but no req\n");
2613 return;
2614 }
2615
2616 /* Finish ZLP handling for IN EP0 transactions */
2617 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
2618 dev_dbg(hsotg->dev, "zlp packet sent\n");
2619
2620 /*
2621 * While sending the zlp for DWC2_EP0_STATUS_IN the EP direction
2622 * was changed to IN. Change it back to complete the OUT transfer
2623 * request.
2624 */
 hs_ep->dir_in = 0;
2625
2626 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2627 if (hsotg->test_mode) {
2628 int ret;
2629
2630 ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
2631 if (ret < 0) {
2632 dev_dbg(hsotg->dev, "Invalid Test #%d\n",
2633 hsotg->test_mode);
2634 dwc2_hsotg_stall_ep0(hsotg);
2635 return;
2636 }
2637 }
2638 dwc2_hsotg_enqueue_setup(hsotg);
2639 return;
2640 }
2641
2642 /*
2643 * Calculate the size of the transfer by checking how much is left
2644 * in the endpoint size register and then working it out from
2645 * the amount we loaded for the transfer.
2646 *
2647 * We do this even for DMA, as the transfer may have incremented
2648 * past the end of the buffer (DMA transfers are always 32bit
2649 * aligned).
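 *
 * For example, if 512 bytes were loaded (size_loaded) and the size
 * register still reports 12 bytes left, then 500 bytes actually moved;
 * size_done below captures this, and last_load accounts for any
 * earlier partial loads of the same request.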
2650 */
2651 if (using_desc_dma(hsotg)) {
2652 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2653 if (size_left < 0)
2654 dev_err(hsotg->dev, "error parsing DDMA results %d\n",
2655 size_left);
2656 } else {
2657 size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2658 }
2659
2660 size_done = hs_ep->size_loaded - size_left;
2661 size_done += hs_ep->last_load;
2662
2663 if (hs_req->req.actual != size_done)
2664 dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
2665 __func__, hs_req->req.actual, size_done);
2666
2667 hs_req->req.actual = size_done;
2668 dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
2669 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2670
2671 if (!size_left && hs_req->req.actual < hs_req->req.length) {
2672 dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
2673 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2674 return;
2675 }
2676
2677 /* Zlp for all endpoints, for ep0 only in DATA IN stage */
2678 if (hs_ep->send_zlp) {
2679 dwc2_hsotg_program_zlp(hsotg, hs_ep);
2680 hs_ep->send_zlp = 0;
2681 /* transfer will be completed on next complete interrupt */
2682 return;
2683 }
2684
2685 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
2686 /* Move to STATUS OUT */
2687 dwc2_hsotg_ep0_zlp(hsotg, false);
2688 return;
2689 }
2690
2691 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2692 }
2693
2694 /**
2695 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
2696 * @hsotg: The device state.
2697 * @idx: Index of ep.
2698 * @dir_in: Endpoint direction 1-in 0-out.
2699 *
2700 * Reads the interrupts for the endpoint with the given index and direction,
2701 * masking epint_reg with the corresponding mask.
2702 */
2703 static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
2704 unsigned int idx, int dir_in)
2705 {
2706 u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
2707 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2708 u32 ints;
2709 u32 mask;
2710 u32 diepempmsk;
2711
2712 mask = dwc2_readl(hsotg->regs + epmsk_reg);
2713 diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
2714 mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
2715 mask |= DXEPINT_SETUP_RCVD;
2716
2717 ints = dwc2_readl(hsotg->regs + epint_reg);
2718 ints &= mask;
2719 return ints;
2720 }
2721
2722 /**
2723 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2724 * @hs_ep: The endpoint on which interrupt is asserted.
2725 *
2726 * This interrupt indicates that the endpoint has been disabled per the
2727 * application's request.
2728 *
2729 * For IN endpoints this flushes the txfifo; for BULK it clears the global
2730 * NP IN NAK (via DCTL_CGNPINNAK), and for ISOC it completes the current
 * request.
2731 *
2732 * For ISOC-OUT endpoints it completes expired requests; if there is a
2733 * remaining request, it is started.
2734 */
2735 static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2736 {
2737 struct dwc2_hsotg *hsotg = hs_ep->parent;
2738 struct dwc2_hsotg_req *hs_req;
2739 unsigned char idx = hs_ep->index;
2740 int dir_in = hs_ep->dir_in;
2741 u32 epctl_reg = dir_in ?
DIEPCTL(idx) : DOEPCTL(idx);
2742 int dctl = dwc2_readl(hsotg->regs + DCTL);
2743
2744 dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
2745
2746 if (dir_in) {
2747 int epctl = dwc2_readl(hsotg->regs + epctl_reg);
2748
2749 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
2750
2751 if (hs_ep->isochronous) {
2752 dwc2_hsotg_complete_in(hsotg, hs_ep);
2753 return;
2754 }
2755
2756 if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
2757 int dctl = dwc2_readl(hsotg->regs + DCTL);
2758
2759 dctl |= DCTL_CGNPINNAK;
2760 dwc2_writel(dctl, hsotg->regs + DCTL);
2761 }
2762 return;
2763 }
2764
2765 if (dctl & DCTL_GOUTNAKSTS) {
2766 dctl |= DCTL_CGOUTNAK;
2767 dwc2_writel(dctl, hsotg->regs + DCTL);
2768 }
2769
2770 if (!hs_ep->isochronous)
2771 return;
2772
2773 if (list_empty(&hs_ep->queue)) {
2774 dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
2775 __func__, hs_ep);
2776 return;
2777 }
2778
2779 do {
2780 hs_req = get_ep_head(hs_ep);
2781 if (hs_req)
2782 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2783 -ENODATA);
2784 dwc2_gadget_incr_frame_num(hs_ep);
2785 } while (dwc2_gadget_target_frame_elapsed(hs_ep));
2786
2787 dwc2_gadget_start_next_request(hs_ep);
2788 }
2789
2790 /**
2791 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
2792 * @hs_ep: The endpoint on which interrupt is asserted.
2793 *
2794 * This is the starting point for an ISOC-OUT transfer; synchronization is
2795 * done with the first OUT token received from the host while the
 * corresponding EP is disabled.
2796 *
2797 * The device does not know the initial frame in which the OUT token will
2798 * arrive. For this, HW generates OUTTKNEPDIS - an OUT token was received
2799 * while the EP was disabled. Upon getting this interrupt, SW starts the
 * calculation for the next transfer frame.
2800 */
2801 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2802 {
2803 struct dwc2_hsotg *hsotg = ep->parent;
2804 int dir_in = ep->dir_in;
2805 u32 doepmsk;
2806 u32 tmp;
2807
2808 if (dir_in || !ep->isochronous)
2809 return;
2810
2811 /*
2812 * Store frame in which irq was asserted here, as
2813 * it can change while completing request below.
2814 */
2815 tmp = dwc2_hsotg_read_frameno(hsotg);
2816
2817 dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
2818
2819 if (using_desc_dma(hsotg)) {
2820 if (ep->target_frame == TARGET_FRAME_INITIAL) {
2821 /* Start first ISO Out */
2822 ep->target_frame = tmp;
2823 dwc2_gadget_start_isoc_ddma(ep);
2824 }
2825 return;
2826 }
2827
2828 if (ep->interval > 1 &&
2829 ep->target_frame == TARGET_FRAME_INITIAL) {
2830 u32 dsts;
2831 u32 ctrl;
2832
2833 dsts = dwc2_readl(hsotg->regs + DSTS);
2834 ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
2835 dwc2_gadget_incr_frame_num(ep);
2836
2837 ctrl = dwc2_readl(hsotg->regs + DOEPCTL(ep->index));
2838 if (ep->target_frame & 0x1)
2839 ctrl |= DXEPCTL_SETODDFR;
2840 else
2841 ctrl |= DXEPCTL_SETEVENFR;
2842
2843 dwc2_writel(ctrl, hsotg->regs + DOEPCTL(ep->index));
2844 }
2845
2846 dwc2_gadget_start_next_request(ep);
2847 doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
2848 doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
2849 dwc2_writel(doepmsk, hsotg->regs + DOEPMSK);
2850 }
2851
2852 /**
2853 * dwc2_gadget_handle_nak - handle NAK interrupt
2854 * @hs_ep: The endpoint on which interrupt is asserted.
2855 *
2856 * This is the starting point for an ISOC-IN transfer; synchronization is
2857 * done with the first IN token received from the host while the
 * corresponding EP is disabled.
2858 *
2859 * The device does not know when the first IN token will arrive from the
2860 * host. On its arrival HW generates 2 interrupts: 'in token received while
2861 * FIFO empty' and 'NAK'. A NAK interrupt for ISOC-IN means that the token
2862 * has arrived and a ZLP was sent in response, as there was no data in the
2863 * FIFO. SW relies on this interrupt to obtain the frame in which the token
2864 * arrived and then, based on the interval, calculates the next frame for
 * the transfer.
2865 */
2866 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
2867 {
2868 struct dwc2_hsotg *hsotg = hs_ep->parent;
2869 int dir_in = hs_ep->dir_in;
2870
2871 if (!dir_in || !hs_ep->isochronous)
2872 return;
2873
2874 if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
2875 hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
2876
2877 if (using_desc_dma(hsotg)) {
2878 dwc2_gadget_start_isoc_ddma(hs_ep);
2879 return;
2880 }
2881
2882 if (hs_ep->interval > 1) {
2883 u32 ctrl = dwc2_readl(hsotg->regs +
2884 DIEPCTL(hs_ep->index));
2885 if (hs_ep->target_frame & 0x1)
2886 ctrl |= DXEPCTL_SETODDFR;
2887 else
2888 ctrl |= DXEPCTL_SETEVENFR;
2889
2890 dwc2_writel(ctrl, hsotg->regs + DIEPCTL(hs_ep->index));
2891 }
2892
2893 dwc2_hsotg_complete_request(hsotg, hs_ep,
2894 get_ep_head(hs_ep), 0);
2895 }
2896
2897 dwc2_gadget_incr_frame_num(hs_ep);
2898 }
2899
2900 /**
2901 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
2902 * @hsotg: The driver state
2903 * @idx: The index for the endpoint (0..15)
2904 * @dir_in: Set if this is an IN endpoint
2905 *
2906 * Process and clear any interrupt pending for an individual endpoint
2907 */
2908 static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
2909 int dir_in)
2910 {
2911 struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
2912 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2913 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
2914 u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
2915 u32 ints;
2916 u32 ctrl;
2917
2918 ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
2919 ctrl = dwc2_readl(hsotg->regs + epctl_reg);
2920
2921 /* Clear endpoint interrupts */
2922 dwc2_writel(ints, hsotg->regs + epint_reg);
2923
2924 if (!hs_ep) {
2925 dev_err(hsotg->dev, "%s: Interrupt for unconfigured ep%d(%s)\n",
2926 __func__, idx, dir_in ? "in" : "out");
2927 return;
2928 }
2929
2930 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
2931 __func__, idx, dir_in ? "in" : "out", ints);
2932
2933 /* Don't process XferCompl interrupt if it is a setup packet */
2934 if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
2935 ints &= ~DXEPINT_XFERCOMPL;
2936
2937 /*
2938 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
2939 * stage and xfercomplete was generated without SETUP phase done
2940 * interrupt. SW should parse received setup packet only after host's
2941 * exit from setup phase of control transfer.
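 *
 * (this is why the check below drops DXEPINT_XFERCOMPL for EP0 OUT in
 * DDMA mode while we are still in DWC2_EP0_SETUP and no DXEPINT_SETUP
 * bit accompanies it)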
2942 */
2943 if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
2944 hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
2945 ints &= ~DXEPINT_XFERCOMPL;
2946
2947 if (ints & DXEPINT_XFERCOMPL) {
2948 dev_dbg(hsotg->dev,
2949 "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
2950 __func__, dwc2_readl(hsotg->regs + epctl_reg),
2951 dwc2_readl(hsotg->regs + epsiz_reg));
2952
2953 /* In DDMA handle isochronous requests separately */
2954 if (using_desc_dma(hsotg) && hs_ep->isochronous) {
2955 dwc2_gadget_complete_isoc_request_ddma(hs_ep);
2956 /* Try to start next isoc request */
2957 dwc2_gadget_start_next_isoc_ddma(hs_ep);
2958 } else if (dir_in) {
2959 /*
2960 * We get OutDone from the FIFO, so we only
2961 * need to look at completing IN requests here
2962 * if operating in slave mode
2963 */
2964 if (hs_ep->isochronous && hs_ep->interval > 1)
2965 dwc2_gadget_incr_frame_num(hs_ep);
2966
2967 dwc2_hsotg_complete_in(hsotg, hs_ep);
2968 if (ints & DXEPINT_NAKINTRPT)
2969 ints &= ~DXEPINT_NAKINTRPT;
2970
2971 if (idx == 0 && !hs_ep->req)
2972 dwc2_hsotg_enqueue_setup(hsotg);
2973 } else if (using_dma(hsotg)) {
2974 /*
2975 * We're using DMA, we need to fire an OutDone here
2976 * as we ignore the RXFIFO.
2977 */
2978 if (hs_ep->isochronous && hs_ep->interval > 1)
2979 dwc2_gadget_incr_frame_num(hs_ep);
2980
2981 dwc2_hsotg_handle_outdone(hsotg, idx);
2982 }
2983 }
2984
2985 if (ints & DXEPINT_EPDISBLD)
2986 dwc2_gadget_handle_ep_disabled(hs_ep);
2987
2988 if (ints & DXEPINT_OUTTKNEPDIS)
2989 dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
2990
2991 if (ints & DXEPINT_NAKINTRPT)
2992 dwc2_gadget_handle_nak(hs_ep);
2993
2994 if (ints & DXEPINT_AHBERR)
2995 dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
2996
2997 if (ints & DXEPINT_SETUP) { /* Setup or Timeout */
2998 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
2999
3000 if (using_dma(hsotg) && idx == 0) {
3001 /*
3002 * this is the notification that we've received a
3003 * setup packet. In non-DMA mode we'd get this
3004 * from the RXFIFO; instead we need to process
3005 * the setup here.
3006 */
3007
3008 if (dir_in)
3009 WARN_ON_ONCE(1);
3010 else
3011 dwc2_hsotg_handle_outdone(hsotg, 0);
3012 }
3013 }
3014
3015 if (ints & DXEPINT_STSPHSERCVD) {
3016 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
3017
3018 /* Move to STATUS IN for DDMA */
3019 if (using_desc_dma(hsotg))
3020 dwc2_hsotg_ep0_zlp(hsotg, true);
3021 }
3022
3023 if (ints & DXEPINT_BACK2BACKSETUP)
3024 dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
3025
3026 if (ints & DXEPINT_BNAINTR) {
3027 dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
3028
3029 /*
3030 * Try to start next isoc request, if any.
3031 * Sometimes the endpoint remains enabled after BNA interrupt
3032 * assertion, which is not expected, hence we can enter here
3033 * a couple of times.
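 *
 * (dwc2_gadget_start_next_isoc_ddma() checks DXEPCTL_EPENA itself
 * and only re-arms the endpoint when it really is disabled, so
 * reaching this point repeatedly is harmless.)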
3034 */
3035 if (hs_ep->isochronous)
3036 dwc2_gadget_start_next_isoc_ddma(hs_ep);
3037 }
3038
3039 if (dir_in && !hs_ep->isochronous) {
3040 /* not sure if this is important, but we'll clear it anyway */
3041 if (ints & DXEPINT_INTKNTXFEMP) {
3042 dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
3043 __func__, idx);
3044 }
3045
3046 /* this probably means something bad is happening */
3047 if (ints & DXEPINT_INTKNEPMIS) {
3048 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
3049 __func__, idx);
3050 }
3051
3052 /* FIFO has space or is empty (see GAHBCFG) */
3053 if (hsotg->dedicated_fifos &&
3054 ints & DXEPINT_TXFEMP) {
3055 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
3056 __func__, idx);
3057 if (!using_dma(hsotg))
3058 dwc2_hsotg_trytx(hsotg, hs_ep);
3059 }
3060 }
3061 }
3062
3063 /**
3064 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3065 * @hsotg: The device state.
3066 *
3067 * Handle updating the device settings after the enumeration phase has
3068 * been completed.
3069 */
3070 static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
3071 {
3072 u32 dsts = dwc2_readl(hsotg->regs + DSTS);
3073 int ep0_mps = 0, ep_mps = 8;
3074
3075 /*
3076 * This should signal the finish of the enumeration phase
3077 * of the USB handshaking, so we should now know what rate
3078 * we connected at.
3079 */
3080
3081 dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3082
3083 /*
3084 * note, since we're limited by the size of transfer on EP0, and
3085 * it seems IN transfers must be an even number of packets, we do
3086 * not advertise a 64byte MPS on EP0.
3087 */
3088
3089 /* catch both EnumSpd_FS and EnumSpd_FS48 */
3090 switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
3091 case DSTS_ENUMSPD_FS:
3092 case DSTS_ENUMSPD_FS48:
3093 hsotg->gadget.speed = USB_SPEED_FULL;
3094 ep0_mps = EP0_MPS_LIMIT;
3095 ep_mps = 1023;
3096 break;
3097
3098 case DSTS_ENUMSPD_HS:
3099 hsotg->gadget.speed = USB_SPEED_HIGH;
3100 ep0_mps = EP0_MPS_LIMIT;
3101 ep_mps = 1024;
3102 break;
3103
3104 case DSTS_ENUMSPD_LS:
3105 hsotg->gadget.speed = USB_SPEED_LOW;
3106 ep0_mps = 8;
3107 ep_mps = 8;
3108 /*
3109 * note, we don't actually support LS in this driver at the
3110 * moment, and the documentation seems to imply that it isn't
3111 * supported by the PHYs on some of the devices.
3112 */
3113 break;
3114 }
3115 dev_info(hsotg->dev, "new device is %s\n",
3116 usb_speed_string(hsotg->gadget.speed));
3117
3118 /*
3119 * we should now know the maximum packet size for an
3120 * endpoint, so set the endpoints to a default value.
3121 */
3122
3123 if (ep0_mps) {
3124 int i;
3125 /* Initialize ep0 for both in and out directions */
3126 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
3127 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
3128 for (i = 1; i < hsotg->num_of_eps; i++) {
3129 if (hsotg->eps_in[i])
3130 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3131 0, 1);
3132 if (hsotg->eps_out[i])
3133 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3134 0, 0);
3135 }
3136 }
3137
3138 /* ensure after enumeration our EP0 is active */
3139
3140 dwc2_hsotg_enqueue_setup(hsotg);
3141
3142 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3143 dwc2_readl(hsotg->regs + DIEPCTL0),
3144 dwc2_readl(hsotg->regs + DOEPCTL0));
3145 }
3146
3147 /**
3148 * kill_all_requests - remove all requests from the endpoint's queue
3149 * @hsotg: The device state.
3150 * @ep: The endpoint the requests may be on.
3151 * @result: The result code to use.
3152 *
3153 * Go through the requests on the given endpoint and mark them
3154 * completed with the given result code.
3155 */
3156 static void kill_all_requests(struct dwc2_hsotg *hsotg,
3157 struct dwc2_hsotg_ep *ep,
3158 int result)
3159 {
3160 struct dwc2_hsotg_req *req, *treq;
3161 unsigned int size;
3162
3163 ep->req = NULL;
3164
3165 list_for_each_entry_safe(req, treq, &ep->queue, queue)
3166 dwc2_hsotg_complete_request(hsotg, ep, req,
3167 result);
3168
3169 if (!hsotg->dedicated_fifos)
3170 return;
3171 size = (dwc2_readl(hsotg->regs + DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3172 if (size < ep->fifo_size)
3173 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3174 }
3175
3176 /**
3177 * dwc2_hsotg_disconnect - disconnect service
3178 * @hsotg: The device state.
3179 *
3180 * The device has been disconnected. Remove all current
3181 * transactions and signal the gadget driver that this
3182 * has happened.
3183 */
3184 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3185 {
3186 unsigned int ep;
3187
3188 if (!hsotg->connected)
3189 return;
3190
3191 hsotg->connected = 0;
3192 hsotg->test_mode = 0;
3193
3194 for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3195 if (hsotg->eps_in[ep])
3196 kill_all_requests(hsotg, hsotg->eps_in[ep],
3197 -ESHUTDOWN);
3198 if (hsotg->eps_out[ep])
3199 kill_all_requests(hsotg, hsotg->eps_out[ep],
3200 -ESHUTDOWN);
3201 }
3202
3203 call_gadget(hsotg, disconnect);
3204 hsotg->lx_state = DWC2_L3;
3205 }
3206
3207 /**
3208 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3209 * @hsotg: The device state
3210 * @periodic: True if this is a periodic FIFO interrupt
3211 */
3212 static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3213 {
3214 struct dwc2_hsotg_ep *ep;
3215 int epno, ret;
3216
3217 /* look through for any more data to transmit */
3218 for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3219 ep = index_to_ep(hsotg, epno, 1);
3220
3221 if (!ep)
3222 continue;
3223
3224 if (!ep->dir_in)
3225 continue;
3226
3227 if ((periodic && !ep->periodic) ||
3228 (!periodic && ep->periodic))
3229 continue;
3230
3231 ret = dwc2_hsotg_trytx(hsotg, ep);
3232 if (ret < 0)
3233 break;
3234 }
3235 }
3236
3237 /* IRQ flags which will trigger a retry around the IRQ loop */
3238 #define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
3239 GINTSTS_PTXFEMP | \
3240 GINTSTS_RXFLVL)
3241
3242 /**
3243 * dwc2_hsotg_core_init_disconnected - issue softreset to the core
3244 * @hsotg: The device state
 * @is_usb_reset: true when called as a result of a USB reset
3245 *
3246 * Issue a soft reset to the core, and await the core finishing it.
3247 */
3248 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3249 bool is_usb_reset)
3250 {
3251 u32 intmsk;
3252 u32 val;
3253 u32 usbcfg;
3254 u32 dcfg = 0;
3255
3256 /* Kill any ep0 requests as controller will be reinitialized */
3257 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3258
3259 if (!is_usb_reset)
3260 if (dwc2_core_reset(hsotg, true))
3261 return;
3262
3263 /*
3264 * we must now enable ep0 ready for host detection and then
3265 * set configuration.
3266 */
3267
3268 /* keep other bits untouched (so e.g.
forced modes are not lost) */
3269 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3270 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3271 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3272
3273 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
3274 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
3275 hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
3276 /* FS/LS Dedicated Transceiver Interface */
3277 usbcfg |= GUSBCFG_PHYSEL;
3278 } else {
3279 /* set the PLL on, remove the HNP/SRP and set the PHY */
3280 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3281 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
3282 (val << GUSBCFG_USBTRDTIM_SHIFT);
3283 }
3284 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
3285
3286 dwc2_hsotg_init_fifo(hsotg);
3287
3288 if (!is_usb_reset)
3289 __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
3290
3291 dcfg |= DCFG_EPMISCNT(1);
3292
3293 switch (hsotg->params.speed) {
3294 case DWC2_SPEED_PARAM_LOW:
3295 dcfg |= DCFG_DEVSPD_LS;
3296 break;
3297 case DWC2_SPEED_PARAM_FULL:
3298 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3299 dcfg |= DCFG_DEVSPD_FS48;
3300 else
3301 dcfg |= DCFG_DEVSPD_FS;
3302 break;
3303 default:
3304 dcfg |= DCFG_DEVSPD_HS;
3305 }
3306
3307 dwc2_writel(dcfg, hsotg->regs + DCFG);
3308
3309 /* Clear any pending OTG interrupts */
3310 dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
3311
3312 /* Clear any pending interrupts */
3313 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
3314 intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
3315 GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
3316 GINTSTS_USBRST | GINTSTS_RESETDET |
3317 GINTSTS_ENUMDONE | GINTSTS_OTGINT |
3318 GINTSTS_USBSUSP | GINTSTS_WKUPINT;
3319
3320 if (!using_desc_dma(hsotg))
3321 intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
3322
3323 if (!hsotg->params.external_id_pin_ctl)
3324 intmsk |= GINTSTS_CONIDSTSCHNG;
3325
3326 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
3327
3328 if (using_dma(hsotg)) {
3329 dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
3330 (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
3331 hsotg->regs + GAHBCFG);
3332
3333 /* Set DDMA mode support in the core if needed */
3334 if (using_desc_dma(hsotg))
3335 __orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
3336
3337 } else {
3338 dwc2_writel(((hsotg->dedicated_fifos) ?
3339 (GAHBCFG_NP_TXF_EMP_LVL |
3340 GAHBCFG_P_TXF_EMP_LVL) : 0) |
3341 GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
3342 }
3343
3344 /*
3345 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3346 * when we have no data to transfer. Otherwise we get flooded by
3347 * interrupts.
3348 */
3349
3350 dwc2_writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3351 DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
3352 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
3353 DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
3354 hsotg->regs + DIEPMSK);
3355
3356 /*
3357 * we don't need XferCompl; we get that from the RXFIFO in slave mode.
3358 * In DMA mode we may need this and StsPhseRcvd.
3359 */
3360 dwc2_writel((using_dma(hsotg) ?
(DIEPMSK_XFERCOMPLMSK |
3361 DOEPMSK_STSPHSERCVDMSK) : 0) |
3362 DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
3363 DOEPMSK_SETUPMSK,
3364 hsotg->regs + DOEPMSK);
3365
3366 /* Enable BNA interrupt for DDMA */
3367 if (using_desc_dma(hsotg))
3368 __orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
3369
3370 dwc2_writel(0, hsotg->regs + DAINTMSK);
3371
3372 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3373 dwc2_readl(hsotg->regs + DIEPCTL0),
3374 dwc2_readl(hsotg->regs + DOEPCTL0));
3375
3376 /* enable in and out endpoint interrupts */
3377 dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
3378
3379 /*
3380 * Enable the RXFIFO when in slave mode, as this is how we collect
3381 * the data. In DMA mode, we get events from the FIFO but also
3382 * things we cannot process, so do not use it.
3383 */
3384 if (!using_dma(hsotg))
3385 dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
3386
3387 /* Enable interrupts for EP0 in and out */
3388 dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
3389 dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
3390
3391 if (!is_usb_reset) {
3392 __orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
3393 udelay(10); /* see openiboot */
3394 __bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
3395 }
3396
3397 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg->regs + DCTL));
3398
3399 /*
3400 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
3401 * writing to the EPCTL register.
3402 */
3403
3404 /* set to read 1 8byte packet */
3405 dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
3406 DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);
3407
3408 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3409 DXEPCTL_CNAK | DXEPCTL_EPENA |
3410 DXEPCTL_USBACTEP,
3411 hsotg->regs + DOEPCTL0);
3412
3413 /* enable, but don't activate EP0in */
3414 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3415 DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
3416
3417 dwc2_hsotg_enqueue_setup(hsotg);
3418
3419 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3420 dwc2_readl(hsotg->regs + DIEPCTL0),
3421 dwc2_readl(hsotg->regs + DOEPCTL0));
3422
3423 /* clear global NAKs */
3424 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3425 if (!is_usb_reset)
3426 val |= DCTL_SFTDISCON;
3427 __orr32(hsotg->regs + DCTL, val);
3428
3429 /* must be at least 3ms to allow the bus to see the disconnect */
3430 mdelay(3);
3431
3432 hsotg->lx_state = DWC2_L0;
3433 }
3434
3435 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
3436 {
3437 /* set the soft-disconnect bit */
3438 __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
3439 }
3440
3441 void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
3442 {
3443 /* remove the soft-disconnect and let's go */
3444 __bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
3445 }
3446
3447 /**
3448 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
3449 * @hsotg: The device state
3450 *
3451 * This interrupt indicates one of the following conditions occurred while
3452 * transmitting an ISOC transaction.
3453 * - Corrupted IN Token for ISOC EP.
3454 * - Packet not complete in FIFO.
3455 *
3456 * The following actions will be taken:
3457 * - Determine the EP
3458 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
3459 */
3460 static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3461 {
3462 struct dwc2_hsotg_ep *hs_ep;
3463 u32 epctrl;
3464 u32 idx;
3465
3466 dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
3467
3468 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3469 hs_ep = hsotg->eps_in[idx];
3470 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
3471 if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
3472 dwc2_gadget_target_frame_elapsed(hs_ep)) {
3473 epctrl |= DXEPCTL_SNAK;
3474 epctrl |= DXEPCTL_EPDIS;
3475 dwc2_writel(epctrl, hsotg->regs + DIEPCTL(idx));
3476 }
3477 }
3478
3479 /* Clear interrupt */
3480 dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
3481 }
3482
3483 /**
3484 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
3485 * @hsotg: The device state
3486 *
3487 * This interrupt indicates one of the following conditions occurred while
3488 * receiving an ISOC transaction.
3489 * - Corrupted OUT Token for ISOC EP.
3490 * - Packet not complete in FIFO.
3491 *
3492 * The following actions will be taken:
3493 * - Determine the EP
3494 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
3495 */
3496 static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3497 {
3498 u32 gintsts;
3499 u32 gintmsk;
3500 u32 epctrl;
3501 struct dwc2_hsotg_ep *hs_ep;
3502 int idx;
3503
3504 dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
3505
3506 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3507 hs_ep = hsotg->eps_out[idx];
3508 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
3509 if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
3510 dwc2_gadget_target_frame_elapsed(hs_ep)) {
3511 /* Unmask GOUTNAKEFF interrupt */
3512 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3513 gintmsk |= GINTSTS_GOUTNAKEFF;
3514 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
3515
3516 gintsts = dwc2_readl(hsotg->regs + GINTSTS);
3517 if (!(gintsts & GINTSTS_GOUTNAKEFF))
3518 __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
3519 }
3520 }
3521
3522 /* Clear interrupt */
3523 dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
3524 }
3525
3526 /**
3527 * dwc2_hsotg_irq - handle device interrupt
3528 * @irq: The IRQ number triggered
3529 * @pw: The driver state pointer supplied when the handler was registered.
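 *
 * Note: interrupts in IRQ_RETRY_MASK (the FIFO events defined above)
 * make the handler loop and re-read GINTSTS, up to eight times, since
 * servicing one event can raise another.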
3530 */ 3531 static irqreturn_t dwc2_hsotg_irq(int irq, void *pw) 3532 { 3533 struct dwc2_hsotg *hsotg = pw; 3534 int retry_count = 8; 3535 u32 gintsts; 3536 u32 gintmsk; 3537 3538 if (!dwc2_is_device_mode(hsotg)) 3539 return IRQ_NONE; 3540 3541 spin_lock(&hsotg->lock); 3542 irq_retry: 3543 gintsts = dwc2_readl(hsotg->regs + GINTSTS); 3544 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); 3545 3546 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n", 3547 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count); 3548 3549 gintsts &= gintmsk; 3550 3551 if (gintsts & GINTSTS_RESETDET) { 3552 dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__); 3553 3554 dwc2_writel(GINTSTS_RESETDET, hsotg->regs + GINTSTS); 3555 3556 /* This event must be used only if controller is suspended */ 3557 if (hsotg->lx_state == DWC2_L2) { 3558 dwc2_exit_hibernation(hsotg, true); 3559 hsotg->lx_state = DWC2_L0; 3560 } 3561 } 3562 3563 if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) { 3564 u32 usb_status = dwc2_readl(hsotg->regs + GOTGCTL); 3565 u32 connected = hsotg->connected; 3566 3567 dev_dbg(hsotg->dev, "%s: USBRst\n", __func__); 3568 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n", 3569 dwc2_readl(hsotg->regs + GNPTXSTS)); 3570 3571 dwc2_writel(GINTSTS_USBRST, hsotg->regs + GINTSTS); 3572 3573 /* Report disconnection if it is not already done. */ 3574 dwc2_hsotg_disconnect(hsotg); 3575 3576 /* Reset device address to zero */ 3577 __bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK); 3578 3579 if (usb_status & GOTGCTL_BSESVLD && connected) 3580 dwc2_hsotg_core_init_disconnected(hsotg, true); 3581 } 3582 3583 if (gintsts & GINTSTS_ENUMDONE) { 3584 dwc2_writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS); 3585 3586 dwc2_hsotg_irq_enumdone(hsotg); 3587 } 3588 3589 if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) { 3590 u32 daint = dwc2_readl(hsotg->regs + DAINT); 3591 u32 daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); 3592 u32 daint_out, daint_in; 3593 int ep; 3594 3595 daint &= daintmsk; 3596 daint_out = daint >> DAINT_OUTEP_SHIFT; 3597 daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT); 3598 3599 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint); 3600 3601 for (ep = 0; ep < hsotg->num_of_eps && daint_out; 3602 ep++, daint_out >>= 1) { 3603 if (daint_out & 1) 3604 dwc2_hsotg_epint(hsotg, ep, 0); 3605 } 3606 3607 for (ep = 0; ep < hsotg->num_of_eps && daint_in; 3608 ep++, daint_in >>= 1) { 3609 if (daint_in & 1) 3610 dwc2_hsotg_epint(hsotg, ep, 1); 3611 } 3612 } 3613 3614 /* check both FIFOs */ 3615 3616 if (gintsts & GINTSTS_NPTXFEMP) { 3617 dev_dbg(hsotg->dev, "NPTxFEmp\n"); 3618 3619 /* 3620 * Disable the interrupt to stop it happening again 3621 * unless one of these endpoint routines decides that 3622 * it needs re-enabling 3623 */ 3624 3625 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP); 3626 dwc2_hsotg_irq_fifoempty(hsotg, false); 3627 } 3628 3629 if (gintsts & GINTSTS_PTXFEMP) { 3630 dev_dbg(hsotg->dev, "PTxFEmp\n"); 3631 3632 /* See note in GINTSTS_NPTxFEmp */ 3633 3634 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP); 3635 dwc2_hsotg_irq_fifoempty(hsotg, true); 3636 } 3637 3638 if (gintsts & GINTSTS_RXFLVL) { 3639 /* 3640 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty, 3641 * we need to retry dwc2_hsotg_handle_rx if this is still 3642 * set. 
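 *
 * (that retry is performed by the IRQ_RETRY_MASK loop at the bottom
 * of this handler rather than by looping here)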
3643 */
3644
3645 dwc2_hsotg_handle_rx(hsotg);
3646 }
3647
3648 if (gintsts & GINTSTS_ERLYSUSP) {
3649 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
3650 dwc2_writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
3651 }
3652
3653 /*
3654 * these next two seem to crop up occasionally causing the core
3655 * to shut down the USB transfer, so try clearing them and logging
3656 * the occurrence.
3657 */
3658
3659 if (gintsts & GINTSTS_GOUTNAKEFF) {
3660 u8 idx;
3661 u32 epctrl;
3662 u32 gintmsk;
3663 struct dwc2_hsotg_ep *hs_ep;
3664
3665 /* Mask this interrupt */
3666 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3667 gintmsk &= ~GINTSTS_GOUTNAKEFF;
3668 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
3669
3670 dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
3671 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3672 hs_ep = hsotg->eps_out[idx];
3673 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
3674
3675 if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
3676 epctrl |= DXEPCTL_SNAK;
3677 epctrl |= DXEPCTL_EPDIS;
3678 dwc2_writel(epctrl, hsotg->regs + DOEPCTL(idx));
3679 }
3680 }
3681
3682 /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
3683 }
3684
3685 if (gintsts & GINTSTS_GINNAKEFF) {
3686 dev_info(hsotg->dev, "GINNakEff triggered\n");
3687
3688 __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
3689
3690 dwc2_hsotg_dump(hsotg);
3691 }
3692
3693 if (gintsts & GINTSTS_INCOMPL_SOIN)
3694 dwc2_gadget_handle_incomplete_isoc_in(hsotg);
3695
3696 if (gintsts & GINTSTS_INCOMPL_SOOUT)
3697 dwc2_gadget_handle_incomplete_isoc_out(hsotg);
3698
3699 /*
3700 * if we've had fifo events, we should try and go around the
3701 * loop again to see if there's any point in returning yet.
3702 */
3703
3704 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
3705 goto irq_retry;
3706
3707 spin_unlock(&hsotg->lock);
3708
3709 return IRQ_HANDLED;
3710 }
3711
3712 static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
3713 u32 bit, u32 timeout)
3714 {
3715 u32 i;
3716
3717 for (i = 0; i < timeout; i++) {
3718 if (dwc2_readl(hs_otg->regs + reg) & bit)
3719 return 0;
3720 udelay(1);
3721 }
3722
3723 return -ETIMEDOUT;
3724 }
3725
3726 static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
3727 struct dwc2_hsotg_ep *hs_ep)
3728 {
3729 u32 epctrl_reg;
3730 u32 epint_reg;
3731
3732 epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
3733 DOEPCTL(hs_ep->index);
3734 epint_reg = hs_ep->dir_in ?
DIEPINT(hs_ep->index) :
3735 DOEPINT(hs_ep->index);
3736
3737 dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
3738 hs_ep->name);
3739
3740 if (hs_ep->dir_in) {
3741 if (hsotg->dedicated_fifos || hs_ep->periodic) {
3742 __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
3743 /* Wait for Nak effect */
3744 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
3745 DXEPINT_INEPNAKEFF, 100))
3746 dev_warn(hsotg->dev,
3747 "%s: timeout DIEPINT.NAKEFF\n",
3748 __func__);
3749 } else {
3750 __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK);
3751 /* Wait for Nak effect */
3752 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3753 GINTSTS_GINNAKEFF, 100))
3754 dev_warn(hsotg->dev,
3755 "%s: timeout GINTSTS.GINNAKEFF\n",
3756 __func__);
3757 }
3758 } else {
3759 if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
3760 __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
3761
3762 /* Wait for global nak to take effect */
3763 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3764 GINTSTS_GOUTNAKEFF, 100))
3765 dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
3766 __func__);
3767 }
3768
3769 /* Disable ep */
3770 __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
3771
3772 /* Wait for ep to be disabled */
3773 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
3774 dev_warn(hsotg->dev,
3775 "%s: timeout DOEPCTL.EPDisable\n", __func__);
3776
3777 /* Clear EPDISBLD interrupt */
3778 __orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);
3779
3780 if (hs_ep->dir_in) {
3781 unsigned short fifo_index;
3782
3783 if (hsotg->dedicated_fifos || hs_ep->periodic)
3784 fifo_index = hs_ep->fifo_index;
3785 else
3786 fifo_index = 0;
3787
3788 /* Flush TX FIFO */
3789 dwc2_flush_tx_fifo(hsotg, fifo_index);
3790
3791 /* Clear Global In NP NAK in Shared FIFO for non periodic ep */
3792 if (!hsotg->dedicated_fifos && !hs_ep->periodic)
3793 __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
3794
3795 } else {
3796 /* Remove global NAKs */
3797 __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
3798 }
3799 }
3800
3801 /**
3802 * dwc2_hsotg_ep_enable - enable the given endpoint
3803 * @ep: The USB endpoint to configure
3804 * @desc: The USB endpoint descriptor to configure with.
3805 *
3806 * This is called from the USB gadget code's usb_ep_enable().
3807 */
3808 static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3809 const struct usb_endpoint_descriptor *desc)
3810 {
3811 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
3812 struct dwc2_hsotg *hsotg = hs_ep->parent;
3813 unsigned long flags;
3814 unsigned int index = hs_ep->index;
3815 u32 epctrl_reg;
3816 u32 epctrl;
3817 u32 mps;
3818 u32 mc;
3819 u32 mask;
3820 unsigned int dir_in;
3821 unsigned int i, val, size;
3822 int ret = 0;
3823
3824 dev_dbg(hsotg->dev,
3825 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
3826 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
3827 desc->wMaxPacketSize, desc->bInterval);
3828
3829 /* not to be called for EP0 */
3830 if (index == 0) {
3831 dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
3832 return -EINVAL;
3833 }
3834
3835 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
3836 if (dir_in != hs_ep->dir_in) {
3837 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
3838 return -EINVAL;
3839 }
3840
3841 mps = usb_endpoint_maxp(desc);
3842 mc = usb_endpoint_maxp_mult(desc);
3843
3844 /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
3845
3846 epctrl_reg = dir_in ?

/**
 * dwc2_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
 */
static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	unsigned int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	u32 mc;
	u32 mask;
	unsigned int dir_in;
	unsigned int i, val, size;
	int ret = 0;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	if (index == 0) {
		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
		return -EINVAL;
	}

	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	mps = usb_endpoint_maxp(desc);
	mc = usb_endpoint_maxp_mult(desc);

	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epctrl = dwc2_readl(hsotg->regs + epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	/* Allocate DMA descriptor chain for non-ctrl endpoints */
	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
			MAX_DMA_DESC_NUM_GENERIC *
			sizeof(struct dwc2_dma_desc),
			&hs_ep->desc_list_dma, GFP_ATOMIC);
		if (!hs_ep->desc_list) {
			ret = -ENOMEM;
			goto error2;
		}
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
	epctrl |= DXEPCTL_MPS(mps);

	/*
	 * mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint
	 */
	epctrl |= DXEPCTL_USBACTEP;

	/* update the endpoint state */
	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);

	/* default, set to non-periodic */
	hs_ep->isochronous = 0;
	hs_ep->periodic = 0;
	hs_ep->halted = 0;
	hs_ep->interval = desc->bInterval;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_ISOC:
		epctrl |= DXEPCTL_EPTYPE_ISO;
		epctrl |= DXEPCTL_SETEVENFR;
		hs_ep->isochronous = 1;
		hs_ep->interval = 1 << (desc->bInterval - 1);
		hs_ep->target_frame = TARGET_FRAME_INITIAL;
		hs_ep->isoc_chain_num = 0;
		hs_ep->next_desc = 0;
		if (dir_in) {
			hs_ep->periodic = 1;
			mask = dwc2_readl(hsotg->regs + DIEPMSK);
			mask |= DIEPMSK_NAKMSK;
			dwc2_writel(mask, hsotg->regs + DIEPMSK);
		} else {
			mask = dwc2_readl(hsotg->regs + DOEPMSK);
			mask |= DOEPMSK_OUTTKNEPDISMSK;
			dwc2_writel(mask, hsotg->regs + DOEPMSK);
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= DXEPCTL_EPTYPE_BULK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in)
			hs_ep->periodic = 1;

		if (hsotg->gadget.speed == USB_SPEED_HIGH)
			hs_ep->interval = 1 << (desc->bInterval - 1);

		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= DXEPCTL_EPTYPE_CONTROL;
		break;
	}

	/*
	 * if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && hsotg->dedicated_fifos) {
		u32 fifo_index = 0;
		u32 fifo_size = UINT_MAX;

		size = hs_ep->ep.maxpacket * hs_ep->mc;
		for (i = 1; i < hsotg->num_of_eps; ++i) {
			if (hsotg->fifo_map & (1 << i))
				continue;
			val = dwc2_readl(hsotg->regs + DPTXFSIZN(i));
			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
			if (val < size)
				continue;
			/* Search for smallest acceptable fifo */
			if (val < fifo_size) {
				fifo_size = val;
				fifo_index = i;
			}
		}
		if (!fifo_index) {
			dev_err(hsotg->dev,
				"%s: No suitable fifo found\n", __func__);
			ret = -ENOMEM;
			goto error1;
		}
		hsotg->fifo_map |= 1 << fifo_index;
		epctrl |= DXEPCTL_TXFNUM(fifo_index);
		hs_ep->fifo_index = fifo_index;
		hs_ep->fifo_size = fifo_size;
	}

	/* for non control endpoints, set PID to D0 */
	if (index && !hs_ep->isochronous)
		epctrl |= DXEPCTL_SETD0PID;

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	dwc2_writel(epctrl, hsotg->regs + epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, dwc2_readl(hsotg->regs + epctrl_reg));

	/* enable the endpoint interrupt */
	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

error1:
	spin_unlock_irqrestore(&hsotg->lock, flags);

error2:
	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
		dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
			sizeof(struct dwc2_dma_desc),
			hs_ep->desc_list, hs_ep->desc_list_dma);
		hs_ep->desc_list = NULL;
	}

	return ret;
}
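
/*
 * Editor's sketch (illustrative only): roughly how a gadget function
 * driver ends up in dwc2_hsotg_ep_enable() above. The names here
 * (my_func_bind_ep, bulk_in_desc) are hypothetical; the gadget core
 * routes usb_ep_enable() to the .enable op of this driver.
 */
static int __maybe_unused my_func_bind_ep(struct usb_ep *ep,
					  struct usb_endpoint_descriptor *bulk_in_desc)
{
	/* The gadget core expects ep->desc to be set before enabling */
	ep->desc = bulk_in_desc;

	/* Invokes dwc2_hsotg_ep_enable() through dwc2_hsotg_ep_ops */
	return usb_ep_enable(ep);
}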

/**
 * dwc2_hsotg_ep_disable - disable given endpoint
 * @ep: The endpoint to disable.
 */
static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	if (ep == &hsotg->eps_out[0]->ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);

	spin_lock_irqsave(&hsotg->lock, flags);

	ctrl = dwc2_readl(hsotg->regs + epctrl_reg);

	if (ctrl & DXEPCTL_EPENA)
		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);

	ctrl &= ~DXEPCTL_EPENA;
	ctrl &= ~DXEPCTL_USBACTEP;
	ctrl |= DXEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	dwc2_writel(ctrl, hsotg->regs + epctrl_reg);

	/* disable endpoint interrupts */
	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);

	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
	hs_ep->fifo_index = 0;
	hs_ep->fifo_size = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}

/**
 * on_list - check whether a request is on the given endpoint's queue
 * @ep: The endpoint to check.
 * @test: The request to test if it is on the endpoint.
 */
static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
{
	struct dwc2_hsotg_req *req, *treq;

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		if (req == test)
			return true;
	}

	return false;
}

/**
 * dwc2_hsotg_ep_dequeue - dequeue a request from an endpoint
 * @ep: The endpoint the request is queued on.
 * @req: The request to be removed from the queue.
 */
static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	spin_lock_irqsave(&hs->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs->lock, flags);
		return -EINVAL;
	}

	/* Dequeue already started request */
	if (req == &hs_ep->req->req)
		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);

	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs->lock, flags);

	return 0;
}
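
/*
 * Editor's sketch (illustrative only): cancelling an in-flight request
 * from a gadget function driver. usb_ep_dequeue() lands in
 * dwc2_hsotg_ep_dequeue() above and completes the request with
 * -ECONNRESET. The function name (my_cancel_request) is hypothetical.
 */
static void __maybe_unused my_cancel_request(struct usb_ep *ep,
					     struct usb_request *req)
{
	int ret;

	ret = usb_ep_dequeue(ep, req);	/* routes to the .dequeue op */
	if (ret)
		pr_debug("request %p was not queued on %s\n", req, ep->name);
}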

/**
 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
 *       the endpoint is busy processing requests.
 *
 * We need to stall the endpoint immediately if the request comes from
 * the SetFeature protocol command handler.
 */
static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	int index = hs_ep->index;
	u32 epreg;
	u32 epctl;
	u32 xfertype;

	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

	if (index == 0) {
		if (value)
			dwc2_hsotg_stall_ep0(hs);
		else
			dev_warn(hs->dev,
				 "%s: can't clear halt on ep0\n", __func__);
		return 0;
	}

	if (hs_ep->isochronous) {
		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
		return -EINVAL;
	}

	if (!now && value && !list_empty(&hs_ep->queue)) {
		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
			ep->name);
		return -EAGAIN;
	}

	if (hs_ep->dir_in) {
		epreg = DIEPCTL(index);
		epctl = dwc2_readl(hs->regs + epreg);

		if (value) {
			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
			if (epctl & DXEPCTL_EPENA)
				epctl |= DXEPCTL_EPDIS;
		} else {
			epctl &= ~DXEPCTL_STALL;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		dwc2_writel(epctl, hs->regs + epreg);
	} else {
		epreg = DOEPCTL(index);
		epctl = dwc2_readl(hs->regs + epreg);

		if (value) {
			epctl |= DXEPCTL_STALL;
		} else {
			epctl &= ~DXEPCTL_STALL;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		dwc2_writel(epctl, hs->regs + epreg);
	}

	hs_ep->halted = value;

	return 0;
}

/**
 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 */
static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&hs->lock, flags);
	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}
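
/*
 * Editor's sketch (illustrative only): how a function driver stalls an
 * endpoint, e.g. to report a protocol error. usb_ep_set_halt() reaches
 * dwc2_hsotg_ep_sethalt_lock() above with now == false, so it returns
 * -EAGAIN while requests are still pending.
 */
static void __maybe_unused my_report_protocol_stall(struct usb_ep *ep)
{
	if (usb_ep_set_halt(ep) == -EAGAIN)
		pr_debug("%s busy, halt deferred\n", ep->name);
}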

static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
	.enable		= dwc2_hsotg_ep_enable,
	.disable	= dwc2_hsotg_ep_disable,
	.alloc_request	= dwc2_hsotg_ep_alloc_request,
	.free_request	= dwc2_hsotg_ep_free_request,
	.queue		= dwc2_hsotg_ep_queue_lock,
	.dequeue	= dwc2_hsotg_ep_dequeue,
	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
	/* note, don't believe we have any call for the fifo routines */
};

/**
 * dwc2_hsotg_init - initialize the usb core
 * @hsotg: The driver state
 */
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
	u32 trdtim;
	u32 usbcfg;

	/* unmask subset of endpoint interrupts */

	dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
		    hsotg->regs + DIEPMSK);

	dwc2_writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
		    hsotg->regs + DOEPMSK);

	dwc2_writel(0, hsotg->regs + DAINTMSK);

	/* Be in disconnected state until gadget is registered */
	__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);

	/* setup fifos */

	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		dwc2_readl(hsotg->regs + GRXFSIZ),
		dwc2_readl(hsotg->regs + GNPTXFSIZ));

	dwc2_hsotg_init_fifo(hsotg);

	/* keep other bits untouched (so e.g. forced modes are not lost) */
	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
		    GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
		  (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	if (using_dma(hsotg))
		__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
}
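
/*
 * Editor's note (added commentary): the turnaround values written above
 * follow the usual UTMI+ guidance - 9 PHY clocks for an 8-bit interface,
 * 5 for a 16-bit one - since the narrower bus needs more cycles per byte.
 * A hypothetical helper making that mapping explicit:
 */
static inline u32 dwc2_usb_trdtim(u32 phyif)
{
	return (phyif == GUSBCFG_PHYIF8) ? 9 : 5;
}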

/**
 * dwc2_hsotg_udc_start - prepare the udc for work
 * @gadget: The usb gadget state
 * @driver: The usb gadget driver
 *
 * Perform initialization to prepare udc device and driver
 * to work.
 */
static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
				struct usb_gadget_driver *driver)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;
	int ret;

	if (!hsotg) {
		pr_err("%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}

	if (driver->max_speed < USB_SPEED_FULL)
		dev_err(hsotg->dev, "%s: bad speed\n", __func__);

	if (!driver->setup) {
		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
		return -EINVAL;
	}

	WARN_ON(hsotg->driver);

	driver->driver.bus = NULL;
	hsotg->driver = driver;
	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		ret = dwc2_lowlevel_hw_enable(hsotg);
		if (ret)
			goto err;
	}

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);

	spin_lock_irqsave(&hsotg->lock, flags);
	if (dwc2_hw_is_device(hsotg)) {
		dwc2_hsotg_init(hsotg);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
	}

	hsotg->enabled = 0;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);

	return 0;

err:
	hsotg->driver = NULL;
	return ret;
}

/**
 * dwc2_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 *
 * Stop the udc hw block and stay tuned for future transmissions.
 */
static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;
	int ep;

	if (!hsotg)
		return -ENODEV;

	/* all endpoints should be shutdown */
	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
		if (hsotg->eps_in[ep])
			dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
		if (hsotg->eps_out[ep])
			dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	hsotg->driver = NULL;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	hsotg->enabled = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, NULL);

	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		dwc2_lowlevel_hw_disable(hsotg);

	return 0;
}

/**
 * dwc2_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
}
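
/*
 * Editor's sketch (illustrative only): the UDC core, not this file,
 * drives the ops above. Binding a gadget driver ends up in
 * dwc2_hsotg_udc_start(); usb_gadget_connect() below reaches
 * dwc2_hsotg_pullup() with is_on == 1. Hypothetical caller:
 */
static void __maybe_unused my_gadget_go_online(struct usb_gadget *gadget)
{
	/* Enable the D+ pullup so the host can enumerate us */
	usb_gadget_connect(gadget);
}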
4377 } 4378 4379 spin_lock_irqsave(&hsotg->lock, flags); 4380 if (is_on) { 4381 hsotg->enabled = 1; 4382 dwc2_hsotg_core_init_disconnected(hsotg, false); 4383 dwc2_hsotg_core_connect(hsotg); 4384 } else { 4385 dwc2_hsotg_core_disconnect(hsotg); 4386 dwc2_hsotg_disconnect(hsotg); 4387 hsotg->enabled = 0; 4388 } 4389 4390 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4391 spin_unlock_irqrestore(&hsotg->lock, flags); 4392 4393 return 0; 4394 } 4395 4396 static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active) 4397 { 4398 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4399 unsigned long flags; 4400 4401 dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active); 4402 spin_lock_irqsave(&hsotg->lock, flags); 4403 4404 /* 4405 * If controller is hibernated, it must exit from hibernation 4406 * before being initialized / de-initialized 4407 */ 4408 if (hsotg->lx_state == DWC2_L2) 4409 dwc2_exit_hibernation(hsotg, false); 4410 4411 if (is_active) { 4412 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 4413 4414 dwc2_hsotg_core_init_disconnected(hsotg, false); 4415 if (hsotg->enabled) 4416 dwc2_hsotg_core_connect(hsotg); 4417 } else { 4418 dwc2_hsotg_core_disconnect(hsotg); 4419 dwc2_hsotg_disconnect(hsotg); 4420 } 4421 4422 spin_unlock_irqrestore(&hsotg->lock, flags); 4423 return 0; 4424 } 4425 4426 /** 4427 * dwc2_hsotg_vbus_draw - report bMaxPower field 4428 * @gadget: The usb gadget state 4429 * @mA: Amount of current 4430 * 4431 * Report how much power the device may consume to the phy. 4432 */ 4433 static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA) 4434 { 4435 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4436 4437 if (IS_ERR_OR_NULL(hsotg->uphy)) 4438 return -ENOTSUPP; 4439 return usb_phy_set_power(hsotg->uphy, mA); 4440 } 4441 4442 static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = { 4443 .get_frame = dwc2_hsotg_gadget_getframe, 4444 .udc_start = dwc2_hsotg_udc_start, 4445 .udc_stop = dwc2_hsotg_udc_stop, 4446 .pullup = dwc2_hsotg_pullup, 4447 .vbus_session = dwc2_hsotg_vbus_session, 4448 .vbus_draw = dwc2_hsotg_vbus_draw, 4449 }; 4450 4451 /** 4452 * dwc2_hsotg_initep - initialise a single endpoint 4453 * @hsotg: The device state. 4454 * @hs_ep: The endpoint to be initialised. 4455 * @epnum: The endpoint number 4456 * 4457 * Initialise the given endpoint (as part of the probe and device state 4458 * creation) to give to the gadget driver. Setup the endpoint name, any 4459 * direction information and other state that may be required. 4460 */ 4461 static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg, 4462 struct dwc2_hsotg_ep *hs_ep, 4463 int epnum, 4464 bool dir_in) 4465 { 4466 char *dir; 4467 4468 if (epnum == 0) 4469 dir = ""; 4470 else if (dir_in) 4471 dir = "in"; 4472 else 4473 dir = "out"; 4474 4475 hs_ep->dir_in = dir_in; 4476 hs_ep->index = epnum; 4477 4478 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir); 4479 4480 INIT_LIST_HEAD(&hs_ep->queue); 4481 INIT_LIST_HEAD(&hs_ep->ep.ep_list); 4482 4483 /* add to the list of endpoints known by the gadget driver */ 4484 if (epnum) 4485 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list); 4486 4487 hs_ep->parent = hsotg; 4488 hs_ep->ep.name = hs_ep->name; 4489 4490 if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW) 4491 usb_ep_set_maxpacket_limit(&hs_ep->ep, 8); 4492 else 4493 usb_ep_set_maxpacket_limit(&hs_ep->ep, 4494 epnum ? 

/**
 * dwc2_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 * @dir_in: True if the endpoint is an IN endpoint
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Set up the endpoint name, any
 * direction information and other state that may be required.
 */
static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      int epnum,
			      bool dir_in)
{
	char *dir;

	if (epnum == 0)
		dir = "";
	else if (dir_in)
		dir = "in";
	else
		dir = "out";

	hs_ep->dir_in = dir_in;
	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;

	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
	else
		usb_ep_set_maxpacket_limit(&hs_ep->ep,
					   epnum ? 1024 : EP0_MPS_LIMIT);
	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;

	if (epnum == 0) {
		hs_ep->ep.caps.type_control = true;
	} else {
		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
			hs_ep->ep.caps.type_iso = true;
			hs_ep->ep.caps.type_bulk = true;
		}
		hs_ep->ep.caps.type_int = true;
	}

	if (dir_in)
		hs_ep->ep.caps.dir_in = true;
	else
		hs_ep->ep.caps.dir_out = true;

	/*
	 * if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */

	if (using_dma(hsotg)) {
		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);

		if (dir_in)
			dwc2_writel(next, hsotg->regs + DIEPCTL(epnum));
		else
			dwc2_writel(next, hsotg->regs + DOEPCTL(epnum));
	}
}
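
/*
 * Editor's sketch (illustrative only): after dwc2_hsotg_initep() has
 * populated gadget.ep_list, a function driver typically walks it via
 * gadget_for_each_ep() (or lets usb_ep_autoconfig() pick) to claim
 * endpoints:
 */
static void __maybe_unused my_list_endpoints(struct usb_gadget *gadget)
{
	struct usb_ep *ep;

	gadget_for_each_ep(ep, gadget)
		pr_debug("gadget exposes %s (maxpacket %d)\n",
			 ep->name, ep->maxpacket);
}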
"dedicated" : "shared", 4579 hsotg->fifo_mem); 4580 return 0; 4581 } 4582 4583 /** 4584 * dwc2_hsotg_dump - dump state of the udc 4585 * @param: The device state 4586 */ 4587 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) 4588 { 4589 #ifdef DEBUG 4590 struct device *dev = hsotg->dev; 4591 void __iomem *regs = hsotg->regs; 4592 u32 val; 4593 int idx; 4594 4595 dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n", 4596 dwc2_readl(regs + DCFG), dwc2_readl(regs + DCTL), 4597 dwc2_readl(regs + DIEPMSK)); 4598 4599 dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n", 4600 dwc2_readl(regs + GAHBCFG), dwc2_readl(regs + GHWCFG1)); 4601 4602 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 4603 dwc2_readl(regs + GRXFSIZ), dwc2_readl(regs + GNPTXFSIZ)); 4604 4605 /* show periodic fifo settings */ 4606 4607 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 4608 val = dwc2_readl(regs + DPTXFSIZN(idx)); 4609 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx, 4610 val >> FIFOSIZE_DEPTH_SHIFT, 4611 val & FIFOSIZE_STARTADDR_MASK); 4612 } 4613 4614 for (idx = 0; idx < hsotg->num_of_eps; idx++) { 4615 dev_info(dev, 4616 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, 4617 dwc2_readl(regs + DIEPCTL(idx)), 4618 dwc2_readl(regs + DIEPTSIZ(idx)), 4619 dwc2_readl(regs + DIEPDMA(idx))); 4620 4621 val = dwc2_readl(regs + DOEPCTL(idx)); 4622 dev_info(dev, 4623 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", 4624 idx, dwc2_readl(regs + DOEPCTL(idx)), 4625 dwc2_readl(regs + DOEPTSIZ(idx)), 4626 dwc2_readl(regs + DOEPDMA(idx))); 4627 } 4628 4629 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", 4630 dwc2_readl(regs + DVBUSDIS), dwc2_readl(regs + DVBUSPULSE)); 4631 #endif 4632 } 4633 4634 /** 4635 * dwc2_gadget_init - init function for gadget 4636 * @dwc2: The data structure for the DWC2 driver. 4637 * @irq: The IRQ number for the controller. 

/**
 * dwc2_gadget_init - init function for gadget
 * @hsotg: The device state
 * @irq: The IRQ number for the controller.
 */
int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{
	struct device *dev = hsotg->dev;
	int epnum;
	int ret;

	/* Dump fifo information */
	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
		hsotg->params.g_np_tx_fifo_size);
	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);

	hsotg->gadget.max_speed = USB_SPEED_HIGH;
	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
	hsotg->gadget.name = dev_name(dev);
	if (hsotg->dr_mode == USB_DR_MODE_OTG)
		hsotg->gadget.is_otg = 1;
	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

	ret = dwc2_hsotg_hw_cfg(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
		return ret;
	}

	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
					DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ctrl_buff)
		return -ENOMEM;

	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
				       DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ep0_buff)
		return -ENOMEM;

	if (using_desc_dma(hsotg)) {
		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
		if (ret < 0)
			return ret;
	}

	ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
			       dev_name(hsotg->dev), hsotg);
	if (ret < 0) {
		dev_err(dev, "cannot claim IRQ for gadget\n");
		return ret;
	}

	/* hsotg->num_of_eps includes ep0 (see dwc2_hsotg_hw_cfg) */

	if (hsotg->num_of_eps == 0) {
		dev_err(dev, "wrong number of EPs (zero)\n");
		return -EINVAL;
	}

	/* setup endpoint information */

	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;

	/* allocate EP0 request */

	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
						      GFP_KERNEL);
	if (!hsotg->ctrl_req) {
		dev_err(dev, "failed to allocate ctrl req\n");
		return -ENOMEM;
	}

	/* initialise the endpoints now the core has been initialised */
	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
		if (hsotg->eps_in[epnum])
			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
					  epnum, 1);
		if (hsotg->eps_out[epnum])
			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
					  epnum, 0);
	}

	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
	if (ret)
		return ret;

	dwc2_hsotg_dump(hsotg);

	return 0;
}

/**
 * dwc2_hsotg_remove - remove function for hsotg driver
 * @hsotg: The device state
 */
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
	usb_del_gadget_udc(&hsotg->gadget);

	return 0;
}
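
/*
 * Editor's sketch (illustrative only): the platform glue pairs these
 * entry points roughly as below during probe and remove. Both functions
 * are defined in this file; the wrappers and error handling here are
 * hypothetical and elided.
 */
static int __maybe_unused my_glue_probe(struct dwc2_hsotg *hsotg, int irq)
{
	/* Registers the UDC; gadget drivers can bind from here on */
	return dwc2_gadget_init(hsotg, irq);
}

static void __maybe_unused my_glue_remove(struct dwc2_hsotg *hsotg)
{
	/* Unregisters the UDC registered by dwc2_gadget_init() */
	dwc2_hsotg_remove(hsotg);
}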

int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state != DWC2_L0)
		return 0;

	if (hsotg->driver) {
		int ep;

		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		if (hsotg->enabled)
			dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
		spin_unlock_irqrestore(&hsotg->lock, flags);

		for (ep = 0; ep < hsotg->num_of_eps; ep++) {
			if (hsotg->eps_in[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
			if (hsotg->eps_out[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
		}
	}

	return 0;
}

int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state == DWC2_L2)
		return 0;

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled)
			dwc2_hsotg_core_connect(hsotg);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return 0;
}

/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
	}
	dr->valid = true;
	return 0;
}
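
/*
 * Editor's sketch (illustrative only): power-management code that cuts
 * controller power brackets the power-down with these helpers, roughly
 * as below. Both functions are defined in this file; the surrounding
 * power hooks are hypothetical.
 */
static void __maybe_unused my_powerdown_cycle(struct dwc2_hsotg *hsotg)
{
	dwc2_backup_device_registers(hsotg);
	/* ... controller power is removed and later restored here ... */
	dwc2_restore_device_registers(hsotg);
}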

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the usb bus, device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(dctl, hsotg->regs + DCTL);

	return 0;
}