1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC. 4 * 5 * 2013 (c) Aeroflex Gaisler AB 6 * 7 * This driver supports GRUSBDC USB Device Controller cores available in the 8 * GRLIB VHDL IP core library. 9 * 10 * Full documentation of the GRUSBDC core can be found here: 11 * http://www.gaisler.com/products/grlib/grip.pdf 12 * 13 * Contributors: 14 * - Andreas Larsson <andreas@gaisler.com> 15 * - Marko Isomaki 16 */ 17 18 /* 19 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each 20 * individually configurable to any of the four USB transfer types. This driver 21 * only supports cores in DMA mode. 22 */ 23 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/slab.h> 27 #include <linux/spinlock.h> 28 #include <linux/errno.h> 29 #include <linux/list.h> 30 #include <linux/interrupt.h> 31 #include <linux/device.h> 32 #include <linux/usb.h> 33 #include <linux/usb/ch9.h> 34 #include <linux/usb/gadget.h> 35 #include <linux/dma-mapping.h> 36 #include <linux/dmapool.h> 37 #include <linux/debugfs.h> 38 #include <linux/seq_file.h> 39 #include <linux/of_platform.h> 40 #include <linux/of_irq.h> 41 #include <linux/of_address.h> 42 43 #include <asm/byteorder.h> 44 45 #include "gr_udc.h" 46 47 #define DRIVER_NAME "gr_udc" 48 #define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller" 49 50 static const char driver_name[] = DRIVER_NAME; 51 52 #define gr_read32(x) (ioread32be((x))) 53 #define gr_write32(x, v) (iowrite32be((v), (x))) 54 55 /* USB speed and corresponding string calculated from status register value */ 56 #define GR_SPEED(status) \ 57 ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH) 58 #define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status)) 59 60 /* Size of hardware buffer calculated from epctrl register value */ 61 #define GR_BUFFER_SIZE(epctrl) \ 62 ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \ 63 GR_EPCTRL_BUFSZ_SCALER) 64 65 /* ---------------------------------------------------------------------- */ 66 /* Debug printout functionality */ 67 68 static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"}; 69 70 static const char *gr_ep0state_string(enum gr_ep0state state) 71 { 72 static const char *const names[] = { 73 [GR_EP0_DISCONNECT] = "disconnect", 74 [GR_EP0_SETUP] = "setup", 75 [GR_EP0_IDATA] = "idata", 76 [GR_EP0_ODATA] = "odata", 77 [GR_EP0_ISTATUS] = "istatus", 78 [GR_EP0_OSTATUS] = "ostatus", 79 [GR_EP0_STALL] = "stall", 80 [GR_EP0_SUSPEND] = "suspend", 81 }; 82 83 if (state < 0 || state >= ARRAY_SIZE(names)) 84 return "UNKNOWN"; 85 86 return names[state]; 87 } 88 89 #ifdef VERBOSE_DEBUG 90 91 static void gr_dbgprint_request(const char *str, struct gr_ep *ep, 92 struct gr_request *req) 93 { 94 int buflen = ep->is_in ? req->req.length : req->req.actual; 95 int rowlen = 32; 96 int plen = min(rowlen, buflen); 97 98 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen, 99 (buflen > plen ? " (truncated)" : "")); 100 print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 101 rowlen, 4, req->req.buf, plen, false); 102 } 103 104 static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request, 105 u16 value, u16 index, u16 length) 106 { 107 dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n", 108 type, request, value, index, length); 109 } 110 #else /* !VERBOSE_DEBUG */ 111 112 static void gr_dbgprint_request(const char *str, struct gr_ep *ep, 113 struct gr_request *req) {} 114 115 static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request, 116 u16 value, u16 index, u16 length) {} 117 118 #endif /* VERBOSE_DEBUG */ 119 120 /* ---------------------------------------------------------------------- */ 121 /* Debugfs functionality */ 122 123 #ifdef CONFIG_USB_GADGET_DEBUG_FS 124 125 static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep) 126 { 127 u32 epctrl = gr_read32(&ep->regs->epctrl); 128 u32 epstat = gr_read32(&ep->regs->epstat); 129 int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS; 130 struct gr_request *req; 131 132 seq_printf(seq, "%s:\n", ep->ep.name); 133 seq_printf(seq, " mode = %s\n", gr_modestring[mode]); 134 seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH)); 135 seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED)); 136 seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV)); 137 seq_printf(seq, " dma_start = %d\n", ep->dma_start); 138 seq_printf(seq, " stopped = %d\n", ep->stopped); 139 seq_printf(seq, " wedged = %d\n", ep->wedged); 140 seq_printf(seq, " callback = %d\n", ep->callback); 141 seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket); 142 seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit); 143 seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer); 144 if (mode == 1 || mode == 3) 145 seq_printf(seq, " nt = %d\n", 146 (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS); 147 148 seq_printf(seq, " Buffer 0: %s %s%d\n", 149 epstat & GR_EPSTAT_B0 ? "valid" : "invalid", 150 epstat & GR_EPSTAT_BS ? " " : "selected ", 151 (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS); 152 seq_printf(seq, " Buffer 1: %s %s%d\n", 153 epstat & GR_EPSTAT_B1 ? "valid" : "invalid", 154 epstat & GR_EPSTAT_BS ? "selected " : " ", 155 (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS); 156 157 if (list_empty(&ep->queue)) { 158 seq_puts(seq, " Queue: empty\n\n"); 159 return; 160 } 161 162 seq_puts(seq, " Queue:\n"); 163 list_for_each_entry(req, &ep->queue, queue) { 164 struct gr_dma_desc *desc; 165 struct gr_dma_desc *next; 166 167 seq_printf(seq, " 0x%p: 0x%p %d %d\n", req, 168 &req->req.buf, req->req.actual, req->req.length); 169 170 next = req->first_desc; 171 do { 172 desc = next; 173 next = desc->next_desc; 174 seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n", 175 desc == req->curr_desc ? 'c' : ' ', 176 desc, desc->paddr, desc->ctrl, desc->data); 177 } while (desc != req->last_desc); 178 } 179 seq_puts(seq, "\n"); 180 } 181 182 static int gr_dfs_show(struct seq_file *seq, void *v) 183 { 184 struct gr_udc *dev = seq->private; 185 u32 control = gr_read32(&dev->regs->control); 186 u32 status = gr_read32(&dev->regs->status); 187 struct gr_ep *ep; 188 189 seq_printf(seq, "usb state = %s\n", 190 usb_state_string(dev->gadget.state)); 191 seq_printf(seq, "address = %d\n", 192 (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS); 193 seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status)); 194 seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state)); 195 seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled); 196 seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup); 197 seq_printf(seq, "test_mode = %d\n", dev->test_mode); 198 seq_puts(seq, "\n"); 199 200 list_for_each_entry(ep, &dev->ep_list, ep_list) 201 gr_seq_ep_show(seq, ep); 202 203 return 0; 204 } 205 DEFINE_SHOW_ATTRIBUTE(gr_dfs); 206 207 static void gr_dfs_create(struct gr_udc *dev) 208 { 209 const char *name = "gr_udc_state"; 210 211 dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root); 212 debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops); 213 } 214 215 static void gr_dfs_delete(struct gr_udc *dev) 216 { 217 debugfs_remove_recursive(dev->dfs_root); 218 } 219 220 #else /* !CONFIG_USB_GADGET_DEBUG_FS */ 221 222 static void gr_dfs_create(struct gr_udc *dev) {} 223 static void gr_dfs_delete(struct gr_udc *dev) {} 224 225 #endif /* CONFIG_USB_GADGET_DEBUG_FS */ 226 227 /* ---------------------------------------------------------------------- */ 228 /* DMA and request handling */ 229 230 /* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */ 231 static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags) 232 { 233 dma_addr_t paddr; 234 struct gr_dma_desc *dma_desc; 235 236 dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr); 237 if (!dma_desc) { 238 dev_err(ep->dev->dev, "Could not allocate from DMA pool\n"); 239 return NULL; 240 } 241 242 dma_desc->paddr = paddr; 243 244 return dma_desc; 245 } 246 247 static inline void gr_free_dma_desc(struct gr_udc *dev, 248 struct gr_dma_desc *desc) 249 { 250 dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr); 251 } 252 253 /* Frees the chain of struct gr_dma_desc for the given request */ 254 static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req) 255 { 256 struct gr_dma_desc *desc; 257 struct gr_dma_desc *next; 258 259 next = req->first_desc; 260 if (!next) 261 return; 262 263 do { 264 desc = next; 265 next = desc->next_desc; 266 gr_free_dma_desc(dev, desc); 267 } while (desc != req->last_desc); 268 269 req->first_desc = NULL; 270 req->curr_desc = NULL; 271 req->last_desc = NULL; 272 } 273 274 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req); 275 276 /* 277 * Frees allocated resources and calls the appropriate completion function/setup 278 * package handler for a finished request. 279 * 280 * Must be called with dev->lock held and irqs disabled. 281 */ 282 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req, 283 int status) 284 __releases(&dev->lock) 285 __acquires(&dev->lock) 286 { 287 struct gr_udc *dev; 288 289 list_del_init(&req->queue); 290 291 if (likely(req->req.status == -EINPROGRESS)) 292 req->req.status = status; 293 else 294 status = req->req.status; 295 296 dev = ep->dev; 297 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); 298 gr_free_dma_desc_chain(dev, req); 299 300 if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */ 301 req->req.actual = req->req.length; 302 } else if (req->oddlen && req->req.actual > req->evenlen) { 303 /* 304 * Copy to user buffer in this case where length was not evenly 305 * divisible by ep->ep.maxpacket and the last descriptor was 306 * actually used. 307 */ 308 char *buftail = ((char *)req->req.buf + req->evenlen); 309 310 memcpy(buftail, ep->tailbuf, req->oddlen); 311 312 if (req->req.actual > req->req.length) { 313 /* We got more data than was requested */ 314 dev_dbg(ep->dev->dev, "Overflow for ep %s\n", 315 ep->ep.name); 316 gr_dbgprint_request("OVFL", ep, req); 317 req->req.status = -EOVERFLOW; 318 } 319 } 320 321 if (!status) { 322 if (ep->is_in) 323 gr_dbgprint_request("SENT", ep, req); 324 else 325 gr_dbgprint_request("RECV", ep, req); 326 } 327 328 /* Prevent changes to ep->queue during callback */ 329 ep->callback = 1; 330 if (req == dev->ep0reqo && !status) { 331 if (req->setup) 332 gr_ep0_setup(dev, req); 333 else 334 dev_err(dev->dev, 335 "Unexpected non setup packet on ep0in\n"); 336 } else if (req->req.complete) { 337 spin_unlock(&dev->lock); 338 339 usb_gadget_giveback_request(&ep->ep, &req->req); 340 341 spin_lock(&dev->lock); 342 } 343 ep->callback = 0; 344 } 345 346 static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 347 { 348 struct gr_request *req; 349 350 req = kzalloc(sizeof(*req), gfp_flags); 351 if (!req) 352 return NULL; 353 354 INIT_LIST_HEAD(&req->queue); 355 356 return &req->req; 357 } 358 359 /* 360 * Starts DMA for endpoint ep if there are requests in the queue. 361 * 362 * Must be called with dev->lock held and with !ep->stopped. 363 */ 364 static void gr_start_dma(struct gr_ep *ep) 365 { 366 struct gr_request *req; 367 u32 dmactrl; 368 369 if (list_empty(&ep->queue)) { 370 ep->dma_start = 0; 371 return; 372 } 373 374 req = list_first_entry(&ep->queue, struct gr_request, queue); 375 376 /* A descriptor should already have been allocated */ 377 BUG_ON(!req->curr_desc); 378 379 /* 380 * The DMA controller can not handle smaller OUT buffers than 381 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly 382 * long packet are received. Therefore an internal bounce buffer gets 383 * used when such a request gets enabled. 384 */ 385 if (!ep->is_in && req->oddlen) 386 req->last_desc->data = ep->tailbuf_paddr; 387 388 wmb(); /* Make sure all is settled before handing it over to DMA */ 389 390 /* Set the descriptor pointer in the hardware */ 391 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr); 392 393 /* Announce available descriptors */ 394 dmactrl = gr_read32(&ep->regs->dmactrl); 395 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA); 396 397 ep->dma_start = 1; 398 } 399 400 /* 401 * Finishes the first request in the ep's queue and, if available, starts the 402 * next request in queue. 403 * 404 * Must be called with dev->lock held, irqs disabled and with !ep->stopped. 405 */ 406 static void gr_dma_advance(struct gr_ep *ep, int status) 407 { 408 struct gr_request *req; 409 410 req = list_first_entry(&ep->queue, struct gr_request, queue); 411 gr_finish_request(ep, req, status); 412 gr_start_dma(ep); /* Regardless of ep->dma_start */ 413 } 414 415 /* 416 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA 417 * transfer to be canceled and clears GR_DMACTRL_DA. 418 * 419 * Must be called with dev->lock held. 420 */ 421 static void gr_abort_dma(struct gr_ep *ep) 422 { 423 u32 dmactrl; 424 425 dmactrl = gr_read32(&ep->regs->dmactrl); 426 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD); 427 } 428 429 /* 430 * Allocates and sets up a struct gr_dma_desc and putting it on the descriptor 431 * chain. 432 * 433 * Size is not used for OUT endpoints. Hardware can not be instructed to handle 434 * smaller buffer than MAXPL in the OUT direction. 435 */ 436 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req, 437 dma_addr_t data, unsigned size, gfp_t gfp_flags) 438 { 439 struct gr_dma_desc *desc; 440 441 desc = gr_alloc_dma_desc(ep, gfp_flags); 442 if (!desc) 443 return -ENOMEM; 444 445 desc->data = data; 446 if (ep->is_in) 447 desc->ctrl = 448 (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN; 449 else 450 desc->ctrl = GR_DESC_OUT_CTRL_IE; 451 452 if (!req->first_desc) { 453 req->first_desc = desc; 454 req->curr_desc = desc; 455 } else { 456 req->last_desc->next_desc = desc; 457 req->last_desc->next = desc->paddr; 458 req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX; 459 } 460 req->last_desc = desc; 461 462 return 0; 463 } 464 465 /* 466 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that 467 * together covers req->req.length bytes of the buffer at DMA address 468 * req->req.dma for the OUT direction. 469 * 470 * The first descriptor in the chain is enabled, the rest disabled. The 471 * interrupt handler will later enable them one by one when needed so we can 472 * find out when the transfer is finished. For OUT endpoints, all descriptors 473 * therefore generate interrutps. 474 */ 475 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req, 476 gfp_t gfp_flags) 477 { 478 u16 bytes_left; /* Bytes left to provide descriptors for */ 479 u16 bytes_used; /* Bytes accommodated for */ 480 int ret = 0; 481 482 req->first_desc = NULL; /* Signals that no allocation is done yet */ 483 bytes_left = req->req.length; 484 bytes_used = 0; 485 while (bytes_left > 0) { 486 dma_addr_t start = req->req.dma + bytes_used; 487 u16 size = min(bytes_left, ep->bytes_per_buffer); 488 489 if (size < ep->bytes_per_buffer) { 490 /* Prepare using bounce buffer */ 491 req->evenlen = req->req.length - bytes_left; 492 req->oddlen = size; 493 } 494 495 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); 496 if (ret) 497 goto alloc_err; 498 499 bytes_left -= size; 500 bytes_used += size; 501 } 502 503 req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN; 504 505 return 0; 506 507 alloc_err: 508 gr_free_dma_desc_chain(ep->dev, req); 509 510 return ret; 511 } 512 513 /* 514 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that 515 * together covers req->req.length bytes of the buffer at DMA address 516 * req->req.dma for the IN direction. 517 * 518 * When more data is provided than the maximum payload size, the hardware splits 519 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer 520 * is always set to a multiple of the maximum payload (restricted to the valid 521 * number of maximum payloads during high bandwidth isochronous or interrupt 522 * transfers) 523 * 524 * All descriptors are enabled from the beginning and we only generate an 525 * interrupt for the last one indicating that the entire request has been pushed 526 * to hardware. 527 */ 528 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req, 529 gfp_t gfp_flags) 530 { 531 u16 bytes_left; /* Bytes left in req to provide descriptors for */ 532 u16 bytes_used; /* Bytes in req accommodated for */ 533 int ret = 0; 534 535 req->first_desc = NULL; /* Signals that no allocation is done yet */ 536 bytes_left = req->req.length; 537 bytes_used = 0; 538 do { /* Allow for zero length packets */ 539 dma_addr_t start = req->req.dma + bytes_used; 540 u16 size = min(bytes_left, ep->bytes_per_buffer); 541 542 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); 543 if (ret) 544 goto alloc_err; 545 546 bytes_left -= size; 547 bytes_used += size; 548 } while (bytes_left > 0); 549 550 /* 551 * Send an extra zero length packet to indicate that no more data is 552 * available when req->req.zero is set and the data length is even 553 * multiples of ep->ep.maxpacket. 554 */ 555 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) { 556 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags); 557 if (ret) 558 goto alloc_err; 559 } 560 561 /* 562 * For IN packets we only want to know when the last packet has been 563 * transmitted (not just put into internal buffers). 564 */ 565 req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI; 566 567 return 0; 568 569 alloc_err: 570 gr_free_dma_desc_chain(ep->dev, req); 571 572 return ret; 573 } 574 575 /* Must be called with dev->lock held */ 576 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags) 577 { 578 struct gr_udc *dev = ep->dev; 579 int ret; 580 581 if (unlikely(!ep->ep.desc && ep->num != 0)) { 582 dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name); 583 return -EINVAL; 584 } 585 586 if (unlikely(!req->req.buf || !list_empty(&req->queue))) { 587 dev_err(dev->dev, 588 "Invalid request for %s: buf=%p list_empty=%d\n", 589 ep->ep.name, req->req.buf, list_empty(&req->queue)); 590 return -EINVAL; 591 } 592 593 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) { 594 dev_err(dev->dev, "-ESHUTDOWN"); 595 return -ESHUTDOWN; 596 } 597 598 /* Can't touch registers when suspended */ 599 if (dev->ep0state == GR_EP0_SUSPEND) { 600 dev_err(dev->dev, "-EBUSY"); 601 return -EBUSY; 602 } 603 604 /* Set up DMA mapping in case the caller didn't */ 605 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in); 606 if (ret) { 607 dev_err(dev->dev, "usb_gadget_map_request"); 608 return ret; 609 } 610 611 if (ep->is_in) 612 ret = gr_setup_in_desc_list(ep, req, gfp_flags); 613 else 614 ret = gr_setup_out_desc_list(ep, req, gfp_flags); 615 if (ret) 616 return ret; 617 618 req->req.status = -EINPROGRESS; 619 req->req.actual = 0; 620 list_add_tail(&req->queue, &ep->queue); 621 622 /* Start DMA if not started, otherwise interrupt handler handles it */ 623 if (!ep->dma_start && likely(!ep->stopped)) 624 gr_start_dma(ep); 625 626 return 0; 627 } 628 629 /* 630 * Queue a request from within the driver. 631 * 632 * Must be called with dev->lock held. 633 */ 634 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req, 635 gfp_t gfp_flags) 636 { 637 if (ep->is_in) 638 gr_dbgprint_request("RESP", ep, req); 639 640 return gr_queue(ep, req, gfp_flags); 641 } 642 643 /* ---------------------------------------------------------------------- */ 644 /* General helper functions */ 645 646 /* 647 * Dequeue ALL requests. 648 * 649 * Must be called with dev->lock held and irqs disabled. 650 */ 651 static void gr_ep_nuke(struct gr_ep *ep) 652 { 653 struct gr_request *req; 654 655 ep->stopped = 1; 656 ep->dma_start = 0; 657 gr_abort_dma(ep); 658 659 while (!list_empty(&ep->queue)) { 660 req = list_first_entry(&ep->queue, struct gr_request, queue); 661 gr_finish_request(ep, req, -ESHUTDOWN); 662 } 663 } 664 665 /* 666 * Reset the hardware state of this endpoint. 667 * 668 * Must be called with dev->lock held. 669 */ 670 static void gr_ep_reset(struct gr_ep *ep) 671 { 672 gr_write32(&ep->regs->epctrl, 0); 673 gr_write32(&ep->regs->dmactrl, 0); 674 675 ep->ep.maxpacket = MAX_CTRL_PL_SIZE; 676 ep->ep.desc = NULL; 677 ep->stopped = 1; 678 ep->dma_start = 0; 679 } 680 681 /* 682 * Generate STALL on ep0in/out. 683 * 684 * Must be called with dev->lock held. 685 */ 686 static void gr_control_stall(struct gr_udc *dev) 687 { 688 u32 epctrl; 689 690 epctrl = gr_read32(&dev->epo[0].regs->epctrl); 691 gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS); 692 epctrl = gr_read32(&dev->epi[0].regs->epctrl); 693 gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS); 694 695 dev->ep0state = GR_EP0_STALL; 696 } 697 698 /* 699 * Halts, halts and wedges, or clears halt for an endpoint. 700 * 701 * Must be called with dev->lock held. 702 */ 703 static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost) 704 { 705 u32 epctrl; 706 int retval = 0; 707 708 if (ep->num && !ep->ep.desc) 709 return -EINVAL; 710 711 if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) 712 return -EOPNOTSUPP; 713 714 /* Never actually halt ep0, and therefore never clear halt for ep0 */ 715 if (!ep->num) { 716 if (halt && !fromhost) { 717 /* ep0 halt from gadget - generate protocol stall */ 718 gr_control_stall(ep->dev); 719 dev_dbg(ep->dev->dev, "EP: stall ep0\n"); 720 return 0; 721 } 722 return -EINVAL; 723 } 724 725 dev_dbg(ep->dev->dev, "EP: %s halt %s\n", 726 (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name); 727 728 epctrl = gr_read32(&ep->regs->epctrl); 729 if (halt) { 730 /* Set HALT */ 731 gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH); 732 ep->stopped = 1; 733 if (wedge) 734 ep->wedged = 1; 735 } else { 736 gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH); 737 ep->stopped = 0; 738 ep->wedged = 0; 739 740 /* Things might have been queued up in the meantime */ 741 if (!ep->dma_start) 742 gr_start_dma(ep); 743 } 744 745 return retval; 746 } 747 748 /* Must be called with dev->lock held */ 749 static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value) 750 { 751 if (dev->ep0state != value) 752 dev_vdbg(dev->dev, "STATE: ep0state=%s\n", 753 gr_ep0state_string(value)); 754 dev->ep0state = value; 755 } 756 757 /* 758 * Should only be called when endpoints can not generate interrupts. 759 * 760 * Must be called with dev->lock held. 761 */ 762 static void gr_disable_interrupts_and_pullup(struct gr_udc *dev) 763 { 764 gr_write32(&dev->regs->control, 0); 765 wmb(); /* Make sure that we do not deny one of our interrupts */ 766 dev->irq_enabled = 0; 767 } 768 769 /* 770 * Stop all device activity and disable data line pullup. 771 * 772 * Must be called with dev->lock held and irqs disabled. 773 */ 774 static void gr_stop_activity(struct gr_udc *dev) 775 { 776 struct gr_ep *ep; 777 778 list_for_each_entry(ep, &dev->ep_list, ep_list) 779 gr_ep_nuke(ep); 780 781 gr_disable_interrupts_and_pullup(dev); 782 783 gr_set_ep0state(dev, GR_EP0_DISCONNECT); 784 usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED); 785 } 786 787 /* ---------------------------------------------------------------------- */ 788 /* ep0 setup packet handling */ 789 790 static void gr_ep0_testmode_complete(struct usb_ep *_ep, 791 struct usb_request *_req) 792 { 793 struct gr_ep *ep; 794 struct gr_udc *dev; 795 u32 control; 796 797 ep = container_of(_ep, struct gr_ep, ep); 798 dev = ep->dev; 799 800 spin_lock(&dev->lock); 801 802 control = gr_read32(&dev->regs->control); 803 control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS); 804 gr_write32(&dev->regs->control, control); 805 806 spin_unlock(&dev->lock); 807 } 808 809 static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req) 810 { 811 /* Nothing needs to be done here */ 812 } 813 814 /* 815 * Queue a response on ep0in. 816 * 817 * Must be called with dev->lock held. 818 */ 819 static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length, 820 void (*complete)(struct usb_ep *ep, 821 struct usb_request *req)) 822 { 823 u8 *reqbuf = dev->ep0reqi->req.buf; 824 int status; 825 int i; 826 827 for (i = 0; i < length; i++) 828 reqbuf[i] = buf[i]; 829 dev->ep0reqi->req.length = length; 830 dev->ep0reqi->req.complete = complete; 831 832 status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC); 833 if (status < 0) 834 dev_err(dev->dev, 835 "Could not queue ep0in setup response: %d\n", status); 836 837 return status; 838 } 839 840 /* 841 * Queue a 2 byte response on ep0in. 842 * 843 * Must be called with dev->lock held. 844 */ 845 static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response) 846 { 847 __le16 le_response = cpu_to_le16(response); 848 849 return gr_ep0_respond(dev, (u8 *)&le_response, 2, 850 gr_ep0_dummy_complete); 851 } 852 853 /* 854 * Queue a ZLP response on ep0in. 855 * 856 * Must be called with dev->lock held. 857 */ 858 static inline int gr_ep0_respond_empty(struct gr_udc *dev) 859 { 860 return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete); 861 } 862 863 /* 864 * This is run when a SET_ADDRESS request is received. First writes 865 * the new address to the control register which is updated internally 866 * when the next IN packet is ACKED. 867 * 868 * Must be called with dev->lock held. 869 */ 870 static void gr_set_address(struct gr_udc *dev, u8 address) 871 { 872 u32 control; 873 874 control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK; 875 control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK; 876 control |= GR_CONTROL_SU; 877 gr_write32(&dev->regs->control, control); 878 } 879 880 /* 881 * Returns negative for STALL, 0 for successful handling and positive for 882 * delegation. 883 * 884 * Must be called with dev->lock held. 885 */ 886 static int gr_device_request(struct gr_udc *dev, u8 type, u8 request, 887 u16 value, u16 index) 888 { 889 u16 response; 890 u8 test; 891 892 switch (request) { 893 case USB_REQ_SET_ADDRESS: 894 dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff); 895 gr_set_address(dev, value & 0xff); 896 if (value) 897 usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS); 898 else 899 usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT); 900 return gr_ep0_respond_empty(dev); 901 902 case USB_REQ_GET_STATUS: 903 /* Self powered | remote wakeup */ 904 response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0); 905 return gr_ep0_respond_u16(dev, response); 906 907 case USB_REQ_SET_FEATURE: 908 switch (value) { 909 case USB_DEVICE_REMOTE_WAKEUP: 910 /* Allow remote wakeup */ 911 dev->remote_wakeup = 1; 912 return gr_ep0_respond_empty(dev); 913 914 case USB_DEVICE_TEST_MODE: 915 /* The hardware does not support TEST_FORCE_EN */ 916 test = index >> 8; 917 if (test >= TEST_J && test <= TEST_PACKET) { 918 dev->test_mode = test; 919 return gr_ep0_respond(dev, NULL, 0, 920 gr_ep0_testmode_complete); 921 } 922 } 923 break; 924 925 case USB_REQ_CLEAR_FEATURE: 926 switch (value) { 927 case USB_DEVICE_REMOTE_WAKEUP: 928 /* Disallow remote wakeup */ 929 dev->remote_wakeup = 0; 930 return gr_ep0_respond_empty(dev); 931 } 932 break; 933 } 934 935 return 1; /* Delegate the rest */ 936 } 937 938 /* 939 * Returns negative for STALL, 0 for successful handling and positive for 940 * delegation. 941 * 942 * Must be called with dev->lock held. 943 */ 944 static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request, 945 u16 value, u16 index) 946 { 947 if (dev->gadget.state != USB_STATE_CONFIGURED) 948 return -1; 949 950 /* 951 * Should return STALL for invalid interfaces, but udc driver does not 952 * know anything about that. However, many gadget drivers do not handle 953 * GET_STATUS so we need to take care of that. 954 */ 955 956 switch (request) { 957 case USB_REQ_GET_STATUS: 958 return gr_ep0_respond_u16(dev, 0x0000); 959 960 case USB_REQ_SET_FEATURE: 961 case USB_REQ_CLEAR_FEATURE: 962 /* 963 * No possible valid standard requests. Still let gadget drivers 964 * have a go at it. 965 */ 966 break; 967 } 968 969 return 1; /* Delegate the rest */ 970 } 971 972 /* 973 * Returns negative for STALL, 0 for successful handling and positive for 974 * delegation. 975 * 976 * Must be called with dev->lock held. 977 */ 978 static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request, 979 u16 value, u16 index) 980 { 981 struct gr_ep *ep; 982 int status; 983 int halted; 984 u8 epnum = index & USB_ENDPOINT_NUMBER_MASK; 985 u8 is_in = index & USB_ENDPOINT_DIR_MASK; 986 987 if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo)) 988 return -1; 989 990 if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0) 991 return -1; 992 993 ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]); 994 995 switch (request) { 996 case USB_REQ_GET_STATUS: 997 halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH; 998 return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0); 999 1000 case USB_REQ_SET_FEATURE: 1001 switch (value) { 1002 case USB_ENDPOINT_HALT: 1003 status = gr_ep_halt_wedge(ep, 1, 0, 1); 1004 if (status >= 0) 1005 status = gr_ep0_respond_empty(dev); 1006 return status; 1007 } 1008 break; 1009 1010 case USB_REQ_CLEAR_FEATURE: 1011 switch (value) { 1012 case USB_ENDPOINT_HALT: 1013 if (ep->wedged) 1014 return -1; 1015 status = gr_ep_halt_wedge(ep, 0, 0, 1); 1016 if (status >= 0) 1017 status = gr_ep0_respond_empty(dev); 1018 return status; 1019 } 1020 break; 1021 } 1022 1023 return 1; /* Delegate the rest */ 1024 } 1025 1026 /* Must be called with dev->lock held */ 1027 static void gr_ep0out_requeue(struct gr_udc *dev) 1028 { 1029 int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC); 1030 1031 if (ret) 1032 dev_err(dev->dev, "Could not queue ep0out setup request: %d\n", 1033 ret); 1034 } 1035 1036 /* 1037 * The main function dealing with setup requests on ep0. 1038 * 1039 * Must be called with dev->lock held and irqs disabled 1040 */ 1041 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req) 1042 __releases(&dev->lock) 1043 __acquires(&dev->lock) 1044 { 1045 union { 1046 struct usb_ctrlrequest ctrl; 1047 u8 raw[8]; 1048 u32 word[2]; 1049 } u; 1050 u8 type; 1051 u8 request; 1052 u16 value; 1053 u16 index; 1054 u16 length; 1055 int i; 1056 int status; 1057 1058 /* Restore from ep0 halt */ 1059 if (dev->ep0state == GR_EP0_STALL) { 1060 gr_set_ep0state(dev, GR_EP0_SETUP); 1061 if (!req->req.actual) 1062 goto out; 1063 } 1064 1065 if (dev->ep0state == GR_EP0_ISTATUS) { 1066 gr_set_ep0state(dev, GR_EP0_SETUP); 1067 if (req->req.actual > 0) 1068 dev_dbg(dev->dev, 1069 "Unexpected setup packet at state %s\n", 1070 gr_ep0state_string(GR_EP0_ISTATUS)); 1071 else 1072 goto out; /* Got expected ZLP */ 1073 } else if (dev->ep0state != GR_EP0_SETUP) { 1074 dev_info(dev->dev, 1075 "Unexpected ep0out request at state %s - stalling\n", 1076 gr_ep0state_string(dev->ep0state)); 1077 gr_control_stall(dev); 1078 gr_set_ep0state(dev, GR_EP0_SETUP); 1079 goto out; 1080 } else if (!req->req.actual) { 1081 dev_dbg(dev->dev, "Unexpected ZLP at state %s\n", 1082 gr_ep0state_string(dev->ep0state)); 1083 goto out; 1084 } 1085 1086 /* Handle SETUP packet */ 1087 for (i = 0; i < req->req.actual; i++) 1088 u.raw[i] = ((u8 *)req->req.buf)[i]; 1089 1090 type = u.ctrl.bRequestType; 1091 request = u.ctrl.bRequest; 1092 value = le16_to_cpu(u.ctrl.wValue); 1093 index = le16_to_cpu(u.ctrl.wIndex); 1094 length = le16_to_cpu(u.ctrl.wLength); 1095 1096 gr_dbgprint_devreq(dev, type, request, value, index, length); 1097 1098 /* Check for data stage */ 1099 if (length) { 1100 if (type & USB_DIR_IN) 1101 gr_set_ep0state(dev, GR_EP0_IDATA); 1102 else 1103 gr_set_ep0state(dev, GR_EP0_ODATA); 1104 } 1105 1106 status = 1; /* Positive status flags delegation */ 1107 if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1108 switch (type & USB_RECIP_MASK) { 1109 case USB_RECIP_DEVICE: 1110 status = gr_device_request(dev, type, request, 1111 value, index); 1112 break; 1113 case USB_RECIP_ENDPOINT: 1114 status = gr_endpoint_request(dev, type, request, 1115 value, index); 1116 break; 1117 case USB_RECIP_INTERFACE: 1118 status = gr_interface_request(dev, type, request, 1119 value, index); 1120 break; 1121 } 1122 } 1123 1124 if (status > 0) { 1125 spin_unlock(&dev->lock); 1126 1127 dev_vdbg(dev->dev, "DELEGATE\n"); 1128 status = dev->driver->setup(&dev->gadget, &u.ctrl); 1129 1130 spin_lock(&dev->lock); 1131 } 1132 1133 /* Generate STALL on both ep0out and ep0in if requested */ 1134 if (unlikely(status < 0)) { 1135 dev_vdbg(dev->dev, "STALL\n"); 1136 gr_control_stall(dev); 1137 } 1138 1139 if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD && 1140 request == USB_REQ_SET_CONFIGURATION) { 1141 if (!value) { 1142 dev_dbg(dev->dev, "STATUS: deconfigured\n"); 1143 usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS); 1144 } else if (status >= 0) { 1145 /* Not configured unless gadget OK:s it */ 1146 dev_dbg(dev->dev, "STATUS: configured: %d\n", value); 1147 usb_gadget_set_state(&dev->gadget, 1148 USB_STATE_CONFIGURED); 1149 } 1150 } 1151 1152 /* Get ready for next stage */ 1153 if (dev->ep0state == GR_EP0_ODATA) 1154 gr_set_ep0state(dev, GR_EP0_OSTATUS); 1155 else if (dev->ep0state == GR_EP0_IDATA) 1156 gr_set_ep0state(dev, GR_EP0_ISTATUS); 1157 else 1158 gr_set_ep0state(dev, GR_EP0_SETUP); 1159 1160 out: 1161 gr_ep0out_requeue(dev); 1162 } 1163 1164 /* ---------------------------------------------------------------------- */ 1165 /* VBUS and USB reset handling */ 1166 1167 /* Must be called with dev->lock held and irqs disabled */ 1168 static void gr_vbus_connected(struct gr_udc *dev, u32 status) 1169 { 1170 u32 control; 1171 1172 dev->gadget.speed = GR_SPEED(status); 1173 usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED); 1174 1175 /* Turn on full interrupts and pullup */ 1176 control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI | 1177 GR_CONTROL_SP | GR_CONTROL_EP); 1178 gr_write32(&dev->regs->control, control); 1179 } 1180 1181 /* Must be called with dev->lock held */ 1182 static void gr_enable_vbus_detect(struct gr_udc *dev) 1183 { 1184 u32 status; 1185 1186 dev->irq_enabled = 1; 1187 wmb(); /* Make sure we do not ignore an interrupt */ 1188 gr_write32(&dev->regs->control, GR_CONTROL_VI); 1189 1190 /* Take care of the case we are already plugged in at this point */ 1191 status = gr_read32(&dev->regs->status); 1192 if (status & GR_STATUS_VB) 1193 gr_vbus_connected(dev, status); 1194 } 1195 1196 /* Must be called with dev->lock held and irqs disabled */ 1197 static void gr_vbus_disconnected(struct gr_udc *dev) 1198 { 1199 gr_stop_activity(dev); 1200 1201 /* Report disconnect */ 1202 if (dev->driver && dev->driver->disconnect) { 1203 spin_unlock(&dev->lock); 1204 1205 dev->driver->disconnect(&dev->gadget); 1206 1207 spin_lock(&dev->lock); 1208 } 1209 1210 gr_enable_vbus_detect(dev); 1211 } 1212 1213 /* Must be called with dev->lock held and irqs disabled */ 1214 static void gr_udc_usbreset(struct gr_udc *dev, u32 status) 1215 { 1216 gr_set_address(dev, 0); 1217 gr_set_ep0state(dev, GR_EP0_SETUP); 1218 usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT); 1219 dev->gadget.speed = GR_SPEED(status); 1220 1221 gr_ep_nuke(&dev->epo[0]); 1222 gr_ep_nuke(&dev->epi[0]); 1223 dev->epo[0].stopped = 0; 1224 dev->epi[0].stopped = 0; 1225 gr_ep0out_requeue(dev); 1226 } 1227 1228 /* ---------------------------------------------------------------------- */ 1229 /* Irq handling */ 1230 1231 /* 1232 * Handles interrupts from in endpoints. Returns whether something was handled. 1233 * 1234 * Must be called with dev->lock held, irqs disabled and with !ep->stopped. 1235 */ 1236 static int gr_handle_in_ep(struct gr_ep *ep) 1237 { 1238 struct gr_request *req; 1239 1240 req = list_first_entry(&ep->queue, struct gr_request, queue); 1241 if (!req->last_desc) 1242 return 0; 1243 1244 if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) 1245 return 0; /* Not put in hardware buffers yet */ 1246 1247 if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) 1248 return 0; /* Not transmitted yet, still in hardware buffers */ 1249 1250 /* Write complete */ 1251 gr_dma_advance(ep, 0); 1252 1253 return 1; 1254 } 1255 1256 /* 1257 * Handles interrupts from out endpoints. Returns whether something was handled. 1258 * 1259 * Must be called with dev->lock held, irqs disabled and with !ep->stopped. 1260 */ 1261 static int gr_handle_out_ep(struct gr_ep *ep) 1262 { 1263 u32 ep_dmactrl; 1264 u32 ctrl; 1265 u16 len; 1266 struct gr_request *req; 1267 struct gr_udc *dev = ep->dev; 1268 1269 req = list_first_entry(&ep->queue, struct gr_request, queue); 1270 if (!req->curr_desc) 1271 return 0; 1272 1273 ctrl = READ_ONCE(req->curr_desc->ctrl); 1274 if (ctrl & GR_DESC_OUT_CTRL_EN) 1275 return 0; /* Not received yet */ 1276 1277 /* Read complete */ 1278 len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK; 1279 req->req.actual += len; 1280 if (ctrl & GR_DESC_OUT_CTRL_SE) 1281 req->setup = 1; 1282 1283 if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) { 1284 /* Short packet or >= expected size - we are done */ 1285 1286 if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) { 1287 /* 1288 * Send a status stage ZLP to ack the DATA stage in the 1289 * OUT direction. This needs to be done before 1290 * gr_dma_advance as that can lead to a call to 1291 * ep0_setup that can change dev->ep0state. 1292 */ 1293 gr_ep0_respond_empty(dev); 1294 gr_set_ep0state(dev, GR_EP0_SETUP); 1295 } 1296 1297 gr_dma_advance(ep, 0); 1298 } else { 1299 /* Not done yet. Enable the next descriptor to receive more. */ 1300 req->curr_desc = req->curr_desc->next_desc; 1301 req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN; 1302 1303 ep_dmactrl = gr_read32(&ep->regs->dmactrl); 1304 gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA); 1305 } 1306 1307 return 1; 1308 } 1309 1310 /* 1311 * Handle state changes. Returns whether something was handled. 1312 * 1313 * Must be called with dev->lock held and irqs disabled. 1314 */ 1315 static int gr_handle_state_changes(struct gr_udc *dev) 1316 { 1317 u32 status = gr_read32(&dev->regs->status); 1318 int handled = 0; 1319 int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED || 1320 dev->gadget.state == USB_STATE_ATTACHED); 1321 1322 /* VBUS valid detected */ 1323 if (!powstate && (status & GR_STATUS_VB)) { 1324 dev_dbg(dev->dev, "STATUS: vbus valid detected\n"); 1325 gr_vbus_connected(dev, status); 1326 handled = 1; 1327 } 1328 1329 /* Disconnect */ 1330 if (powstate && !(status & GR_STATUS_VB)) { 1331 dev_dbg(dev->dev, "STATUS: vbus invalid detected\n"); 1332 gr_vbus_disconnected(dev); 1333 handled = 1; 1334 } 1335 1336 /* USB reset detected */ 1337 if (status & GR_STATUS_UR) { 1338 dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n", 1339 GR_SPEED_STR(status)); 1340 gr_write32(&dev->regs->status, GR_STATUS_UR); 1341 gr_udc_usbreset(dev, status); 1342 handled = 1; 1343 } 1344 1345 /* Speed change */ 1346 if (dev->gadget.speed != GR_SPEED(status)) { 1347 dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n", 1348 GR_SPEED_STR(status)); 1349 dev->gadget.speed = GR_SPEED(status); 1350 handled = 1; 1351 } 1352 1353 /* Going into suspend */ 1354 if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) { 1355 dev_dbg(dev->dev, "STATUS: USB suspend\n"); 1356 gr_set_ep0state(dev, GR_EP0_SUSPEND); 1357 dev->suspended_from = dev->gadget.state; 1358 usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED); 1359 1360 if ((dev->gadget.speed != USB_SPEED_UNKNOWN) && 1361 dev->driver && dev->driver->suspend) { 1362 spin_unlock(&dev->lock); 1363 1364 dev->driver->suspend(&dev->gadget); 1365 1366 spin_lock(&dev->lock); 1367 } 1368 handled = 1; 1369 } 1370 1371 /* Coming out of suspend */ 1372 if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) { 1373 dev_dbg(dev->dev, "STATUS: USB resume\n"); 1374 if (dev->suspended_from == USB_STATE_POWERED) 1375 gr_set_ep0state(dev, GR_EP0_DISCONNECT); 1376 else 1377 gr_set_ep0state(dev, GR_EP0_SETUP); 1378 usb_gadget_set_state(&dev->gadget, dev->suspended_from); 1379 1380 if ((dev->gadget.speed != USB_SPEED_UNKNOWN) && 1381 dev->driver && dev->driver->resume) { 1382 spin_unlock(&dev->lock); 1383 1384 dev->driver->resume(&dev->gadget); 1385 1386 spin_lock(&dev->lock); 1387 } 1388 handled = 1; 1389 } 1390 1391 return handled; 1392 } 1393 1394 /* Non-interrupt context irq handler */ 1395 static irqreturn_t gr_irq_handler(int irq, void *_dev) 1396 { 1397 struct gr_udc *dev = _dev; 1398 struct gr_ep *ep; 1399 int handled = 0; 1400 int i; 1401 unsigned long flags; 1402 1403 spin_lock_irqsave(&dev->lock, flags); 1404 1405 if (!dev->irq_enabled) 1406 goto out; 1407 1408 /* 1409 * Check IN ep interrupts. We check these before the OUT eps because 1410 * some gadgets reuse the request that might already be currently 1411 * outstanding and needs to be completed (mainly setup requests). 1412 */ 1413 for (i = 0; i < dev->nepi; i++) { 1414 ep = &dev->epi[i]; 1415 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue)) 1416 handled = gr_handle_in_ep(ep) || handled; 1417 } 1418 1419 /* Check OUT ep interrupts */ 1420 for (i = 0; i < dev->nepo; i++) { 1421 ep = &dev->epo[i]; 1422 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue)) 1423 handled = gr_handle_out_ep(ep) || handled; 1424 } 1425 1426 /* Check status interrupts */ 1427 handled = gr_handle_state_changes(dev) || handled; 1428 1429 /* 1430 * Check AMBA DMA errors. Only check if we didn't find anything else to 1431 * handle because this shouldn't happen if we did everything right. 1432 */ 1433 if (!handled) { 1434 list_for_each_entry(ep, &dev->ep_list, ep_list) { 1435 if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) { 1436 dev_err(dev->dev, 1437 "AMBA Error occurred for %s\n", 1438 ep->ep.name); 1439 handled = 1; 1440 } 1441 } 1442 } 1443 1444 out: 1445 spin_unlock_irqrestore(&dev->lock, flags); 1446 1447 return handled ? IRQ_HANDLED : IRQ_NONE; 1448 } 1449 1450 /* Interrupt context irq handler */ 1451 static irqreturn_t gr_irq(int irq, void *_dev) 1452 { 1453 struct gr_udc *dev = _dev; 1454 1455 if (!dev->irq_enabled) 1456 return IRQ_NONE; 1457 1458 return IRQ_WAKE_THREAD; 1459 } 1460 1461 /* ---------------------------------------------------------------------- */ 1462 /* USB ep ops */ 1463 1464 /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */ 1465 static int gr_ep_enable(struct usb_ep *_ep, 1466 const struct usb_endpoint_descriptor *desc) 1467 { 1468 struct gr_udc *dev; 1469 struct gr_ep *ep; 1470 u8 mode; 1471 u8 nt; 1472 u16 max; 1473 u16 buffer_size = 0; 1474 u32 epctrl; 1475 1476 ep = container_of(_ep, struct gr_ep, ep); 1477 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) 1478 return -EINVAL; 1479 1480 dev = ep->dev; 1481 1482 /* 'ep0' IN and OUT are reserved */ 1483 if (ep == &dev->epo[0] || ep == &dev->epi[0]) 1484 return -EINVAL; 1485 1486 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 1487 return -ESHUTDOWN; 1488 1489 /* Make sure we are clear for enabling */ 1490 epctrl = gr_read32(&ep->regs->epctrl); 1491 if (epctrl & GR_EPCTRL_EV) 1492 return -EBUSY; 1493 1494 /* Check that directions match */ 1495 if (!ep->is_in != !usb_endpoint_dir_in(desc)) 1496 return -EINVAL; 1497 1498 /* Check ep num */ 1499 if ((!ep->is_in && ep->num >= dev->nepo) || 1500 (ep->is_in && ep->num >= dev->nepi)) 1501 return -EINVAL; 1502 1503 if (usb_endpoint_xfer_control(desc)) { 1504 mode = 0; 1505 } else if (usb_endpoint_xfer_isoc(desc)) { 1506 mode = 1; 1507 } else if (usb_endpoint_xfer_bulk(desc)) { 1508 mode = 2; 1509 } else if (usb_endpoint_xfer_int(desc)) { 1510 mode = 3; 1511 } else { 1512 dev_err(dev->dev, "Unknown transfer type for %s\n", 1513 ep->ep.name); 1514 return -EINVAL; 1515 } 1516 1517 /* 1518 * Bits 10-0 set the max payload. 12-11 set the number of 1519 * additional transactions. 1520 */ 1521 max = usb_endpoint_maxp(desc); 1522 nt = usb_endpoint_maxp_mult(desc) - 1; 1523 buffer_size = GR_BUFFER_SIZE(epctrl); 1524 if (nt && (mode == 0 || mode == 2)) { 1525 dev_err(dev->dev, 1526 "%s mode: multiple trans./microframe not valid\n", 1527 (mode == 2 ? "Bulk" : "Control")); 1528 return -EINVAL; 1529 } else if (nt == 0x3) { 1530 dev_err(dev->dev, 1531 "Invalid value 0x3 for additional trans./microframe\n"); 1532 return -EINVAL; 1533 } else if ((nt + 1) * max > buffer_size) { 1534 dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n", 1535 buffer_size, (nt + 1), max); 1536 return -EINVAL; 1537 } else if (max == 0) { 1538 dev_err(dev->dev, "Max payload cannot be set to 0\n"); 1539 return -EINVAL; 1540 } else if (max > ep->ep.maxpacket_limit) { 1541 dev_err(dev->dev, "Requested max payload %d > limit %d\n", 1542 max, ep->ep.maxpacket_limit); 1543 return -EINVAL; 1544 } 1545 1546 spin_lock(&ep->dev->lock); 1547 1548 if (!ep->stopped) { 1549 spin_unlock(&ep->dev->lock); 1550 return -EBUSY; 1551 } 1552 1553 ep->stopped = 0; 1554 ep->wedged = 0; 1555 ep->ep.desc = desc; 1556 ep->ep.maxpacket = max; 1557 ep->dma_start = 0; 1558 1559 1560 if (nt) { 1561 /* 1562 * Maximum possible size of all payloads in one microframe 1563 * regardless of direction when using high-bandwidth mode. 1564 */ 1565 ep->bytes_per_buffer = (nt + 1) * max; 1566 } else if (ep->is_in) { 1567 /* 1568 * The biggest multiple of maximum packet size that fits into 1569 * the buffer. The hardware will split up into many packets in 1570 * the IN direction. 1571 */ 1572 ep->bytes_per_buffer = (buffer_size / max) * max; 1573 } else { 1574 /* 1575 * Only single packets will be placed the buffers in the OUT 1576 * direction. 1577 */ 1578 ep->bytes_per_buffer = max; 1579 } 1580 1581 epctrl = (max << GR_EPCTRL_MAXPL_POS) 1582 | (nt << GR_EPCTRL_NT_POS) 1583 | (mode << GR_EPCTRL_TT_POS) 1584 | GR_EPCTRL_EV; 1585 if (ep->is_in) 1586 epctrl |= GR_EPCTRL_PI; 1587 gr_write32(&ep->regs->epctrl, epctrl); 1588 1589 gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI); 1590 1591 spin_unlock(&ep->dev->lock); 1592 1593 dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n", 1594 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer); 1595 return 0; 1596 } 1597 1598 /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */ 1599 static int gr_ep_disable(struct usb_ep *_ep) 1600 { 1601 struct gr_ep *ep; 1602 struct gr_udc *dev; 1603 unsigned long flags; 1604 1605 ep = container_of(_ep, struct gr_ep, ep); 1606 if (!_ep || !ep->ep.desc) 1607 return -ENODEV; 1608 1609 dev = ep->dev; 1610 1611 /* 'ep0' IN and OUT are reserved */ 1612 if (ep == &dev->epo[0] || ep == &dev->epi[0]) 1613 return -EINVAL; 1614 1615 if (dev->ep0state == GR_EP0_SUSPEND) 1616 return -EBUSY; 1617 1618 dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name); 1619 1620 spin_lock_irqsave(&dev->lock, flags); 1621 1622 gr_ep_nuke(ep); 1623 gr_ep_reset(ep); 1624 ep->ep.desc = NULL; 1625 1626 spin_unlock_irqrestore(&dev->lock, flags); 1627 1628 return 0; 1629 } 1630 1631 /* 1632 * Frees a request, but not any DMA buffers associated with it 1633 * (gr_finish_request should already have taken care of that). 1634 */ 1635 static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req) 1636 { 1637 struct gr_request *req; 1638 1639 if (!_ep || !_req) 1640 return; 1641 req = container_of(_req, struct gr_request, req); 1642 1643 /* Leads to memory leak */ 1644 WARN(!list_empty(&req->queue), 1645 "request not dequeued properly before freeing\n"); 1646 1647 kfree(req); 1648 } 1649 1650 /* Queue a request from the gadget */ 1651 static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req, 1652 gfp_t gfp_flags) 1653 { 1654 struct gr_ep *ep; 1655 struct gr_request *req; 1656 struct gr_udc *dev; 1657 int ret; 1658 1659 if (unlikely(!_ep || !_req)) 1660 return -EINVAL; 1661 1662 ep = container_of(_ep, struct gr_ep, ep); 1663 req = container_of(_req, struct gr_request, req); 1664 dev = ep->dev; 1665 1666 spin_lock(&ep->dev->lock); 1667 1668 /* 1669 * The ep0 pointer in the gadget struct is used both for ep0in and 1670 * ep0out. In a data stage in the out direction ep0out needs to be used 1671 * instead of the default ep0in. Completion functions might use 1672 * driver_data, so that needs to be copied as well. 1673 */ 1674 if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) { 1675 ep = &dev->epo[0]; 1676 ep->ep.driver_data = dev->epi[0].ep.driver_data; 1677 } 1678 1679 if (ep->is_in) 1680 gr_dbgprint_request("EXTERN", ep, req); 1681 1682 ret = gr_queue(ep, req, GFP_ATOMIC); 1683 1684 spin_unlock(&ep->dev->lock); 1685 1686 return ret; 1687 } 1688 1689 /* Dequeue JUST ONE request */ 1690 static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1691 { 1692 struct gr_request *req; 1693 struct gr_ep *ep; 1694 struct gr_udc *dev; 1695 int ret = 0; 1696 unsigned long flags; 1697 1698 ep = container_of(_ep, struct gr_ep, ep); 1699 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0)) 1700 return -EINVAL; 1701 dev = ep->dev; 1702 if (!dev->driver) 1703 return -ESHUTDOWN; 1704 1705 /* We can't touch (DMA) registers when suspended */ 1706 if (dev->ep0state == GR_EP0_SUSPEND) 1707 return -EBUSY; 1708 1709 spin_lock_irqsave(&dev->lock, flags); 1710 1711 /* Make sure it's actually queued on this endpoint */ 1712 list_for_each_entry(req, &ep->queue, queue) { 1713 if (&req->req == _req) 1714 break; 1715 } 1716 if (&req->req != _req) { 1717 ret = -EINVAL; 1718 goto out; 1719 } 1720 1721 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) { 1722 /* This request is currently being processed */ 1723 gr_abort_dma(ep); 1724 if (ep->stopped) 1725 gr_finish_request(ep, req, -ECONNRESET); 1726 else 1727 gr_dma_advance(ep, -ECONNRESET); 1728 } else if (!list_empty(&req->queue)) { 1729 /* Not being processed - gr_finish_request dequeues it */ 1730 gr_finish_request(ep, req, -ECONNRESET); 1731 } else { 1732 ret = -EOPNOTSUPP; 1733 } 1734 1735 out: 1736 spin_unlock_irqrestore(&dev->lock, flags); 1737 1738 return ret; 1739 } 1740 1741 /* Helper for gr_set_halt and gr_set_wedge */ 1742 static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) 1743 { 1744 int ret; 1745 struct gr_ep *ep; 1746 1747 if (!_ep) 1748 return -ENODEV; 1749 ep = container_of(_ep, struct gr_ep, ep); 1750 1751 spin_lock(&ep->dev->lock); 1752 1753 /* Halting an IN endpoint should fail if queue is not empty */ 1754 if (halt && ep->is_in && !list_empty(&ep->queue)) { 1755 ret = -EAGAIN; 1756 goto out; 1757 } 1758 1759 ret = gr_ep_halt_wedge(ep, halt, wedge, 0); 1760 1761 out: 1762 spin_unlock(&ep->dev->lock); 1763 1764 return ret; 1765 } 1766 1767 /* Halt endpoint */ 1768 static int gr_set_halt(struct usb_ep *_ep, int halt) 1769 { 1770 return gr_set_halt_wedge(_ep, halt, 0); 1771 } 1772 1773 /* Halt and wedge endpoint */ 1774 static int gr_set_wedge(struct usb_ep *_ep) 1775 { 1776 return gr_set_halt_wedge(_ep, 1, 1); 1777 } 1778 1779 /* 1780 * Return the total number of bytes currently stored in the internal buffers of 1781 * the endpoint. 1782 */ 1783 static int gr_fifo_status(struct usb_ep *_ep) 1784 { 1785 struct gr_ep *ep; 1786 u32 epstat; 1787 u32 bytes = 0; 1788 1789 if (!_ep) 1790 return -ENODEV; 1791 ep = container_of(_ep, struct gr_ep, ep); 1792 1793 epstat = gr_read32(&ep->regs->epstat); 1794 1795 if (epstat & GR_EPSTAT_B0) 1796 bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS; 1797 if (epstat & GR_EPSTAT_B1) 1798 bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS; 1799 1800 return bytes; 1801 } 1802 1803 1804 /* Empty data from internal buffers of an endpoint. */ 1805 static void gr_fifo_flush(struct usb_ep *_ep) 1806 { 1807 struct gr_ep *ep; 1808 u32 epctrl; 1809 1810 if (!_ep) 1811 return; 1812 ep = container_of(_ep, struct gr_ep, ep); 1813 dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name); 1814 1815 spin_lock(&ep->dev->lock); 1816 1817 epctrl = gr_read32(&ep->regs->epctrl); 1818 epctrl |= GR_EPCTRL_CB; 1819 gr_write32(&ep->regs->epctrl, epctrl); 1820 1821 spin_unlock(&ep->dev->lock); 1822 } 1823 1824 static const struct usb_ep_ops gr_ep_ops = { 1825 .enable = gr_ep_enable, 1826 .disable = gr_ep_disable, 1827 1828 .alloc_request = gr_alloc_request, 1829 .free_request = gr_free_request, 1830 1831 .queue = gr_queue_ext, 1832 .dequeue = gr_dequeue, 1833 1834 .set_halt = gr_set_halt, 1835 .set_wedge = gr_set_wedge, 1836 .fifo_status = gr_fifo_status, 1837 .fifo_flush = gr_fifo_flush, 1838 }; 1839 1840 /* ---------------------------------------------------------------------- */ 1841 /* USB Gadget ops */ 1842 1843 static int gr_get_frame(struct usb_gadget *_gadget) 1844 { 1845 struct gr_udc *dev; 1846 1847 if (!_gadget) 1848 return -ENODEV; 1849 dev = container_of(_gadget, struct gr_udc, gadget); 1850 return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK; 1851 } 1852 1853 static int gr_wakeup(struct usb_gadget *_gadget) 1854 { 1855 struct gr_udc *dev; 1856 1857 if (!_gadget) 1858 return -ENODEV; 1859 dev = container_of(_gadget, struct gr_udc, gadget); 1860 1861 /* Remote wakeup feature not enabled by host*/ 1862 if (!dev->remote_wakeup) 1863 return -EINVAL; 1864 1865 spin_lock(&dev->lock); 1866 1867 gr_write32(&dev->regs->control, 1868 gr_read32(&dev->regs->control) | GR_CONTROL_RW); 1869 1870 spin_unlock(&dev->lock); 1871 1872 return 0; 1873 } 1874 1875 static int gr_pullup(struct usb_gadget *_gadget, int is_on) 1876 { 1877 struct gr_udc *dev; 1878 u32 control; 1879 1880 if (!_gadget) 1881 return -ENODEV; 1882 dev = container_of(_gadget, struct gr_udc, gadget); 1883 1884 spin_lock(&dev->lock); 1885 1886 control = gr_read32(&dev->regs->control); 1887 if (is_on) 1888 control |= GR_CONTROL_EP; 1889 else 1890 control &= ~GR_CONTROL_EP; 1891 gr_write32(&dev->regs->control, control); 1892 1893 spin_unlock(&dev->lock); 1894 1895 return 0; 1896 } 1897 1898 static int gr_udc_start(struct usb_gadget *gadget, 1899 struct usb_gadget_driver *driver) 1900 { 1901 struct gr_udc *dev = to_gr_udc(gadget); 1902 1903 spin_lock(&dev->lock); 1904 1905 /* Hook up the driver */ 1906 driver->driver.bus = NULL; 1907 dev->driver = driver; 1908 1909 /* Get ready for host detection */ 1910 gr_enable_vbus_detect(dev); 1911 1912 spin_unlock(&dev->lock); 1913 1914 return 0; 1915 } 1916 1917 static int gr_udc_stop(struct usb_gadget *gadget) 1918 { 1919 struct gr_udc *dev = to_gr_udc(gadget); 1920 unsigned long flags; 1921 1922 spin_lock_irqsave(&dev->lock, flags); 1923 1924 dev->driver = NULL; 1925 gr_stop_activity(dev); 1926 1927 spin_unlock_irqrestore(&dev->lock, flags); 1928 1929 return 0; 1930 } 1931 1932 static const struct usb_gadget_ops gr_ops = { 1933 .get_frame = gr_get_frame, 1934 .wakeup = gr_wakeup, 1935 .pullup = gr_pullup, 1936 .udc_start = gr_udc_start, 1937 .udc_stop = gr_udc_stop, 1938 /* Other operations not supported */ 1939 }; 1940 1941 /* ---------------------------------------------------------------------- */ 1942 /* Module probe, removal and of-matching */ 1943 1944 static const char * const onames[] = { 1945 "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out", 1946 "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out", 1947 "ep12out", "ep13out", "ep14out", "ep15out" 1948 }; 1949 1950 static const char * const inames[] = { 1951 "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in", 1952 "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in", 1953 "ep12in", "ep13in", "ep14in", "ep15in" 1954 }; 1955 1956 /* Must be called with dev->lock held */ 1957 static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) 1958 { 1959 struct gr_ep *ep; 1960 struct gr_request *req; 1961 struct usb_request *_req; 1962 void *buf; 1963 1964 if (is_in) { 1965 ep = &dev->epi[num]; 1966 ep->ep.name = inames[num]; 1967 ep->regs = &dev->regs->epi[num]; 1968 } else { 1969 ep = &dev->epo[num]; 1970 ep->ep.name = onames[num]; 1971 ep->regs = &dev->regs->epo[num]; 1972 } 1973 1974 gr_ep_reset(ep); 1975 ep->num = num; 1976 ep->is_in = is_in; 1977 ep->dev = dev; 1978 ep->ep.ops = &gr_ep_ops; 1979 INIT_LIST_HEAD(&ep->queue); 1980 1981 if (num == 0) { 1982 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); 1983 buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); 1984 if (!_req || !buf) { 1985 /* possible _req freed by gr_probe via gr_remove */ 1986 return -ENOMEM; 1987 } 1988 1989 req = container_of(_req, struct gr_request, req); 1990 req->req.buf = buf; 1991 req->req.length = MAX_CTRL_PL_SIZE; 1992 1993 if (is_in) 1994 dev->ep0reqi = req; /* Complete gets set as used */ 1995 else 1996 dev->ep0reqo = req; /* Completion treated separately */ 1997 1998 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE); 1999 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE; 2000 2001 ep->ep.caps.type_control = true; 2002 } else { 2003 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit); 2004 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); 2005 2006 ep->ep.caps.type_iso = true; 2007 ep->ep.caps.type_bulk = true; 2008 ep->ep.caps.type_int = true; 2009 } 2010 list_add_tail(&ep->ep_list, &dev->ep_list); 2011 2012 if (is_in) 2013 ep->ep.caps.dir_in = true; 2014 else 2015 ep->ep.caps.dir_out = true; 2016 2017 ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit, 2018 &ep->tailbuf_paddr, GFP_ATOMIC); 2019 if (!ep->tailbuf) 2020 return -ENOMEM; 2021 2022 return 0; 2023 } 2024 2025 /* Must be called with dev->lock held */ 2026 static int gr_udc_init(struct gr_udc *dev) 2027 { 2028 struct device_node *np = dev->dev->of_node; 2029 u32 epctrl_val; 2030 u32 dmactrl_val; 2031 int i; 2032 int ret = 0; 2033 u32 bufsize; 2034 2035 gr_set_address(dev, 0); 2036 2037 INIT_LIST_HEAD(&dev->gadget.ep_list); 2038 dev->gadget.speed = USB_SPEED_UNKNOWN; 2039 dev->gadget.ep0 = &dev->epi[0].ep; 2040 2041 INIT_LIST_HEAD(&dev->ep_list); 2042 gr_set_ep0state(dev, GR_EP0_DISCONNECT); 2043 2044 for (i = 0; i < dev->nepo; i++) { 2045 if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize)) 2046 bufsize = 1024; 2047 ret = gr_ep_init(dev, i, 0, bufsize); 2048 if (ret) 2049 return ret; 2050 } 2051 2052 for (i = 0; i < dev->nepi; i++) { 2053 if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize)) 2054 bufsize = 1024; 2055 ret = gr_ep_init(dev, i, 1, bufsize); 2056 if (ret) 2057 return ret; 2058 } 2059 2060 /* Must be disabled by default */ 2061 dev->remote_wakeup = 0; 2062 2063 /* Enable ep0out and ep0in */ 2064 epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV; 2065 dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI; 2066 gr_write32(&dev->epo[0].regs->epctrl, epctrl_val); 2067 gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI); 2068 gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val); 2069 gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val); 2070 2071 return 0; 2072 } 2073 2074 static void gr_ep_remove(struct gr_udc *dev, int num, int is_in) 2075 { 2076 struct gr_ep *ep; 2077 2078 if (is_in) 2079 ep = &dev->epi[num]; 2080 else 2081 ep = &dev->epo[num]; 2082 2083 if (ep->tailbuf) 2084 dma_free_coherent(dev->dev, ep->ep.maxpacket_limit, 2085 ep->tailbuf, ep->tailbuf_paddr); 2086 } 2087 2088 static int gr_remove(struct platform_device *pdev) 2089 { 2090 struct gr_udc *dev = platform_get_drvdata(pdev); 2091 int i; 2092 2093 if (dev->added) 2094 usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */ 2095 if (dev->driver) 2096 return -EBUSY; 2097 2098 gr_dfs_delete(dev); 2099 dma_pool_destroy(dev->desc_pool); 2100 platform_set_drvdata(pdev, NULL); 2101 2102 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req); 2103 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req); 2104 2105 for (i = 0; i < dev->nepo; i++) 2106 gr_ep_remove(dev, i, 0); 2107 for (i = 0; i < dev->nepi; i++) 2108 gr_ep_remove(dev, i, 1); 2109 2110 return 0; 2111 } 2112 static int gr_request_irq(struct gr_udc *dev, int irq) 2113 { 2114 return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler, 2115 IRQF_SHARED, driver_name, dev); 2116 } 2117 2118 static int gr_probe(struct platform_device *pdev) 2119 { 2120 struct gr_udc *dev; 2121 struct gr_regs __iomem *regs; 2122 int retval; 2123 u32 status; 2124 2125 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); 2126 if (!dev) 2127 return -ENOMEM; 2128 dev->dev = &pdev->dev; 2129 2130 regs = devm_platform_ioremap_resource(pdev, 0); 2131 if (IS_ERR(regs)) 2132 return PTR_ERR(regs); 2133 2134 dev->irq = platform_get_irq(pdev, 0); 2135 if (dev->irq <= 0) 2136 return -ENODEV; 2137 2138 /* Some core configurations has separate irqs for IN and OUT events */ 2139 dev->irqi = platform_get_irq(pdev, 1); 2140 if (dev->irqi > 0) { 2141 dev->irqo = platform_get_irq(pdev, 2); 2142 if (dev->irqo <= 0) 2143 return -ENODEV; 2144 } else { 2145 dev->irqi = 0; 2146 } 2147 2148 dev->gadget.name = driver_name; 2149 dev->gadget.max_speed = USB_SPEED_HIGH; 2150 dev->gadget.ops = &gr_ops; 2151 2152 spin_lock_init(&dev->lock); 2153 dev->regs = regs; 2154 2155 platform_set_drvdata(pdev, dev); 2156 2157 /* Determine number of endpoints and data interface mode */ 2158 status = gr_read32(&dev->regs->status); 2159 dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1; 2160 dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1; 2161 2162 if (!(status & GR_STATUS_DM)) { 2163 dev_err(dev->dev, "Slave mode cores are not supported\n"); 2164 return -ENODEV; 2165 } 2166 2167 /* --- Effects of the following calls might need explicit cleanup --- */ 2168 2169 /* Create DMA pool for descriptors */ 2170 dev->desc_pool = dma_pool_create("desc_pool", dev->dev, 2171 sizeof(struct gr_dma_desc), 4, 0); 2172 if (!dev->desc_pool) { 2173 dev_err(dev->dev, "Could not allocate DMA pool"); 2174 return -ENOMEM; 2175 } 2176 2177 /* Inside lock so that no gadget can use this udc until probe is done */ 2178 retval = usb_add_gadget_udc(dev->dev, &dev->gadget); 2179 if (retval) { 2180 dev_err(dev->dev, "Could not add gadget udc"); 2181 goto out; 2182 } 2183 dev->added = 1; 2184 2185 spin_lock(&dev->lock); 2186 2187 retval = gr_udc_init(dev); 2188 if (retval) { 2189 spin_unlock(&dev->lock); 2190 goto out; 2191 } 2192 2193 /* Clear all interrupt enables that might be left on since last boot */ 2194 gr_disable_interrupts_and_pullup(dev); 2195 2196 spin_unlock(&dev->lock); 2197 2198 gr_dfs_create(dev); 2199 2200 retval = gr_request_irq(dev, dev->irq); 2201 if (retval) { 2202 dev_err(dev->dev, "Failed to request irq %d\n", dev->irq); 2203 goto out; 2204 } 2205 2206 if (dev->irqi) { 2207 retval = gr_request_irq(dev, dev->irqi); 2208 if (retval) { 2209 dev_err(dev->dev, "Failed to request irqi %d\n", 2210 dev->irqi); 2211 goto out; 2212 } 2213 retval = gr_request_irq(dev, dev->irqo); 2214 if (retval) { 2215 dev_err(dev->dev, "Failed to request irqo %d\n", 2216 dev->irqo); 2217 goto out; 2218 } 2219 } 2220 2221 if (dev->irqi) 2222 dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs, 2223 dev->irq, dev->irqi, dev->irqo); 2224 else 2225 dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq); 2226 2227 out: 2228 if (retval) 2229 gr_remove(pdev); 2230 2231 return retval; 2232 } 2233 2234 static const struct of_device_id gr_match[] = { 2235 {.name = "GAISLER_USBDC"}, 2236 {.name = "01_021"}, 2237 {}, 2238 }; 2239 MODULE_DEVICE_TABLE(of, gr_match); 2240 2241 static struct platform_driver gr_driver = { 2242 .driver = { 2243 .name = DRIVER_NAME, 2244 .of_match_table = gr_match, 2245 }, 2246 .probe = gr_probe, 2247 .remove = gr_remove, 2248 }; 2249 module_platform_driver(gr_driver); 2250 2251 MODULE_AUTHOR("Aeroflex Gaisler AB."); 2252 MODULE_DESCRIPTION(DRIVER_DESC); 2253 MODULE_LICENSE("GPL"); 2254