/*
 * udc.c - ChipIdea UDC driver
 *
 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
 *
 * Author: David Lopo
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/chipidea.h>

#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "debug.h"

/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns bit number
 */
static inline int hw_ep_bit(int num, int dir)
{
	return num + (dir ? 16 : 0);
}

static inline int ep_to_bit(struct ci13xxx *ci, int n)
{
	int fill = 16 - ci->hw_ep_max / 2;

	if (n >= ci->hw_ep_max / 2)
		n += fill;

	return n;
}

/**
 * hw_device_state: enables/disables interrupts (execute without interruption)
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(struct ci13xxx *ci, u32 dma)
{
	if (dma) {
		hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_write(ci, OP_USBINTR, ~0,
			 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
	} else {
		hw_write(ci, OP_USBINTR, ~0, 0);
	}
	return 0;
}

/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(struct ci13xxx *ci, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));

	return 0;
}

/**
 * hw_ep_disable: disables endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_disable(struct ci13xxx *ci, int num, int dir)
{
	hw_ep_flush(ci, num, dir);
	hw_write(ci, OP_ENDPTCTRL + num,
		 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
	return 0;
}

/**
 * hw_ep_enable: enables endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @type: endpoint type
 *
 * This function returns an error code
 */
static int hw_ep_enable(struct ci13xxx *ci, int num, int dir, int type)
{
	u32 mask, data;

	if (dir) {
		mask  = ENDPTCTRL_TXT;	/* type */
		data  = type << __ffs(mask);

		mask |= ENDPTCTRL_TXS;	/* unstall */
		mask |= ENDPTCTRL_TXR;	/* reset data toggle */
		data |= ENDPTCTRL_TXR;
		mask |= ENDPTCTRL_TXE;	/* enable */
		data |= ENDPTCTRL_TXE;
	} else {
		mask  = ENDPTCTRL_RXT;	/* type */
		data  = type << __ffs(mask);

		mask |= ENDPTCTRL_RXS;	/* unstall */
		mask |= ENDPTCTRL_RXR;	/* reset data toggle */
		data |= ENDPTCTRL_RXR;
		mask |= ENDPTCTRL_RXE;	/* enable */
		data |= ENDPTCTRL_RXE;
	}
	hw_write(ci, OP_ENDPTCTRL + num, mask, data);
	return 0;
}

/**
 * hw_ep_get_halt: return endpoint halt status
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns 1 if endpoint halted
 */
static int hw_ep_get_halt(struct ci13xxx *ci, int num, int dir)
{
	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;

	return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
}

/**
 * hw_test_and_clear_setup_status: test & clear setup status (execute without
 *                                 interruption)
 * @n: endpoint number
 *
 * This function returns setup status
 */
static int hw_test_and_clear_setup_status(struct ci13xxx *ci, int n)
{
	n = ep_to_bit(ci, n);
	return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
}

/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci13xxx *ci, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));

	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according to the manual but it doesn't work */
	return 0;
}
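
/*
 * Note on hw_ep_prime(): the prime request is issued by setting the
 * endpoint's bit in ENDPTPRIME and busy-waiting for the controller to
 * clear it again.  For the control OUT endpoint the function also checks
 * ENDPTSETUPSTAT before and after priming; if a new SETUP packet shows up
 * in that window the prime is considered stale and -EAGAIN is propagated
 * to the caller.
 */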

/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(struct ci13xxx *ci, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_write(ci, reg, mask_xs|mask_xr,
			 value ? mask_xs : mask_xr);
	} while (value != hw_ep_get_halt(ci, num, dir));

	return 0;
}

/**
 * hw_port_is_high_speed: test if port is high speed
 *
 * This function returns true if high speed port
 */
static int hw_port_is_high_speed(struct ci13xxx *ci)
{
	return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
		hw_read(ci, OP_PORTSC, PORTSC_HSP);
}

/**
 * hw_read_intr_enable: returns interrupt enable register
 *
 * This function returns register data
 */
static u32 hw_read_intr_enable(struct ci13xxx *ci)
{
	return hw_read(ci, OP_USBINTR, ~0);
}

/**
 * hw_read_intr_status: returns interrupt status register
 *
 * This function returns register data
 */
static u32 hw_read_intr_status(struct ci13xxx *ci)
{
	return hw_read(ci, OP_USBSTS, ~0);
}

/**
 * hw_test_and_clear_complete: test & clear complete status (execute without
 *                             interruption)
 * @n: endpoint number
 *
 * This function returns complete status
 */
static int hw_test_and_clear_complete(struct ci13xxx *ci, int n)
{
	n = ep_to_bit(ci, n);
	return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
}

/**
 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 *                                without interruption)
 *
 * This function returns active interrupts
 */
static u32 hw_test_and_clear_intr_active(struct ci13xxx *ci)
{
	u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);

	hw_write(ci, OP_USBSTS, ~0, reg);
	return reg;
}

/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(struct ci13xxx *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
}

/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(struct ci13xxx *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}

/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @value: new USB address
 *
 * This function explicitly sets the address, without the "USBADRA" (advance)
 * feature, which is not supported by older versions of the controller.
 */
static void hw_usb_set_address(struct ci13xxx *ci, u8 value)
{
	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
		 value << __ffs(DEVICEADDR_USBADR));
}

/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 *
 * This function returns an error code
 */
static int hw_usb_reset(struct ci13xxx *ci)
{
	hw_usb_set_address(ci, 0);

	/* ESS flushes only at end?!? */
	hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);

	/* clear setup token semaphores */
	hw_write(ci, OP_ENDPTSETUPSTAT, 0, 0);

	/* clear complete status */
	hw_write(ci, OP_ENDPTCOMPLETE, 0, 0);

	/* wait until all bits cleared */
	while (hw_read(ci, OP_ENDPTPRIME, ~0))
		udelay(10); /* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}

/******************************************************************************
 * UTIL block
 *****************************************************************************/
/**
 * _usb_addr: calculates endpoint address from direction & number
 * @ep: endpoint
 */
static inline u8 _usb_addr(struct ci13xxx_ep *ep)
{
	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}

/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp: endpoint
 * @mReq: request
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	struct ci13xxx *ci = mEp->ci;
	unsigned i;
	int ret = 0;
	unsigned length = mReq->req.length;

	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;

	mReq->req.status = -EALREADY;

	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
					    &mReq->zdma);
		if (mReq->zptr == NULL)
			return -ENOMEM;

		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next  = cpu_to_le32(TD_TERMINATE);
		mReq->zptr->token = cpu_to_le32(TD_STATUS_ACTIVE);
		if (!mReq->req.no_interrupt)
			mReq->zptr->token |= cpu_to_le32(TD_IOC);
	}
	ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
	if (ret)
		return ret;

	/*
	 * TD configuration
	 * TODO - handle requests which span several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token  = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
	mReq->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
	mReq->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
	if (mReq->zptr) {
		mReq->ptr->next = cpu_to_le32(mReq->zdma);
	} else {
		mReq->ptr->next = cpu_to_le32(TD_TERMINATE);
		if (!mReq->req.no_interrupt)
			mReq->ptr->token |= cpu_to_le32(TD_IOC);
	}
	mReq->ptr->page[0] = cpu_to_le32(mReq->req.dma);
	for (i = 1; i < TD_PAGE_COUNT; i++) {
		u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
		page &= ~TD_RESERVED_MASK;
		mReq->ptr->page[i] = cpu_to_le32(page);
	}

	wmb();

	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;
		u32 next = mReq->dma & TD_ADDR_MASK;

		mReqPrev = list_entry(mEp->qh.queue.prev,
				      struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = cpu_to_le32(next);
		else
			mReqPrev->ptr->next = cpu_to_le32(next);
		wmb();
		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		do {
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	mEp->qh.ptr->td.next = cpu_to_le32(mReq->dma);	/* TERMINATE = 0 */
	mEp->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));

	wmb();	/* synchronize before ep prime */

	ret = hw_ep_prime(ci, mEp->num, mEp->dir,
			  mEp->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}
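
/*
 * Note on the "add dTD to a non-empty queue" path in _hardware_enqueue():
 * the new TD is linked after the previous request's last TD, and if the
 * endpoint is not already primed the USBCMD.ATDTW (add dTD tripwire) bit
 * is used: set ATDTW, sample ENDPTSTAT, and retry until ATDTW is still set
 * after the sample, so the sampled status is consistent.  Only when
 * ENDPTSTAT shows the endpoint idle does the code rewrite the queue head
 * and re-prime; otherwise it assumes the controller will follow the new
 * link on its own.
 */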

/**
 * _hardware_dequeue: handles a request at hardware level
 * @mEp: endpoint
 * @mReq: request
 *
 * This function returns an error code
 */
static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	u32 tmptoken = le32_to_cpu(mReq->ptr->token);

	if (mReq->req.status != -EALREADY)
		return -EINVAL;

	if ((TD_STATUS_ACTIVE & tmptoken) != 0)
		return -EBUSY;

	if (mReq->zptr) {
		if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0)
			return -EBUSY;
		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
		mReq->zptr = NULL;
	}

	mReq->req.status = 0;

	usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);

	mReq->req.status = tmptoken & TD_STATUS;
	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;

	mReq->req.actual   = tmptoken & TD_TOTAL_BYTES;
	mReq->req.actual >>= __ffs(TD_TOTAL_BYTES);
	mReq->req.actual   = mReq->req.length - mReq->req.actual;
	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;

	return mReq->req.actual;
}
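
/*
 * Note on the token decoding in _hardware_dequeue(): the TD_TOTAL_BYTES
 * field of the token is treated here as the number of bytes left
 * untransferred, so the byte count actually moved is req.length minus that
 * value; on any error status the actual count is reported as zero.
 */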

/**
 * _ep_nuke: dequeues all endpoint requests
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	if (mEp == NULL)
		return -EINVAL;

	hw_ep_flush(mEp->ci, mEp->num, mEp->dir);

	while (!list_empty(&mEp->qh.queue)) {

		/* pop oldest request */
		struct ci13xxx_req *mReq = \
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);

		if (mReq->zptr) {
			dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
			mReq->zptr = NULL;
		}

		list_del_init(&mReq->queue);
		mReq->req.status = -ESHUTDOWN;

		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			mReq->req.complete(&mEp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}
	return 0;
}

/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;

	spin_lock_irqsave(&ci->lock, flags);
	ci->gadget.speed = USB_SPEED_UNKNOWN;
	ci->remote_wakeup = 0;
	ci->suspended = 0;
	spin_unlock_irqrestore(&ci->lock, flags);

	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	usb_ep_fifo_flush(&ci->ep0out->ep);
	usb_ep_fifo_flush(&ci->ep0in->ep);

	if (ci->driver)
		ci->driver->disconnect(gadget);

	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}

	if (ci->status != NULL) {
		usb_ep_free_request(&ci->ep0in->ep, ci->status);
		ci->status = NULL;
	}

	return 0;
}

/******************************************************************************
 * ISR block
 *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @ci: UDC device
 *
 * This function resets the USB engine after a bus reset has occurred
 */
static void isr_reset_handler(struct ci13xxx *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	int retval;

	spin_unlock(&ci->lock);
	retval = _gadget_stop_activity(&ci->gadget);
	if (retval)
		goto done;

	retval = hw_usb_reset(ci);
	if (retval)
		goto done;

	ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
	if (ci->status == NULL)
		retval = -ENOMEM;

done:
	spin_lock(&ci->lock);

	if (retval)
		dev_err(ci->dev, "error: %i\n", retval);
}

/**
 * isr_get_status_complete: get_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock
 */
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (ep == NULL || req == NULL)
		return;

	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

/**
 * _ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Caller must hold lock
 */
static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
		     gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	struct ci13xxx *ci = mEp->ci;
	int retval = 0;

	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		if (req->length)
			mEp = (ci->ep0_dir == RX) ?
				ci->ep0out : ci->ep0in;
		if (!list_empty(&mEp->qh.queue)) {
			_ep_nuke(mEp);
			retval = -EOVERFLOW;
			dev_warn(mEp->ci->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(mEp));
		}
	}

	/* first nuke then test link, e.g. a previous status has not been sent */
	if (!list_empty(&mReq->queue)) {
		dev_err(mEp->ci->dev, "request already in queue\n");
		return -EBUSY;
	}

	if (req->length > (TD_PAGE_COUNT - 1) * CI13XXX_PAGE_SIZE) {
		dev_err(mEp->ci->dev, "request bigger than one td\n");
		return -EMSGSIZE;
	}

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;

	retval = _hardware_enqueue(mEp, mReq);

	if (retval == -EALREADY)
		retval = 0;
	if (!retval)
		list_add_tail(&mReq->queue, &mEp->qh.queue);

	return retval;
}

/**
 * isr_get_status_response: get_status request response
 * @ci: ci struct
 * @setup: setup request packet
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci13xxx *ci,
				   struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_ep *mEp = ci->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;

	if (mEp == NULL || setup == NULL)
		return -EINVAL;

	spin_unlock(mEp->lock);
	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
	spin_lock(mEp->lock);
	if (req == NULL)
		return -ENOMEM;

	req->complete = isr_get_status_complete;
	req->length   = 2;
	req->buf      = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* Assume that device is bus powered for now. */
		*(u16 *)req->buf = ci->remote_wakeup << 1;
		retval = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
	}
	/* else do nothing; reserved for future use */

	retval = _ep_queue(&mEp->ep, req, gfp_flags);
	if (retval)
		goto err_free_buf;

	return 0;

err_free_buf:
	kfree(req->buf);
err_free_req:
	spin_unlock(mEp->lock);
	usb_ep_free_request(&mEp->ep, req);
	spin_lock(mEp->lock);
	return retval;
}

/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx *ci = req->context;
	unsigned long flags;

	if (ci->setaddr) {
		hw_usb_set_address(ci, ci->address);
		ci->setaddr = false;
	}

	spin_lock_irqsave(&ci->lock, flags);
	if (ci->test_mode)
		hw_port_test_set(ci, ci->test_mode);
	spin_unlock_irqrestore(&ci->lock, flags);
}

/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @ci: ci struct
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci13xxx *ci)
{
	int retval;
	struct ci13xxx_ep *mEp;

	mEp = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
	ci->status->context = ci;
	ci->status->complete = isr_setup_status_complete;

	retval = _ep_queue(&mEp->ep, ci->status, GFP_ATOMIC);

	return retval;
}

/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq, *mReqTemp;
	struct ci13xxx_ep *mEpTemp = mEp;
	int retval = 0;

	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
				 queue) {
		retval = _hardware_dequeue(mEp, mReq);
		if (retval < 0)
			break;
		list_del_init(&mReq->queue);
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
			    mReq->req.length)
				mEpTemp = mEp->ci->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}

	if (retval == -EBUSY)
		retval = 0;

	return retval;
}

/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @ci: UDC descriptor
 *
 * This function handles traffic events
 */
static void isr_tr_complete_handler(struct ci13xxx *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	unsigned i;
	u8 tmode = 0;

	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
		int type, num, dir, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->ep.desc == NULL)
			continue;	/* not configured */

		if (hw_test_and_clear_complete(ci, i)) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)	/* needs status phase */
					err = isr_setup_status_phase(ci);
				if (err < 0) {
					spin_unlock(&ci->lock);
					if (usb_ep_set_halt(&mEp->ep))
						dev_err(ci->dev,
							"error: ep_set_halt\n");
					spin_lock(&ci->lock);
				}
			}
		}

		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(ci, i))
			continue;

		if (i != 0) {
			dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
			continue;
		}

		/*
		 * Flush data and handshake transactions of previous
		 * setup packet.
		 */
		_ep_nuke(ci->ep0out);
		_ep_nuke(ci->ep0in);

		/* read_setup_packet */
		do {
			hw_test_and_set_setup_guard(ci);
			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
		} while (!hw_test_and_clear_setup_guard(ci));

		type = req.bRequestType;

		ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += ci->hw_ep_max/2;
				if (!ci->ci13xxx_ep[num].wedge) {
					spin_unlock(&ci->lock);
					err = usb_ep_clear_halt(
						&ci->ci13xxx_ep[num].ep);
					spin_lock(&ci->lock);
					if (err)
						break;
				}
				err = isr_setup_status_phase(ci);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				   le16_to_cpu(req.wValue) ==
				   USB_DEVICE_REMOTE_WAKEUP) {
				if (req.wLength != 0)
					break;
				ci->remote_wakeup = 0;
				err = isr_setup_status_phase(ci);
			} else {
				goto delegate;
			}
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 2 ||
			    le16_to_cpu(req.wValue)  != 0)
				break;
			err = isr_get_status_response(ci, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex)  != 0)
				break;
			ci->address = (u8)le16_to_cpu(req.wValue);
			ci->setaddr = true;
			err = isr_setup_status_phase(ci);
			break;
		case USB_REQ_SET_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += ci->hw_ep_max/2;

				spin_unlock(&ci->lock);
				err = usb_ep_set_halt(&ci->ci13xxx_ep[num].ep);
				spin_lock(&ci->lock);
				if (!err)
					isr_setup_status_phase(ci);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
				if (req.wLength != 0)
					break;
				switch (le16_to_cpu(req.wValue)) {
				case USB_DEVICE_REMOTE_WAKEUP:
					ci->remote_wakeup = 1;
					err = isr_setup_status_phase(ci);
					break;
				case USB_DEVICE_TEST_MODE:
					tmode = le16_to_cpu(req.wIndex) >> 8;
					switch (tmode) {
					case TEST_J:
					case TEST_K:
					case TEST_SE0_NAK:
					case TEST_PACKET:
					case TEST_FORCE_EN:
						ci->test_mode = tmode;
						err = isr_setup_status_phase(
								ci);
						break;
					default:
						break;
					}
				default:
					goto delegate;
				}
			} else {
				goto delegate;
			}
			break;
		default:
delegate:
			if (req.wLength == 0)	/* no data phase */
				ci->ep0_dir = TX;

			spin_unlock(&ci->lock);
			err = ci->driver->setup(&ci->gadget, &req);
			spin_lock(&ci->lock);
			break;
		}

		if (err < 0) {
			spin_unlock(&ci->lock);
			if (usb_ep_set_halt(&mEp->ep))
				dev_err(ci->dev, "error: ep_set_halt\n");
			spin_lock(&ci->lock);
		}
	}
}
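
/*
 * Note on the setup-packet copy in isr_tr_complete_handler(): the 8-byte
 * setup data is read from the queue head inside a USBCMD.SUTW (setup
 * tripwire) loop - set the guard, copy, repeat if the guard was cleared in
 * the meantime - so a setup packet that is overwritten by a newer one
 * while being copied is never handed to the gadget driver half old,
 * half new.
 */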

/******************************************************************************
 * ENDPT block
 *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int retval = 0;
	unsigned long flags;
	u32 cap = 0;

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should enable ctrl endpts */

	mEp->ep.desc = desc;

	if (!list_empty(&mEp->qh.queue))
		dev_warn(mEp->ci->dev, "enabling a non-empty endpoint!\n");

	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	mEp->num  = usb_endpoint_num(desc);
	mEp->type = usb_endpoint_type(desc);

	mEp->ep.maxpacket = usb_endpoint_maxp(desc);

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
		cap |= QH_IOS;
	if (mEp->num)
		cap |= QH_ZLT;
	cap |= (mEp->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;

	mEp->qh.ptr->cap = cpu_to_le32(cap);

	mEp->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);	/* needed? */

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (mEp->num)
		retval |= hw_ep_enable(mEp->ci, mEp->num, mEp->dir, mEp->type);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL)
		return -EINVAL;
	else if (mEp->ep.desc == NULL)
		return -EBUSY;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should disable ctrl endpts */

	direction = mEp->dir;
	do {
		retval |= _ep_nuke(mEp);
		retval |= hw_ep_disable(mEp->ci, mEp->num, mEp->dir);

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	mEp->ep.desc = NULL;

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = NULL;

	if (ep == NULL)
		return NULL;

	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
	if (mReq != NULL) {
		INIT_LIST_HEAD(&mReq->queue);

		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
					   &mReq->dma);
		if (mReq->ptr == NULL) {
			kfree(mReq);
			mReq = NULL;
		}
	}

	return (mReq == NULL) ? NULL : &mReq->req;
}

/**
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	if (ep == NULL || req == NULL) {
		return;
	} else if (!list_empty(&mReq->queue)) {
		dev_err(mEp->ci->dev, "freeing queued request\n");
		return;
	}

	spin_lock_irqsave(mEp->lock, flags);

	if (mReq->ptr)
		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
	kfree(mReq);

	spin_unlock_irqrestore(mEp->lock, flags);
}

/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue() at "usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);
	retval = _ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
	    mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
	    list_empty(&mEp->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	hw_ep_flush(mEp->ci, mEp->num, mEp->dir);

	/* pop request */
	list_del_init(&mReq->queue);

	usb_gadget_unmap_request(&mEp->ci->gadget, req, mEp->dir);

	req->status = -ECONNRESET;

	if (mReq->req.complete != NULL) {
		spin_unlock(mEp->lock);
		mReq->req.complete(&mEp->ep, &mReq->req);
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}

/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
	    !list_empty(&mEp->qh.queue)) {
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	direction = mEp->dir;
	do {
		retval |= hw_ep_set_halt(mEp->ci, mEp->num, mEp->dir, value);

		if (!value)
			mEp->wedge = 0;

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_set_wedge: sets the halt feature and ignores clear requests
 *
 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
 */
static int ep_set_wedge(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	unsigned long flags;

	if (ep == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);
	mEp->wedge = 1;
	spin_unlock_irqrestore(mEp->lock, flags);

	return usb_ep_set_halt(ep);
}

/**
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 */
static void ep_fifo_flush(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	unsigned long flags;

	if (ep == NULL) {
		dev_err(mEp->ci->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
		return;
	}

	spin_lock_irqsave(mEp->lock, flags);

	hw_ep_flush(mEp->ci, mEp->num, mEp->dir);

	spin_unlock_irqrestore(mEp->lock, flags);
}

/**
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
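
/*
 * The ops above are not called directly by gadget function drivers; they
 * are reached through the usb_ep_*() wrappers declared in
 * <linux/usb/gadget.h>.  A rough usage sketch from a function driver's
 * point of view (illustrative only, not part of this driver; my_complete
 * is a hypothetical completion callback):
 *
 *	ep->desc = desc;                             picked up by ep_enable()
 *	usb_ep_enable(ep);                           -> ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);  -> ep_alloc_request()
 *	req->buf      = buf;
 *	req->length   = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);           -> ep_queue() -> _ep_queue()
 */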

/******************************************************************************
 * GADGET block
 *****************************************************************************/
static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int gadget_ready = 0;

	if (!(ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&ci->lock, flags);
	ci->vbus_active = is_active;
	if (ci->driver)
		gadget_ready = 1;
	spin_unlock_irqrestore(&ci->lock, flags);

	if (gadget_ready) {
		if (is_active) {
			pm_runtime_get_sync(&_gadget->dev);
			hw_device_reset(ci, USBMODE_CM_DC);
			hw_device_state(ci, ci->ep0out->qh.dma);
		} else {
			hw_device_state(ci, 0);
			if (ci->platdata->notify_event)
				ci->platdata->notify_event(ci,
				CI13XXX_CONTROLLER_STOPPED_EVENT);
			_gadget_stop_activity(&ci->gadget);
			pm_runtime_put_sync(&_gadget->dev);
		}
	}

	return 0;
}

static int ci13xxx_wakeup(struct usb_gadget *_gadget)
{
	struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ci->lock, flags);
	if (!ci->remote_wakeup) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
		ret = -EINVAL;
		goto out;
	}
	hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
	spin_unlock_irqrestore(&ci->lock, flags);
	return ret;
}

static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{
	struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);

	if (ci->transceiver)
		return usb_phy_set_power(ci->transceiver, mA);
	return -ENOTSUPP;
}

/* Change Data+ pullup status
 * this func is used by usb_gadget_connect/disconnect
 */
static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);

	if (is_on)
		hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
	else
		hw_write(ci, OP_USBCMD, USBCMD_RS, 0);

	return 0;
}

static int ci13xxx_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver);
static int ci13xxx_stop(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver);
/**
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o)
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
	.wakeup		= ci13xxx_wakeup,
	.pullup		= ci13xxx_pullup,
	.vbus_draw	= ci13xxx_vbus_draw,
	.udc_start	= ci13xxx_start,
	.udc_stop	= ci13xxx_stop,
};

static int init_eps(struct ci13xxx *ci)
{
	int retval = 0, i, j;

	for (i = 0; i < ci->hw_ep_max/2; i++)
		for (j = RX; j <= TX; j++) {
			int k = i + j * ci->hw_ep_max/2;
			struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[k];

			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
				  (j == TX) ? "in" : "out");

			mEp->ci      = ci;
			mEp->lock    = &ci->lock;
			mEp->td_pool = ci->td_pool;

			mEp->ep.name = mEp->name;
			mEp->ep.ops  = &usb_ep_ops;
			/*
			 * for ep0: maxP defined in desc, for other
			 * eps, maxP is set by usb_ep_autoconfig() called
			 * by gadget layer
			 */
			mEp->ep.maxpacket = (unsigned short)~0;

			INIT_LIST_HEAD(&mEp->qh.queue);
			mEp->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
						     &mEp->qh.dma);
			if (mEp->qh.ptr == NULL)
				retval = -ENOMEM;
			else
				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));

			/*
			 * set up shorthands for ep0 out and in endpoints,
			 * don't add to gadget's ep_list
			 */
			if (i == 0) {
				if (j == RX)
					ci->ep0out = mEp;
				else
					ci->ep0in = mEp;

				mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
				continue;
			}

			list_add_tail(&mEp->ep.ep_list, &ci->gadget.ep_list);
		}

	return retval;
}

static void destroy_eps(struct ci13xxx *ci)
{
	int i;

	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];

		dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
	}
}
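
/*
 * Note on the endpoint array layout set up in init_eps(): the index
 * k = i + j * hw_ep_max/2 places all OUT (RX) endpoints in the first half
 * of ci13xxx_ep[] and all IN (TX) endpoints in the second half.  The same
 * convention is relied upon by ep_to_bit() and by the
 * "num += ci->hw_ep_max/2" adjustments in the SET/CLEAR_FEATURE
 * (ENDPOINT_HALT) handlers.
 */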

/**
 * ci13xxx_start: register a gadget driver
 * @gadget: our gadget
 * @driver: the driver being registered
 *
 * Interrupts are enabled here.
 */
static int ci13xxx_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int retval = -ENOMEM;

	if (driver->disconnect == NULL)
		return -EINVAL;


	ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
	retval = usb_ep_enable(&ci->ep0out->ep);
	if (retval)
		return retval;

	ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
	retval = usb_ep_enable(&ci->ep0in->ep);
	if (retval)
		return retval;
	spin_lock_irqsave(&ci->lock, flags);

	ci->driver = driver;
	pm_runtime_get_sync(&ci->gadget.dev);
	if (ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS) {
		if (ci->vbus_active) {
			if (ci->platdata->flags & CI13XXX_REGS_SHARED)
				hw_device_reset(ci, USBMODE_CM_DC);
		} else {
			pm_runtime_put_sync(&ci->gadget.dev);
			goto done;
		}
	}

	retval = hw_device_state(ci, ci->ep0out->qh.dma);
	if (retval)
		pm_runtime_put_sync(&ci->gadget.dev);

done:
	spin_unlock_irqrestore(&ci->lock, flags);
	return retval;
}

/**
 * ci13xxx_stop: unregister a gadget driver
 */
static int ci13xxx_stop(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;

	spin_lock_irqsave(&ci->lock, flags);

	if (!(ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS) ||
	    ci->vbus_active) {
		hw_device_state(ci, 0);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
			CI13XXX_CONTROLLER_STOPPED_EVENT);
		ci->driver = NULL;
		spin_unlock_irqrestore(&ci->lock, flags);
		_gadget_stop_activity(&ci->gadget);
		spin_lock_irqsave(&ci->lock, flags);
		pm_runtime_put(&ci->gadget.dev);
	}

	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}

/******************************************************************************
 * BUS block
 *****************************************************************************/
/**
 * udc_irq: ci interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
static irqreturn_t udc_irq(struct ci13xxx *ci)
{
	irqreturn_t retval;
	u32 intr;

	if (ci == NULL)
		return IRQ_HANDLED;

	spin_lock(&ci->lock);

	if (ci->platdata->flags & CI13XXX_REGS_SHARED) {
		if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
		    USBMODE_CM_DC) {
			spin_unlock(&ci->lock);
			return IRQ_NONE;
		}
	}
	intr = hw_test_and_clear_intr_active(ci);

	if (intr) {
		/* order defines priority - do NOT change it */
		if (USBi_URI & intr)
			isr_reset_handler(ci);

		if (USBi_PCI & intr) {
			ci->gadget.speed = hw_port_is_high_speed(ci) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (ci->suspended && ci->driver->resume) {
				spin_unlock(&ci->lock);
				ci->driver->resume(&ci->gadget);
				spin_lock(&ci->lock);
				ci->suspended = 0;
			}
		}

		if (USBi_UI & intr)
			isr_tr_complete_handler(ci);

		if (USBi_SLI & intr) {
			if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
			    ci->driver->suspend) {
				ci->suspended = 1;
				spin_unlock(&ci->lock);
				ci->driver->suspend(&ci->gadget);
				spin_lock(&ci->lock);
			}
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&ci->lock);

	return retval;
}
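
/*
 * Note on the CI13XXX_REGS_SHARED test in udc_irq(): when the operational
 * registers are shared with the host role, the interrupt may belong to the
 * other role, so the handler returns IRQ_NONE unless USBMODE reports that
 * the controller is currently in device (DC) mode.
 */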

/**
 * udc_start: initialize gadget role
 * @ci: chipidea controller
 */
static int udc_start(struct ci13xxx *ci)
{
	struct device *dev = ci->dev;
	int retval = 0;

	spin_lock_init(&ci->lock);

	ci->gadget.ops       = &usb_gadget_ops;
	ci->gadget.speed     = USB_SPEED_UNKNOWN;
	ci->gadget.max_speed = USB_SPEED_HIGH;
	ci->gadget.is_otg    = 0;
	ci->gadget.name      = ci->platdata->name;

	INIT_LIST_HEAD(&ci->gadget.ep_list);

	/* alloc resources */
	ci->qh_pool = dma_pool_create("ci13xxx_qh", dev,
				      sizeof(struct ci13xxx_qh),
				      64, CI13XXX_PAGE_SIZE);
	if (ci->qh_pool == NULL)
		return -ENOMEM;

	ci->td_pool = dma_pool_create("ci13xxx_td", dev,
				      sizeof(struct ci13xxx_td),
				      64, CI13XXX_PAGE_SIZE);
	if (ci->td_pool == NULL) {
		retval = -ENOMEM;
		goto free_qh_pool;
	}

	retval = init_eps(ci);
	if (retval)
		goto free_pools;

	ci->gadget.ep0 = &ci->ep0in->ep;

	if (ci->global_phy)
		ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);

	if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
		if (ci->transceiver == NULL) {
			retval = -ENODEV;
			goto destroy_eps;
		}
	}

	if (!(ci->platdata->flags & CI13XXX_REGS_SHARED)) {
		retval = hw_device_reset(ci, USBMODE_CM_DC);
		if (retval)
			goto put_transceiver;
	}

	if (!IS_ERR_OR_NULL(ci->transceiver)) {
		retval = otg_set_peripheral(ci->transceiver->otg,
					    &ci->gadget);
		if (retval)
			goto put_transceiver;
	}

	retval = usb_add_gadget_udc(dev, &ci->gadget);
	if (retval)
		goto remove_trans;

	pm_runtime_no_callbacks(&ci->gadget.dev);
	pm_runtime_enable(&ci->gadget.dev);

	return retval;

remove_trans:
	if (!IS_ERR_OR_NULL(ci->transceiver)) {
		otg_set_peripheral(ci->transceiver->otg, NULL);
		if (ci->global_phy)
			usb_put_phy(ci->transceiver);
	}

	dev_err(dev, "error = %i\n", retval);
put_transceiver:
	if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
		usb_put_phy(ci->transceiver);
destroy_eps:
	destroy_eps(ci);
free_pools:
	dma_pool_destroy(ci->td_pool);
free_qh_pool:
	dma_pool_destroy(ci->qh_pool);
	return retval;
}

/**
 * udc_stop: parent remove must call this to remove UDC
 *
 * No interrupts active, the IRQ has been released
 */
static void udc_stop(struct ci13xxx *ci)
{
	if (ci == NULL)
		return;

	usb_del_gadget_udc(&ci->gadget);

	destroy_eps(ci);

	dma_pool_destroy(ci->td_pool);
	dma_pool_destroy(ci->qh_pool);

	if (!IS_ERR_OR_NULL(ci->transceiver)) {
		otg_set_peripheral(ci->transceiver->otg, NULL);
		if (ci->global_phy)
			usb_put_phy(ci->transceiver);
	}
	/* my kobject is dynamic, I swear! */
	memset(&ci->gadget, 0, sizeof(ci->gadget));
}

/**
 * ci_hdrc_gadget_init - initialize device related bits
 * @ci: the controller
 *
 * This function enables the gadget role, if the device is "device capable".
 */
int ci_hdrc_gadget_init(struct ci13xxx *ci)
{
	struct ci_role_driver *rdrv;

	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
		return -ENXIO;

	rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= udc_start;
	rdrv->stop	= udc_stop;
	rdrv->irq	= udc_irq;
	rdrv->name	= "gadget";
	ci->roles[CI_ROLE_GADGET] = rdrv;

	return 0;
}