1 /* 2 * Copyright 2011, Marvell Semiconductor Inc. 3 * Lei Wen <leiwen@marvell.com> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 * 7 * Back ported to the 8xx platform (from the 8260 platform) by 8 * Murray.Jensen@cmst.csiro.au, 27-Jan-01. 9 */ 10 11 #include <common.h> 12 #include <command.h> 13 #include <config.h> 14 #include <net.h> 15 #include <malloc.h> 16 #include <asm/byteorder.h> 17 #include <asm/errno.h> 18 #include <asm/io.h> 19 #include <asm/unaligned.h> 20 #include <linux/types.h> 21 #include <linux/usb/ch9.h> 22 #include <linux/usb/gadget.h> 23 #include <usb/ci_udc.h> 24 #include "../host/ehci.h" 25 #include "ci_udc.h" 26 27 /* 28 * Check if the system has too long cachelines. If the cachelines are 29 * longer then 128b, the driver will not be able flush/invalidate data 30 * cache over separate QH entries. We use 128b because one QH entry is 31 * 64b long and there are always two QH list entries for each endpoint. 32 */ 33 #if ARCH_DMA_MINALIGN > 128 34 #error This driver can not work on systems with caches longer than 128b 35 #endif 36 37 /* 38 * Every QTD must be individually aligned, since we can program any 39 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN, 40 * and the USB HW requires 32-byte alignment. Align to both: 41 */ 42 #define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32) 43 /* Each QTD is this size */ 44 #define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item) 45 /* 46 * Align the size of the QTD too, so we can add this value to each 47 * QTD's address to get another aligned address. 48 */ 49 #define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN) 50 /* For each endpoint, we need 2 QTDs, one for each of IN and OUT */ 51 #define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ) 52 53 #define EP_MAX_LENGTH_TRANSFER 0x4000 54 55 #ifndef DEBUG 56 #define DBG(x...) do {} while (0) 57 #else 58 #define DBG(x...) printf(x) 59 static const char *reqname(unsigned r) 60 { 61 switch (r) { 62 case USB_REQ_GET_STATUS: return "GET_STATUS"; 63 case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE"; 64 case USB_REQ_SET_FEATURE: return "SET_FEATURE"; 65 case USB_REQ_SET_ADDRESS: return "SET_ADDRESS"; 66 case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR"; 67 case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR"; 68 case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION"; 69 case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION"; 70 case USB_REQ_GET_INTERFACE: return "GET_INTERFACE"; 71 case USB_REQ_SET_INTERFACE: return "SET_INTERFACE"; 72 default: return "*UNKNOWN*"; 73 } 74 } 75 #endif 76 77 static struct usb_endpoint_descriptor ep0_desc = { 78 .bLength = sizeof(struct usb_endpoint_descriptor), 79 .bDescriptorType = USB_DT_ENDPOINT, 80 .bEndpointAddress = USB_DIR_IN, 81 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 82 }; 83 84 static int ci_pullup(struct usb_gadget *gadget, int is_on); 85 static int ci_ep_enable(struct usb_ep *ep, 86 const struct usb_endpoint_descriptor *desc); 87 static int ci_ep_disable(struct usb_ep *ep); 88 static int ci_ep_queue(struct usb_ep *ep, 89 struct usb_request *req, gfp_t gfp_flags); 90 static struct usb_request * 91 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags); 92 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req); 93 94 static struct usb_gadget_ops ci_udc_ops = { 95 .pullup = ci_pullup, 96 }; 97 98 static struct usb_ep_ops ci_ep_ops = { 99 .enable = ci_ep_enable, 100 .disable = ci_ep_disable, 101 .queue = ci_ep_queue, 102 .alloc_request = ci_ep_alloc_request, 103 .free_request = ci_ep_free_request, 104 }; 105 106 /* Init values for USB endpoints. */ 107 static const struct usb_ep ci_ep_init[5] = { 108 [0] = { /* EP 0 */ 109 .maxpacket = 64, 110 .name = "ep0", 111 .ops = &ci_ep_ops, 112 }, 113 [1] = { 114 .maxpacket = 512, 115 .name = "ep1in-bulk", 116 .ops = &ci_ep_ops, 117 }, 118 [2] = { 119 .maxpacket = 512, 120 .name = "ep2out-bulk", 121 .ops = &ci_ep_ops, 122 }, 123 [3] = { 124 .maxpacket = 512, 125 .name = "ep3in-int", 126 .ops = &ci_ep_ops, 127 }, 128 [4] = { 129 .maxpacket = 512, 130 .name = "ep-", 131 .ops = &ci_ep_ops, 132 }, 133 }; 134 135 static struct ci_drv controller = { 136 .gadget = { 137 .name = "ci_udc", 138 .ops = &ci_udc_ops, 139 .is_dualspeed = 1, 140 }, 141 }; 142 143 /** 144 * ci_get_qh() - return queue head for endpoint 145 * @ep_num: Endpoint number 146 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) 147 * 148 * This function returns the QH associated with particular endpoint 149 * and it's direction. 150 */ 151 static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) 152 { 153 return &controller.epts[(ep_num * 2) + dir_in]; 154 } 155 156 /** 157 * ci_get_qtd() - return queue item for endpoint 158 * @ep_num: Endpoint number 159 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) 160 * 161 * This function returns the QH associated with particular endpoint 162 * and it's direction. 163 */ 164 static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) 165 { 166 int index = (ep_num * 2) + dir_in; 167 uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); 168 return (struct ept_queue_item *)imem; 169 } 170 171 /** 172 * ci_flush_qh - flush cache over queue head 173 * @ep_num: Endpoint number 174 * 175 * This function flushes cache over QH for particular endpoint. 176 */ 177 static void ci_flush_qh(int ep_num) 178 { 179 struct ept_queue_head *head = ci_get_qh(ep_num, 0); 180 const unsigned long start = (unsigned long)head; 181 const unsigned long end = start + 2 * sizeof(*head); 182 183 flush_dcache_range(start, end); 184 } 185 186 /** 187 * ci_invalidate_qh - invalidate cache over queue head 188 * @ep_num: Endpoint number 189 * 190 * This function invalidates cache over QH for particular endpoint. 191 */ 192 static void ci_invalidate_qh(int ep_num) 193 { 194 struct ept_queue_head *head = ci_get_qh(ep_num, 0); 195 unsigned long start = (unsigned long)head; 196 unsigned long end = start + 2 * sizeof(*head); 197 198 invalidate_dcache_range(start, end); 199 } 200 201 /** 202 * ci_flush_qtd - flush cache over queue item 203 * @ep_num: Endpoint number 204 * 205 * This function flushes cache over qTD pair for particular endpoint. 206 */ 207 static void ci_flush_qtd(int ep_num) 208 { 209 struct ept_queue_item *item = ci_get_qtd(ep_num, 0); 210 const unsigned long start = (unsigned long)item; 211 const unsigned long end = start + 2 * ILIST_ENT_SZ; 212 213 flush_dcache_range(start, end); 214 } 215 216 /** 217 * ci_flush_td - flush cache over queue item 218 * @td: td pointer 219 * 220 * This function flushes cache for particular transfer descriptor. 221 */ 222 static void ci_flush_td(struct ept_queue_item *td) 223 { 224 const uint32_t start = (uint32_t)td; 225 const uint32_t end = (uint32_t) td + ILIST_ENT_SZ; 226 flush_dcache_range(start, end); 227 } 228 229 /** 230 * ci_invalidate_qtd - invalidate cache over queue item 231 * @ep_num: Endpoint number 232 * 233 * This function invalidates cache over qTD pair for particular endpoint. 234 */ 235 static void ci_invalidate_qtd(int ep_num) 236 { 237 struct ept_queue_item *item = ci_get_qtd(ep_num, 0); 238 const unsigned long start = (unsigned long)item; 239 const unsigned long end = start + 2 * ILIST_ENT_SZ; 240 241 invalidate_dcache_range(start, end); 242 } 243 244 /** 245 * ci_invalidate_td - invalidate cache over queue item 246 * @td: td pointer 247 * 248 * This function invalidates cache for particular transfer descriptor. 249 */ 250 static void ci_invalidate_td(struct ept_queue_item *td) 251 { 252 const uint32_t start = (uint32_t)td; 253 const uint32_t end = start + ILIST_ENT_SZ; 254 invalidate_dcache_range(start, end); 255 } 256 257 static struct usb_request * 258 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) 259 { 260 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 261 int num; 262 struct ci_req *ci_req; 263 264 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 265 if (num == 0 && controller.ep0_req) 266 return &controller.ep0_req->req; 267 268 ci_req = calloc(1, sizeof(*ci_req)); 269 if (!ci_req) 270 return NULL; 271 272 INIT_LIST_HEAD(&ci_req->queue); 273 274 if (num == 0) 275 controller.ep0_req = ci_req; 276 277 return &ci_req->req; 278 } 279 280 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) 281 { 282 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 283 struct ci_req *ci_req = container_of(req, struct ci_req, req); 284 int num; 285 286 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 287 if (num == 0) { 288 if (!controller.ep0_req) 289 return; 290 controller.ep0_req = 0; 291 } 292 293 if (ci_req->b_buf) 294 free(ci_req->b_buf); 295 free(ci_req); 296 } 297 298 static void ep_enable(int num, int in, int maxpacket) 299 { 300 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 301 unsigned n; 302 303 n = readl(&udc->epctrl[num]); 304 if (in) 305 n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK); 306 else 307 n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK); 308 309 if (num != 0) { 310 struct ept_queue_head *head = ci_get_qh(num, in); 311 312 head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT; 313 ci_flush_qh(num); 314 } 315 writel(n, &udc->epctrl[num]); 316 } 317 318 static int ci_ep_enable(struct usb_ep *ep, 319 const struct usb_endpoint_descriptor *desc) 320 { 321 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 322 int num, in; 323 num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 324 in = (desc->bEndpointAddress & USB_DIR_IN) != 0; 325 ci_ep->desc = desc; 326 327 if (num) { 328 int max = get_unaligned_le16(&desc->wMaxPacketSize); 329 330 if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) 331 max = 64; 332 if (ep->maxpacket != max) { 333 DBG("%s: from %d to %d\n", __func__, 334 ep->maxpacket, max); 335 ep->maxpacket = max; 336 } 337 } 338 ep_enable(num, in, ep->maxpacket); 339 DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); 340 return 0; 341 } 342 343 static int ci_ep_disable(struct usb_ep *ep) 344 { 345 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 346 347 ci_ep->desc = NULL; 348 return 0; 349 } 350 351 static int ci_bounce(struct ci_req *ci_req, int in) 352 { 353 struct usb_request *req = &ci_req->req; 354 unsigned long addr = (unsigned long)req->buf; 355 unsigned long hwaddr; 356 uint32_t aligned_used_len; 357 358 /* Input buffer address is not aligned. */ 359 if (addr & (ARCH_DMA_MINALIGN - 1)) 360 goto align; 361 362 /* Input buffer length is not aligned. */ 363 if (req->length & (ARCH_DMA_MINALIGN - 1)) 364 goto align; 365 366 /* The buffer is well aligned, only flush cache. */ 367 ci_req->hw_len = req->length; 368 ci_req->hw_buf = req->buf; 369 goto flush; 370 371 align: 372 if (ci_req->b_buf && req->length > ci_req->b_len) { 373 free(ci_req->b_buf); 374 ci_req->b_buf = 0; 375 } 376 if (!ci_req->b_buf) { 377 ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); 378 ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); 379 if (!ci_req->b_buf) 380 return -ENOMEM; 381 } 382 ci_req->hw_len = ci_req->b_len; 383 ci_req->hw_buf = ci_req->b_buf; 384 385 if (in) 386 memcpy(ci_req->hw_buf, req->buf, req->length); 387 388 flush: 389 hwaddr = (unsigned long)ci_req->hw_buf; 390 aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); 391 flush_dcache_range(hwaddr, hwaddr + aligned_used_len); 392 393 return 0; 394 } 395 396 static void ci_debounce(struct ci_req *ci_req, int in) 397 { 398 struct usb_request *req = &ci_req->req; 399 unsigned long addr = (unsigned long)req->buf; 400 unsigned long hwaddr = (unsigned long)ci_req->hw_buf; 401 uint32_t aligned_used_len; 402 403 if (in) 404 return; 405 406 aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); 407 invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); 408 409 if (addr == hwaddr) 410 return; /* not a bounce */ 411 412 memcpy(req->buf, ci_req->hw_buf, req->actual); 413 } 414 415 static void ci_ep_submit_next_request(struct ci_ep *ci_ep) 416 { 417 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 418 struct ept_queue_item *item; 419 struct ept_queue_head *head; 420 int bit, num, len, in; 421 struct ci_req *ci_req; 422 u8 *buf; 423 uint32_t length, actlen; 424 struct ept_queue_item *dtd, *qtd; 425 426 ci_ep->req_primed = true; 427 428 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 429 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 430 item = ci_get_qtd(num, in); 431 head = ci_get_qh(num, in); 432 433 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 434 len = ci_req->req.length; 435 436 head->next = (unsigned long)item; 437 head->info = 0; 438 439 ci_req->dtd_count = 0; 440 buf = ci_req->hw_buf; 441 actlen = 0; 442 dtd = item; 443 444 do { 445 length = min(ci_req->req.length - actlen, 446 (unsigned)EP_MAX_LENGTH_TRANSFER); 447 448 dtd->info = INFO_BYTES(length) | INFO_ACTIVE; 449 dtd->page0 = (unsigned long)buf; 450 dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000; 451 dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000; 452 dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000; 453 dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000; 454 455 len -= length; 456 actlen += length; 457 buf += length; 458 459 if (len) { 460 qtd = (struct ept_queue_item *) 461 memalign(ILIST_ALIGN, ILIST_ENT_SZ); 462 dtd->next = (uint32_t)qtd; 463 dtd = qtd; 464 memset(dtd, 0, ILIST_ENT_SZ); 465 } 466 467 ci_req->dtd_count++; 468 } while (len); 469 470 item = dtd; 471 /* 472 * When sending the data for an IN transaction, the attached host 473 * knows that all data for the IN is sent when one of the following 474 * occurs: 475 * a) A zero-length packet is transmitted. 476 * b) A packet with length that isn't an exact multiple of the ep's 477 * maxpacket is transmitted. 478 * c) Enough data is sent to exactly fill the host's maximum expected 479 * IN transaction size. 480 * 481 * One of these conditions MUST apply at the end of an IN transaction, 482 * or the transaction will not be considered complete by the host. If 483 * none of (a)..(c) already applies, then we must force (a) to apply 484 * by explicitly sending an extra zero-length packet. 485 */ 486 /* IN !a !b !c */ 487 if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) { 488 /* 489 * Each endpoint has 2 items allocated, even though typically 490 * only 1 is used at a time since either an IN or an OUT but 491 * not both is queued. For an IN transaction, item currently 492 * points at the second of these items, so we know that we 493 * can use the other to transmit the extra zero-length packet. 494 */ 495 struct ept_queue_item *other_item = ci_get_qtd(num, 0); 496 item->next = (unsigned long)other_item; 497 item = other_item; 498 item->info = INFO_ACTIVE; 499 } 500 501 item->next = TERMINATE; 502 item->info |= INFO_IOC; 503 504 ci_flush_qtd(num); 505 506 item = (struct ept_queue_item *)head->next; 507 while (item->next != TERMINATE) { 508 ci_flush_td((struct ept_queue_item *)item->next); 509 item = (struct ept_queue_item *)item->next; 510 } 511 512 DBG("ept%d %s queue len %x, req %p, buffer %p\n", 513 num, in ? "in" : "out", len, ci_req, ci_req->hw_buf); 514 ci_flush_qh(num); 515 516 if (in) 517 bit = EPT_TX(num); 518 else 519 bit = EPT_RX(num); 520 521 writel(bit, &udc->epprime); 522 } 523 524 static int ci_ep_queue(struct usb_ep *ep, 525 struct usb_request *req, gfp_t gfp_flags) 526 { 527 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 528 struct ci_req *ci_req = container_of(req, struct ci_req, req); 529 int in, ret; 530 int __maybe_unused num; 531 532 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 533 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 534 535 if (!num && ci_ep->req_primed) { 536 /* 537 * The flipping of ep0 between IN and OUT relies on 538 * ci_ep_queue consuming the current IN/OUT setting 539 * immediately. If this is deferred to a later point when the 540 * req is pulled out of ci_req->queue, then the IN/OUT setting 541 * may have been changed since the req was queued, and state 542 * will get out of sync. This condition doesn't occur today, 543 * but could if bugs were introduced later, and this error 544 * check will save a lot of debugging time. 545 */ 546 printf("%s: ep0 transaction already in progress\n", __func__); 547 return -EPROTO; 548 } 549 550 ret = ci_bounce(ci_req, in); 551 if (ret) 552 return ret; 553 554 DBG("ept%d %s pre-queue req %p, buffer %p\n", 555 num, in ? "in" : "out", ci_req, ci_req->hw_buf); 556 list_add_tail(&ci_req->queue, &ci_ep->queue); 557 558 if (!ci_ep->req_primed) 559 ci_ep_submit_next_request(ci_ep); 560 561 return 0; 562 } 563 564 static void flip_ep0_direction(void) 565 { 566 if (ep0_desc.bEndpointAddress == USB_DIR_IN) { 567 DBG("%s: Flipping ep0 to OUT\n", __func__); 568 ep0_desc.bEndpointAddress = 0; 569 } else { 570 DBG("%s: Flipping ep0 to IN\n", __func__); 571 ep0_desc.bEndpointAddress = USB_DIR_IN; 572 } 573 } 574 575 static void handle_ep_complete(struct ci_ep *ci_ep) 576 { 577 struct ept_queue_item *item, *next_td; 578 int num, in, len, j; 579 struct ci_req *ci_req; 580 581 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 582 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 583 item = ci_get_qtd(num, in); 584 ci_invalidate_qtd(num); 585 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 586 587 next_td = item; 588 len = 0; 589 for (j = 0; j < ci_req->dtd_count; j++) { 590 ci_invalidate_td(next_td); 591 item = next_td; 592 len += (item->info >> 16) & 0x7fff; 593 if (item->info & 0xff) 594 printf("EP%d/%s FAIL info=%x pg0=%x\n", 595 num, in ? "in" : "out", item->info, item->page0); 596 if (j != ci_req->dtd_count - 1) 597 next_td = (struct ept_queue_item *)item->next; 598 if (j != 0) 599 free(item); 600 } 601 602 list_del_init(&ci_req->queue); 603 ci_ep->req_primed = false; 604 605 if (!list_empty(&ci_ep->queue)) 606 ci_ep_submit_next_request(ci_ep); 607 608 ci_req->req.actual = ci_req->req.length - len; 609 ci_debounce(ci_req, in); 610 611 DBG("ept%d %s req %p, complete %x\n", 612 num, in ? "in" : "out", ci_req, len); 613 if (num != 0 || controller.ep0_data_phase) 614 ci_req->req.complete(&ci_ep->ep, &ci_req->req); 615 if (num == 0 && controller.ep0_data_phase) { 616 /* 617 * Data Stage is complete, so flip ep0 dir for Status Stage, 618 * which always transfers a packet in the opposite direction. 619 */ 620 DBG("%s: flip ep0 dir for Status Stage\n", __func__); 621 flip_ep0_direction(); 622 controller.ep0_data_phase = false; 623 ci_req->req.length = 0; 624 usb_ep_queue(&ci_ep->ep, &ci_req->req, 0); 625 } 626 } 627 628 #define SETUP(type, request) (((type) << 8) | (request)) 629 630 static void handle_setup(void) 631 { 632 struct ci_ep *ci_ep = &controller.ep[0]; 633 struct ci_req *ci_req; 634 struct usb_request *req; 635 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 636 struct ept_queue_head *head; 637 struct usb_ctrlrequest r; 638 int status = 0; 639 int num, in, _num, _in, i; 640 char *buf; 641 642 ci_req = controller.ep0_req; 643 req = &ci_req->req; 644 head = ci_get_qh(0, 0); /* EP0 OUT */ 645 646 ci_invalidate_qh(0); 647 memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest)); 648 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 649 writel(EPT_RX(0), &udc->epsetupstat); 650 #else 651 writel(EPT_RX(0), &udc->epstat); 652 #endif 653 DBG("handle setup %s, %x, %x index %x value %x length %x\n", 654 reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex, 655 r.wValue, r.wLength); 656 657 /* Set EP0 dir for Data Stage based on Setup Stage data */ 658 if (r.bRequestType & USB_DIR_IN) { 659 DBG("%s: Set ep0 to IN for Data Stage\n", __func__); 660 ep0_desc.bEndpointAddress = USB_DIR_IN; 661 } else { 662 DBG("%s: Set ep0 to OUT for Data Stage\n", __func__); 663 ep0_desc.bEndpointAddress = 0; 664 } 665 if (r.wLength) { 666 controller.ep0_data_phase = true; 667 } else { 668 /* 0 length -> no Data Stage. Flip dir for Status Stage */ 669 DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__); 670 flip_ep0_direction(); 671 controller.ep0_data_phase = false; 672 } 673 674 list_del_init(&ci_req->queue); 675 ci_ep->req_primed = false; 676 677 switch (SETUP(r.bRequestType, r.bRequest)) { 678 case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE): 679 _num = r.wIndex & 15; 680 _in = !!(r.wIndex & 0x80); 681 682 if ((r.wValue == 0) && (r.wLength == 0)) { 683 req->length = 0; 684 for (i = 0; i < NUM_ENDPOINTS; i++) { 685 struct ci_ep *ep = &controller.ep[i]; 686 687 if (!ep->desc) 688 continue; 689 num = ep->desc->bEndpointAddress 690 & USB_ENDPOINT_NUMBER_MASK; 691 in = (ep->desc->bEndpointAddress 692 & USB_DIR_IN) != 0; 693 if ((num == _num) && (in == _in)) { 694 ep_enable(num, in, ep->ep.maxpacket); 695 usb_ep_queue(controller.gadget.ep0, 696 req, 0); 697 break; 698 } 699 } 700 } 701 return; 702 703 case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS): 704 /* 705 * write address delayed (will take effect 706 * after the next IN txn) 707 */ 708 writel((r.wValue << 25) | (1 << 24), &udc->devaddr); 709 req->length = 0; 710 usb_ep_queue(controller.gadget.ep0, req, 0); 711 return; 712 713 case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS): 714 req->length = 2; 715 buf = (char *)req->buf; 716 buf[0] = 1 << USB_DEVICE_SELF_POWERED; 717 buf[1] = 0; 718 usb_ep_queue(controller.gadget.ep0, req, 0); 719 return; 720 } 721 /* pass request up to the gadget driver */ 722 if (controller.driver) 723 status = controller.driver->setup(&controller.gadget, &r); 724 else 725 status = -ENODEV; 726 727 if (!status) 728 return; 729 DBG("STALL reqname %s type %x value %x, index %x\n", 730 reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex); 731 writel((1<<16) | (1 << 0), &udc->epctrl[0]); 732 } 733 734 static void stop_activity(void) 735 { 736 int i, num, in; 737 struct ept_queue_head *head; 738 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 739 writel(readl(&udc->epcomp), &udc->epcomp); 740 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 741 writel(readl(&udc->epsetupstat), &udc->epsetupstat); 742 #endif 743 writel(readl(&udc->epstat), &udc->epstat); 744 writel(0xffffffff, &udc->epflush); 745 746 /* error out any pending reqs */ 747 for (i = 0; i < NUM_ENDPOINTS; i++) { 748 if (i != 0) 749 writel(0, &udc->epctrl[i]); 750 if (controller.ep[i].desc) { 751 num = controller.ep[i].desc->bEndpointAddress 752 & USB_ENDPOINT_NUMBER_MASK; 753 in = (controller.ep[i].desc->bEndpointAddress 754 & USB_DIR_IN) != 0; 755 head = ci_get_qh(num, in); 756 head->info = INFO_ACTIVE; 757 ci_flush_qh(num); 758 } 759 } 760 } 761 762 void udc_irq(void) 763 { 764 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 765 unsigned n = readl(&udc->usbsts); 766 writel(n, &udc->usbsts); 767 int bit, i, num, in; 768 769 n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI); 770 if (n == 0) 771 return; 772 773 if (n & STS_URI) { 774 DBG("-- reset --\n"); 775 stop_activity(); 776 } 777 if (n & STS_SLI) 778 DBG("-- suspend --\n"); 779 780 if (n & STS_PCI) { 781 int max = 64; 782 int speed = USB_SPEED_FULL; 783 784 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 785 bit = (readl(&udc->hostpc1_devlc) >> 25) & 3; 786 #else 787 bit = (readl(&udc->portsc) >> 26) & 3; 788 #endif 789 DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full"); 790 if (bit == 2) { 791 speed = USB_SPEED_HIGH; 792 max = 512; 793 } 794 controller.gadget.speed = speed; 795 for (i = 1; i < NUM_ENDPOINTS; i++) { 796 if (controller.ep[i].ep.maxpacket > max) 797 controller.ep[i].ep.maxpacket = max; 798 } 799 } 800 801 if (n & STS_UEI) 802 printf("<UEI %x>\n", readl(&udc->epcomp)); 803 804 if ((n & STS_UI) || (n & STS_UEI)) { 805 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 806 n = readl(&udc->epsetupstat); 807 #else 808 n = readl(&udc->epstat); 809 #endif 810 if (n & EPT_RX(0)) 811 handle_setup(); 812 813 n = readl(&udc->epcomp); 814 if (n != 0) 815 writel(n, &udc->epcomp); 816 817 for (i = 0; i < NUM_ENDPOINTS && n; i++) { 818 if (controller.ep[i].desc) { 819 num = controller.ep[i].desc->bEndpointAddress 820 & USB_ENDPOINT_NUMBER_MASK; 821 in = (controller.ep[i].desc->bEndpointAddress 822 & USB_DIR_IN) != 0; 823 bit = (in) ? EPT_TX(num) : EPT_RX(num); 824 if (n & bit) 825 handle_ep_complete(&controller.ep[i]); 826 } 827 } 828 } 829 } 830 831 int usb_gadget_handle_interrupts(int index) 832 { 833 u32 value; 834 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 835 836 value = readl(&udc->usbsts); 837 if (value) 838 udc_irq(); 839 840 return value; 841 } 842 843 void udc_disconnect(void) 844 { 845 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 846 /* disable pullup */ 847 stop_activity(); 848 writel(USBCMD_FS2, &udc->usbcmd); 849 udelay(800); 850 if (controller.driver) 851 controller.driver->disconnect(&controller.gadget); 852 } 853 854 static int ci_pullup(struct usb_gadget *gadget, int is_on) 855 { 856 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 857 if (is_on) { 858 /* RESET */ 859 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd); 860 udelay(200); 861 862 writel((unsigned long)controller.epts, &udc->epinitaddr); 863 864 /* select DEVICE mode */ 865 writel(USBMODE_DEVICE, &udc->usbmode); 866 867 #if !defined(CONFIG_USB_GADGET_DUALSPEED) 868 /* Port force Full-Speed Connect */ 869 setbits_le32(&udc->portsc, PFSC); 870 #endif 871 872 writel(0xffffffff, &udc->epflush); 873 874 /* Turn on the USB connection by enabling the pullup resistor */ 875 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd); 876 } else { 877 udc_disconnect(); 878 } 879 880 return 0; 881 } 882 883 static int ci_udc_probe(void) 884 { 885 struct ept_queue_head *head; 886 int i; 887 888 const int num = 2 * NUM_ENDPOINTS; 889 890 const int eplist_min_align = 4096; 891 const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN); 892 const int eplist_raw_sz = num * sizeof(struct ept_queue_head); 893 const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN); 894 895 /* The QH list must be aligned to 4096 bytes. */ 896 controller.epts = memalign(eplist_align, eplist_sz); 897 if (!controller.epts) 898 return -ENOMEM; 899 memset(controller.epts, 0, eplist_sz); 900 901 controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ); 902 if (!controller.items_mem) { 903 free(controller.epts); 904 return -ENOMEM; 905 } 906 memset(controller.items_mem, 0, ILIST_SZ); 907 908 for (i = 0; i < 2 * NUM_ENDPOINTS; i++) { 909 /* 910 * Configure QH for each endpoint. The structure of the QH list 911 * is such that each two subsequent fields, N and N+1 where N is 912 * even, in the QH list represent QH for one endpoint. The Nth 913 * entry represents OUT configuration and the N+1th entry does 914 * represent IN configuration of the endpoint. 915 */ 916 head = controller.epts + i; 917 if (i < 2) 918 head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE) 919 | CONFIG_ZLT | CONFIG_IOS; 920 else 921 head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE) 922 | CONFIG_ZLT; 923 head->next = TERMINATE; 924 head->info = 0; 925 926 if (i & 1) { 927 ci_flush_qh(i / 2); 928 ci_flush_qtd(i / 2); 929 } 930 } 931 932 INIT_LIST_HEAD(&controller.gadget.ep_list); 933 934 /* Init EP 0 */ 935 memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init)); 936 controller.ep[0].desc = &ep0_desc; 937 INIT_LIST_HEAD(&controller.ep[0].queue); 938 controller.ep[0].req_primed = false; 939 controller.gadget.ep0 = &controller.ep[0].ep; 940 INIT_LIST_HEAD(&controller.gadget.ep0->ep_list); 941 942 /* Init EP 1..3 */ 943 for (i = 1; i < 4; i++) { 944 memcpy(&controller.ep[i].ep, &ci_ep_init[i], 945 sizeof(*ci_ep_init)); 946 INIT_LIST_HEAD(&controller.ep[i].queue); 947 controller.ep[i].req_primed = false; 948 list_add_tail(&controller.ep[i].ep.ep_list, 949 &controller.gadget.ep_list); 950 } 951 952 /* Init EP 4..n */ 953 for (i = 4; i < NUM_ENDPOINTS; i++) { 954 memcpy(&controller.ep[i].ep, &ci_ep_init[4], 955 sizeof(*ci_ep_init)); 956 INIT_LIST_HEAD(&controller.ep[i].queue); 957 controller.ep[i].req_primed = false; 958 list_add_tail(&controller.ep[i].ep.ep_list, 959 &controller.gadget.ep_list); 960 } 961 962 ci_ep_alloc_request(&controller.ep[0].ep, 0); 963 if (!controller.ep0_req) { 964 free(controller.items_mem); 965 free(controller.epts); 966 return -ENOMEM; 967 } 968 969 return 0; 970 } 971 972 int usb_gadget_register_driver(struct usb_gadget_driver *driver) 973 { 974 int ret; 975 976 if (!driver) 977 return -EINVAL; 978 if (!driver->bind || !driver->setup || !driver->disconnect) 979 return -EINVAL; 980 if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) 981 return -EINVAL; 982 983 #ifdef CONFIG_DM_USB 984 ret = usb_setup_ehci_gadget(&controller.ctrl); 985 #else 986 ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); 987 #endif 988 if (ret) 989 return ret; 990 991 ret = ci_udc_probe(); 992 #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) 993 /* 994 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all 995 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection 996 */ 997 if (!ret) { 998 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 999 1000 /* select ULPI phy */ 1001 writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); 1002 } 1003 #endif 1004 1005 ret = driver->bind(&controller.gadget); 1006 if (ret) { 1007 DBG("driver->bind() returned %d\n", ret); 1008 return ret; 1009 } 1010 controller.driver = driver; 1011 1012 return 0; 1013 } 1014 1015 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1016 { 1017 udc_disconnect(); 1018 1019 driver->unbind(&controller.gadget); 1020 controller.driver = NULL; 1021 1022 ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); 1023 free(controller.items_mem); 1024 free(controller.epts); 1025 1026 return 0; 1027 } 1028 1029 bool dfu_usb_get_reset(void) 1030 { 1031 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 1032 1033 return !!(readl(&udc->usbsts) & STS_URI); 1034 } 1035