1 /* 2 * Copyright 2011, Marvell Semiconductor Inc. 3 * Lei Wen <leiwen@marvell.com> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 * 7 * Back ported to the 8xx platform (from the 8260 platform) by 8 * Murray.Jensen@cmst.csiro.au, 27-Jan-01. 9 */ 10 11 #include <common.h> 12 #include <command.h> 13 #include <config.h> 14 #include <net.h> 15 #include <malloc.h> 16 #include <asm/byteorder.h> 17 #include <asm/errno.h> 18 #include <asm/io.h> 19 #include <asm/unaligned.h> 20 #include <linux/types.h> 21 #include <linux/usb/ch9.h> 22 #include <linux/usb/gadget.h> 23 #include <usb/ci_udc.h> 24 #include "../host/ehci.h" 25 #include "ci_udc.h" 26 27 /* 28 * Check if the system has too long cachelines. If the cachelines are 29 * longer then 128b, the driver will not be able flush/invalidate data 30 * cache over separate QH entries. We use 128b because one QH entry is 31 * 64b long and there are always two QH list entries for each endpoint. 32 */ 33 #if ARCH_DMA_MINALIGN > 128 34 #error This driver can not work on systems with caches longer than 128b 35 #endif 36 37 /* 38 * Every QTD must be individually aligned, since we can program any 39 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN, 40 * and the USB HW requires 32-byte alignment. Align to both: 41 */ 42 #define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32) 43 /* Each QTD is this size */ 44 #define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item) 45 /* 46 * Align the size of the QTD too, so we can add this value to each 47 * QTD's address to get another aligned address. 48 */ 49 #define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN) 50 /* For each endpoint, we need 2 QTDs, one for each of IN and OUT */ 51 #define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ) 52 53 #define EP_MAX_LENGTH_TRANSFER 0x4000 54 55 #ifndef DEBUG 56 #define DBG(x...) do {} while (0) 57 #else 58 #define DBG(x...) printf(x) 59 static const char *reqname(unsigned r) 60 { 61 switch (r) { 62 case USB_REQ_GET_STATUS: return "GET_STATUS"; 63 case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE"; 64 case USB_REQ_SET_FEATURE: return "SET_FEATURE"; 65 case USB_REQ_SET_ADDRESS: return "SET_ADDRESS"; 66 case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR"; 67 case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR"; 68 case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION"; 69 case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION"; 70 case USB_REQ_GET_INTERFACE: return "GET_INTERFACE"; 71 case USB_REQ_SET_INTERFACE: return "SET_INTERFACE"; 72 default: return "*UNKNOWN*"; 73 } 74 } 75 #endif 76 77 static struct usb_endpoint_descriptor ep0_desc = { 78 .bLength = sizeof(struct usb_endpoint_descriptor), 79 .bDescriptorType = USB_DT_ENDPOINT, 80 .bEndpointAddress = USB_DIR_IN, 81 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 82 }; 83 84 static int ci_pullup(struct usb_gadget *gadget, int is_on); 85 static int ci_ep_enable(struct usb_ep *ep, 86 const struct usb_endpoint_descriptor *desc); 87 static int ci_ep_disable(struct usb_ep *ep); 88 static int ci_ep_queue(struct usb_ep *ep, 89 struct usb_request *req, gfp_t gfp_flags); 90 static struct usb_request * 91 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags); 92 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req); 93 94 static struct usb_gadget_ops ci_udc_ops = { 95 .pullup = ci_pullup, 96 }; 97 98 static struct usb_ep_ops ci_ep_ops = { 99 .enable = ci_ep_enable, 100 .disable = ci_ep_disable, 101 .queue = ci_ep_queue, 102 .alloc_request = ci_ep_alloc_request, 103 .free_request = ci_ep_free_request, 104 }; 105 106 /* Init values for USB endpoints. */ 107 static const struct usb_ep ci_ep_init[5] = { 108 [0] = { /* EP 0 */ 109 .maxpacket = 64, 110 .name = "ep0", 111 .ops = &ci_ep_ops, 112 }, 113 [1] = { 114 .maxpacket = 512, 115 .name = "ep1in-bulk", 116 .ops = &ci_ep_ops, 117 }, 118 [2] = { 119 .maxpacket = 512, 120 .name = "ep2out-bulk", 121 .ops = &ci_ep_ops, 122 }, 123 [3] = { 124 .maxpacket = 512, 125 .name = "ep3in-int", 126 .ops = &ci_ep_ops, 127 }, 128 [4] = { 129 .maxpacket = 512, 130 .name = "ep-", 131 .ops = &ci_ep_ops, 132 }, 133 }; 134 135 static struct ci_drv controller = { 136 .gadget = { 137 .name = "ci_udc", 138 .ops = &ci_udc_ops, 139 .is_dualspeed = 1, 140 }, 141 }; 142 143 /** 144 * ci_get_qh() - return queue head for endpoint 145 * @ep_num: Endpoint number 146 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) 147 * 148 * This function returns the QH associated with particular endpoint 149 * and it's direction. 150 */ 151 static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) 152 { 153 return &controller.epts[(ep_num * 2) + dir_in]; 154 } 155 156 /** 157 * ci_get_qtd() - return queue item for endpoint 158 * @ep_num: Endpoint number 159 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) 160 * 161 * This function returns the QH associated with particular endpoint 162 * and it's direction. 163 */ 164 static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) 165 { 166 int index = (ep_num * 2) + dir_in; 167 uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); 168 return (struct ept_queue_item *)imem; 169 } 170 171 /** 172 * ci_flush_qh - flush cache over queue head 173 * @ep_num: Endpoint number 174 * 175 * This function flushes cache over QH for particular endpoint. 176 */ 177 static void ci_flush_qh(int ep_num) 178 { 179 struct ept_queue_head *head = ci_get_qh(ep_num, 0); 180 const unsigned long start = (unsigned long)head; 181 const unsigned long end = start + 2 * sizeof(*head); 182 183 flush_dcache_range(start, end); 184 } 185 186 /** 187 * ci_invalidate_qh - invalidate cache over queue head 188 * @ep_num: Endpoint number 189 * 190 * This function invalidates cache over QH for particular endpoint. 191 */ 192 static void ci_invalidate_qh(int ep_num) 193 { 194 struct ept_queue_head *head = ci_get_qh(ep_num, 0); 195 unsigned long start = (unsigned long)head; 196 unsigned long end = start + 2 * sizeof(*head); 197 198 invalidate_dcache_range(start, end); 199 } 200 201 /** 202 * ci_flush_qtd - flush cache over queue item 203 * @ep_num: Endpoint number 204 * 205 * This function flushes cache over qTD pair for particular endpoint. 206 */ 207 static void ci_flush_qtd(int ep_num) 208 { 209 struct ept_queue_item *item = ci_get_qtd(ep_num, 0); 210 const unsigned long start = (unsigned long)item; 211 const unsigned long end = start + 2 * ILIST_ENT_SZ; 212 213 flush_dcache_range(start, end); 214 } 215 216 /** 217 * ci_flush_td - flush cache over queue item 218 * @td: td pointer 219 * 220 * This function flushes cache for particular transfer descriptor. 221 */ 222 static void ci_flush_td(struct ept_queue_item *td) 223 { 224 const unsigned long start = (unsigned long)td; 225 const unsigned long end = (unsigned long)td + ILIST_ENT_SZ; 226 flush_dcache_range(start, end); 227 } 228 229 /** 230 * ci_invalidate_qtd - invalidate cache over queue item 231 * @ep_num: Endpoint number 232 * 233 * This function invalidates cache over qTD pair for particular endpoint. 234 */ 235 static void ci_invalidate_qtd(int ep_num) 236 { 237 struct ept_queue_item *item = ci_get_qtd(ep_num, 0); 238 const unsigned long start = (unsigned long)item; 239 const unsigned long end = start + 2 * ILIST_ENT_SZ; 240 241 invalidate_dcache_range(start, end); 242 } 243 244 /** 245 * ci_invalidate_td - invalidate cache over queue item 246 * @td: td pointer 247 * 248 * This function invalidates cache for particular transfer descriptor. 249 */ 250 static void ci_invalidate_td(struct ept_queue_item *td) 251 { 252 const unsigned long start = (unsigned long)td; 253 const unsigned long end = start + ILIST_ENT_SZ; 254 invalidate_dcache_range(start, end); 255 } 256 257 static struct usb_request * 258 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) 259 { 260 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 261 int num = -1; 262 struct ci_req *ci_req; 263 264 if (ci_ep->desc) 265 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 266 267 if (num == 0 && controller.ep0_req) 268 return &controller.ep0_req->req; 269 270 ci_req = calloc(1, sizeof(*ci_req)); 271 if (!ci_req) 272 return NULL; 273 274 INIT_LIST_HEAD(&ci_req->queue); 275 276 if (num == 0) 277 controller.ep0_req = ci_req; 278 279 return &ci_req->req; 280 } 281 282 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) 283 { 284 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 285 struct ci_req *ci_req = container_of(req, struct ci_req, req); 286 int num = -1; 287 288 if (ci_ep->desc) 289 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 290 291 if (num == 0) { 292 if (!controller.ep0_req) 293 return; 294 controller.ep0_req = 0; 295 } 296 297 if (ci_req->b_buf) 298 free(ci_req->b_buf); 299 free(ci_req); 300 } 301 302 static void ep_enable(int num, int in, int maxpacket) 303 { 304 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 305 unsigned n; 306 307 n = readl(&udc->epctrl[num]); 308 if (in) 309 n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK); 310 else 311 n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK); 312 313 if (num != 0) { 314 struct ept_queue_head *head = ci_get_qh(num, in); 315 316 head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT; 317 ci_flush_qh(num); 318 } 319 writel(n, &udc->epctrl[num]); 320 } 321 322 static int ci_ep_enable(struct usb_ep *ep, 323 const struct usb_endpoint_descriptor *desc) 324 { 325 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 326 int num, in; 327 num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 328 in = (desc->bEndpointAddress & USB_DIR_IN) != 0; 329 ci_ep->desc = desc; 330 331 if (num) { 332 int max = get_unaligned_le16(&desc->wMaxPacketSize); 333 334 if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) 335 max = 64; 336 if (ep->maxpacket != max) { 337 DBG("%s: from %d to %d\n", __func__, 338 ep->maxpacket, max); 339 ep->maxpacket = max; 340 } 341 } 342 ep_enable(num, in, ep->maxpacket); 343 DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); 344 return 0; 345 } 346 347 static int ci_ep_disable(struct usb_ep *ep) 348 { 349 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 350 351 ci_ep->desc = NULL; 352 return 0; 353 } 354 355 static int ci_bounce(struct ci_req *ci_req, int in) 356 { 357 struct usb_request *req = &ci_req->req; 358 unsigned long addr = (unsigned long)req->buf; 359 unsigned long hwaddr; 360 uint32_t aligned_used_len; 361 362 /* Input buffer address is not aligned. */ 363 if (addr & (ARCH_DMA_MINALIGN - 1)) 364 goto align; 365 366 /* Input buffer length is not aligned. */ 367 if (req->length & (ARCH_DMA_MINALIGN - 1)) 368 goto align; 369 370 /* The buffer is well aligned, only flush cache. */ 371 ci_req->hw_len = req->length; 372 ci_req->hw_buf = req->buf; 373 goto flush; 374 375 align: 376 if (ci_req->b_buf && req->length > ci_req->b_len) { 377 free(ci_req->b_buf); 378 ci_req->b_buf = 0; 379 } 380 if (!ci_req->b_buf) { 381 ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); 382 ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); 383 if (!ci_req->b_buf) 384 return -ENOMEM; 385 } 386 ci_req->hw_len = ci_req->b_len; 387 ci_req->hw_buf = ci_req->b_buf; 388 389 if (in) 390 memcpy(ci_req->hw_buf, req->buf, req->length); 391 392 flush: 393 hwaddr = (unsigned long)ci_req->hw_buf; 394 aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); 395 flush_dcache_range(hwaddr, hwaddr + aligned_used_len); 396 397 return 0; 398 } 399 400 static void ci_debounce(struct ci_req *ci_req, int in) 401 { 402 struct usb_request *req = &ci_req->req; 403 unsigned long addr = (unsigned long)req->buf; 404 unsigned long hwaddr = (unsigned long)ci_req->hw_buf; 405 uint32_t aligned_used_len; 406 407 if (in) 408 return; 409 410 aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); 411 invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); 412 413 if (addr == hwaddr) 414 return; /* not a bounce */ 415 416 memcpy(req->buf, ci_req->hw_buf, req->actual); 417 } 418 419 static void ci_ep_submit_next_request(struct ci_ep *ci_ep) 420 { 421 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 422 struct ept_queue_item *item; 423 struct ept_queue_head *head; 424 int bit, num, len, in; 425 struct ci_req *ci_req; 426 u8 *buf; 427 uint32_t length, actlen; 428 struct ept_queue_item *dtd, *qtd; 429 430 ci_ep->req_primed = true; 431 432 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 433 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 434 item = ci_get_qtd(num, in); 435 head = ci_get_qh(num, in); 436 437 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 438 len = ci_req->req.length; 439 440 head->next = (unsigned long)item; 441 head->info = 0; 442 443 ci_req->dtd_count = 0; 444 buf = ci_req->hw_buf; 445 actlen = 0; 446 dtd = item; 447 448 do { 449 length = min(ci_req->req.length - actlen, 450 (unsigned)EP_MAX_LENGTH_TRANSFER); 451 452 dtd->info = INFO_BYTES(length) | INFO_ACTIVE; 453 dtd->page0 = (unsigned long)buf; 454 dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000; 455 dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000; 456 dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000; 457 dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000; 458 459 len -= length; 460 actlen += length; 461 buf += length; 462 463 if (len) { 464 qtd = (struct ept_queue_item *) 465 memalign(ILIST_ALIGN, ILIST_ENT_SZ); 466 dtd->next = (unsigned long)qtd; 467 dtd = qtd; 468 memset(dtd, 0, ILIST_ENT_SZ); 469 } 470 471 ci_req->dtd_count++; 472 } while (len); 473 474 item = dtd; 475 /* 476 * When sending the data for an IN transaction, the attached host 477 * knows that all data for the IN is sent when one of the following 478 * occurs: 479 * a) A zero-length packet is transmitted. 480 * b) A packet with length that isn't an exact multiple of the ep's 481 * maxpacket is transmitted. 482 * c) Enough data is sent to exactly fill the host's maximum expected 483 * IN transaction size. 484 * 485 * One of these conditions MUST apply at the end of an IN transaction, 486 * or the transaction will not be considered complete by the host. If 487 * none of (a)..(c) already applies, then we must force (a) to apply 488 * by explicitly sending an extra zero-length packet. 489 */ 490 /* IN !a !b !c */ 491 if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) { 492 /* 493 * Each endpoint has 2 items allocated, even though typically 494 * only 1 is used at a time since either an IN or an OUT but 495 * not both is queued. For an IN transaction, item currently 496 * points at the second of these items, so we know that we 497 * can use the other to transmit the extra zero-length packet. 498 */ 499 struct ept_queue_item *other_item = ci_get_qtd(num, 0); 500 item->next = (unsigned long)other_item; 501 item = other_item; 502 item->info = INFO_ACTIVE; 503 } 504 505 item->next = TERMINATE; 506 item->info |= INFO_IOC; 507 508 ci_flush_qtd(num); 509 510 item = (struct ept_queue_item *)(unsigned long)head->next; 511 while (item->next != TERMINATE) { 512 ci_flush_td((struct ept_queue_item *)(unsigned long)item->next); 513 item = (struct ept_queue_item *)(unsigned long)item->next; 514 } 515 516 DBG("ept%d %s queue len %x, req %p, buffer %p\n", 517 num, in ? "in" : "out", len, ci_req, ci_req->hw_buf); 518 ci_flush_qh(num); 519 520 if (in) 521 bit = EPT_TX(num); 522 else 523 bit = EPT_RX(num); 524 525 writel(bit, &udc->epprime); 526 } 527 528 static int ci_ep_queue(struct usb_ep *ep, 529 struct usb_request *req, gfp_t gfp_flags) 530 { 531 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 532 struct ci_req *ci_req = container_of(req, struct ci_req, req); 533 int in, ret; 534 int __maybe_unused num; 535 536 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 537 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 538 539 if (!num && ci_ep->req_primed) { 540 /* 541 * The flipping of ep0 between IN and OUT relies on 542 * ci_ep_queue consuming the current IN/OUT setting 543 * immediately. If this is deferred to a later point when the 544 * req is pulled out of ci_req->queue, then the IN/OUT setting 545 * may have been changed since the req was queued, and state 546 * will get out of sync. This condition doesn't occur today, 547 * but could if bugs were introduced later, and this error 548 * check will save a lot of debugging time. 549 */ 550 printf("%s: ep0 transaction already in progress\n", __func__); 551 return -EPROTO; 552 } 553 554 ret = ci_bounce(ci_req, in); 555 if (ret) 556 return ret; 557 558 DBG("ept%d %s pre-queue req %p, buffer %p\n", 559 num, in ? "in" : "out", ci_req, ci_req->hw_buf); 560 list_add_tail(&ci_req->queue, &ci_ep->queue); 561 562 if (!ci_ep->req_primed) 563 ci_ep_submit_next_request(ci_ep); 564 565 return 0; 566 } 567 568 static void flip_ep0_direction(void) 569 { 570 if (ep0_desc.bEndpointAddress == USB_DIR_IN) { 571 DBG("%s: Flipping ep0 to OUT\n", __func__); 572 ep0_desc.bEndpointAddress = 0; 573 } else { 574 DBG("%s: Flipping ep0 to IN\n", __func__); 575 ep0_desc.bEndpointAddress = USB_DIR_IN; 576 } 577 } 578 579 static void handle_ep_complete(struct ci_ep *ci_ep) 580 { 581 struct ept_queue_item *item, *next_td; 582 int num, in, len, j; 583 struct ci_req *ci_req; 584 585 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 586 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 587 item = ci_get_qtd(num, in); 588 ci_invalidate_qtd(num); 589 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 590 591 next_td = item; 592 len = 0; 593 for (j = 0; j < ci_req->dtd_count; j++) { 594 ci_invalidate_td(next_td); 595 item = next_td; 596 len += (item->info >> 16) & 0x7fff; 597 if (item->info & 0xff) 598 printf("EP%d/%s FAIL info=%x pg0=%x\n", 599 num, in ? "in" : "out", item->info, item->page0); 600 if (j != ci_req->dtd_count - 1) 601 next_td = (struct ept_queue_item *)(unsigned long) 602 item->next; 603 if (j != 0) 604 free(item); 605 } 606 607 list_del_init(&ci_req->queue); 608 ci_ep->req_primed = false; 609 610 if (!list_empty(&ci_ep->queue)) 611 ci_ep_submit_next_request(ci_ep); 612 613 ci_req->req.actual = ci_req->req.length - len; 614 ci_debounce(ci_req, in); 615 616 DBG("ept%d %s req %p, complete %x\n", 617 num, in ? "in" : "out", ci_req, len); 618 if (num != 0 || controller.ep0_data_phase) 619 ci_req->req.complete(&ci_ep->ep, &ci_req->req); 620 if (num == 0 && controller.ep0_data_phase) { 621 /* 622 * Data Stage is complete, so flip ep0 dir for Status Stage, 623 * which always transfers a packet in the opposite direction. 624 */ 625 DBG("%s: flip ep0 dir for Status Stage\n", __func__); 626 flip_ep0_direction(); 627 controller.ep0_data_phase = false; 628 ci_req->req.length = 0; 629 usb_ep_queue(&ci_ep->ep, &ci_req->req, 0); 630 } 631 } 632 633 #define SETUP(type, request) (((type) << 8) | (request)) 634 635 static void handle_setup(void) 636 { 637 struct ci_ep *ci_ep = &controller.ep[0]; 638 struct ci_req *ci_req; 639 struct usb_request *req; 640 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 641 struct ept_queue_head *head; 642 struct usb_ctrlrequest r; 643 int status = 0; 644 int num, in, _num, _in, i; 645 char *buf; 646 647 ci_req = controller.ep0_req; 648 req = &ci_req->req; 649 head = ci_get_qh(0, 0); /* EP0 OUT */ 650 651 ci_invalidate_qh(0); 652 memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest)); 653 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 654 writel(EPT_RX(0), &udc->epsetupstat); 655 #else 656 writel(EPT_RX(0), &udc->epstat); 657 #endif 658 DBG("handle setup %s, %x, %x index %x value %x length %x\n", 659 reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex, 660 r.wValue, r.wLength); 661 662 /* Set EP0 dir for Data Stage based on Setup Stage data */ 663 if (r.bRequestType & USB_DIR_IN) { 664 DBG("%s: Set ep0 to IN for Data Stage\n", __func__); 665 ep0_desc.bEndpointAddress = USB_DIR_IN; 666 } else { 667 DBG("%s: Set ep0 to OUT for Data Stage\n", __func__); 668 ep0_desc.bEndpointAddress = 0; 669 } 670 if (r.wLength) { 671 controller.ep0_data_phase = true; 672 } else { 673 /* 0 length -> no Data Stage. Flip dir for Status Stage */ 674 DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__); 675 flip_ep0_direction(); 676 controller.ep0_data_phase = false; 677 } 678 679 list_del_init(&ci_req->queue); 680 ci_ep->req_primed = false; 681 682 switch (SETUP(r.bRequestType, r.bRequest)) { 683 case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE): 684 _num = r.wIndex & 15; 685 _in = !!(r.wIndex & 0x80); 686 687 if ((r.wValue == 0) && (r.wLength == 0)) { 688 req->length = 0; 689 for (i = 0; i < NUM_ENDPOINTS; i++) { 690 struct ci_ep *ep = &controller.ep[i]; 691 692 if (!ep->desc) 693 continue; 694 num = ep->desc->bEndpointAddress 695 & USB_ENDPOINT_NUMBER_MASK; 696 in = (ep->desc->bEndpointAddress 697 & USB_DIR_IN) != 0; 698 if ((num == _num) && (in == _in)) { 699 ep_enable(num, in, ep->ep.maxpacket); 700 usb_ep_queue(controller.gadget.ep0, 701 req, 0); 702 break; 703 } 704 } 705 } 706 return; 707 708 case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS): 709 /* 710 * write address delayed (will take effect 711 * after the next IN txn) 712 */ 713 writel((r.wValue << 25) | (1 << 24), &udc->devaddr); 714 req->length = 0; 715 usb_ep_queue(controller.gadget.ep0, req, 0); 716 return; 717 718 case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS): 719 req->length = 2; 720 buf = (char *)req->buf; 721 buf[0] = 1 << USB_DEVICE_SELF_POWERED; 722 buf[1] = 0; 723 usb_ep_queue(controller.gadget.ep0, req, 0); 724 return; 725 } 726 /* pass request up to the gadget driver */ 727 if (controller.driver) 728 status = controller.driver->setup(&controller.gadget, &r); 729 else 730 status = -ENODEV; 731 732 if (!status) 733 return; 734 DBG("STALL reqname %s type %x value %x, index %x\n", 735 reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex); 736 writel((1<<16) | (1 << 0), &udc->epctrl[0]); 737 } 738 739 static void stop_activity(void) 740 { 741 int i, num, in; 742 struct ept_queue_head *head; 743 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 744 writel(readl(&udc->epcomp), &udc->epcomp); 745 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 746 writel(readl(&udc->epsetupstat), &udc->epsetupstat); 747 #endif 748 writel(readl(&udc->epstat), &udc->epstat); 749 writel(0xffffffff, &udc->epflush); 750 751 /* error out any pending reqs */ 752 for (i = 0; i < NUM_ENDPOINTS; i++) { 753 if (i != 0) 754 writel(0, &udc->epctrl[i]); 755 if (controller.ep[i].desc) { 756 num = controller.ep[i].desc->bEndpointAddress 757 & USB_ENDPOINT_NUMBER_MASK; 758 in = (controller.ep[i].desc->bEndpointAddress 759 & USB_DIR_IN) != 0; 760 head = ci_get_qh(num, in); 761 head->info = INFO_ACTIVE; 762 ci_flush_qh(num); 763 } 764 } 765 } 766 767 void udc_irq(void) 768 { 769 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 770 unsigned n = readl(&udc->usbsts); 771 writel(n, &udc->usbsts); 772 int bit, i, num, in; 773 774 n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI); 775 if (n == 0) 776 return; 777 778 if (n & STS_URI) { 779 DBG("-- reset --\n"); 780 stop_activity(); 781 } 782 if (n & STS_SLI) 783 DBG("-- suspend --\n"); 784 785 if (n & STS_PCI) { 786 int max = 64; 787 int speed = USB_SPEED_FULL; 788 789 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 790 bit = (readl(&udc->hostpc1_devlc) >> 25) & 3; 791 #else 792 bit = (readl(&udc->portsc) >> 26) & 3; 793 #endif 794 DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full"); 795 if (bit == 2) { 796 speed = USB_SPEED_HIGH; 797 max = 512; 798 } 799 controller.gadget.speed = speed; 800 for (i = 1; i < NUM_ENDPOINTS; i++) { 801 if (controller.ep[i].ep.maxpacket > max) 802 controller.ep[i].ep.maxpacket = max; 803 } 804 } 805 806 if (n & STS_UEI) 807 printf("<UEI %x>\n", readl(&udc->epcomp)); 808 809 if ((n & STS_UI) || (n & STS_UEI)) { 810 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 811 n = readl(&udc->epsetupstat); 812 #else 813 n = readl(&udc->epstat); 814 #endif 815 if (n & EPT_RX(0)) 816 handle_setup(); 817 818 n = readl(&udc->epcomp); 819 if (n != 0) 820 writel(n, &udc->epcomp); 821 822 for (i = 0; i < NUM_ENDPOINTS && n; i++) { 823 if (controller.ep[i].desc) { 824 num = controller.ep[i].desc->bEndpointAddress 825 & USB_ENDPOINT_NUMBER_MASK; 826 in = (controller.ep[i].desc->bEndpointAddress 827 & USB_DIR_IN) != 0; 828 bit = (in) ? EPT_TX(num) : EPT_RX(num); 829 if (n & bit) 830 handle_ep_complete(&controller.ep[i]); 831 } 832 } 833 } 834 } 835 836 int usb_gadget_handle_interrupts(int index) 837 { 838 u32 value; 839 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 840 841 value = readl(&udc->usbsts); 842 if (value) 843 udc_irq(); 844 845 return value; 846 } 847 848 void udc_disconnect(void) 849 { 850 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 851 /* disable pullup */ 852 stop_activity(); 853 writel(USBCMD_FS2, &udc->usbcmd); 854 udelay(800); 855 if (controller.driver) 856 controller.driver->disconnect(&controller.gadget); 857 } 858 859 static int ci_pullup(struct usb_gadget *gadget, int is_on) 860 { 861 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 862 if (is_on) { 863 /* RESET */ 864 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd); 865 udelay(200); 866 867 writel((unsigned long)controller.epts, &udc->epinitaddr); 868 869 /* select DEVICE mode */ 870 writel(USBMODE_DEVICE, &udc->usbmode); 871 872 #if !defined(CONFIG_USB_GADGET_DUALSPEED) 873 /* Port force Full-Speed Connect */ 874 setbits_le32(&udc->portsc, PFSC); 875 #endif 876 877 writel(0xffffffff, &udc->epflush); 878 879 /* Turn on the USB connection by enabling the pullup resistor */ 880 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd); 881 } else { 882 udc_disconnect(); 883 } 884 885 return 0; 886 } 887 888 static int ci_udc_probe(void) 889 { 890 struct ept_queue_head *head; 891 int i; 892 893 const int num = 2 * NUM_ENDPOINTS; 894 895 const int eplist_min_align = 4096; 896 const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN); 897 const int eplist_raw_sz = num * sizeof(struct ept_queue_head); 898 const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN); 899 900 /* The QH list must be aligned to 4096 bytes. */ 901 controller.epts = memalign(eplist_align, eplist_sz); 902 if (!controller.epts) 903 return -ENOMEM; 904 memset(controller.epts, 0, eplist_sz); 905 906 controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ); 907 if (!controller.items_mem) { 908 free(controller.epts); 909 return -ENOMEM; 910 } 911 memset(controller.items_mem, 0, ILIST_SZ); 912 913 for (i = 0; i < 2 * NUM_ENDPOINTS; i++) { 914 /* 915 * Configure QH for each endpoint. The structure of the QH list 916 * is such that each two subsequent fields, N and N+1 where N is 917 * even, in the QH list represent QH for one endpoint. The Nth 918 * entry represents OUT configuration and the N+1th entry does 919 * represent IN configuration of the endpoint. 920 */ 921 head = controller.epts + i; 922 if (i < 2) 923 head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE) 924 | CONFIG_ZLT | CONFIG_IOS; 925 else 926 head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE) 927 | CONFIG_ZLT; 928 head->next = TERMINATE; 929 head->info = 0; 930 931 if (i & 1) { 932 ci_flush_qh(i / 2); 933 ci_flush_qtd(i / 2); 934 } 935 } 936 937 INIT_LIST_HEAD(&controller.gadget.ep_list); 938 939 /* Init EP 0 */ 940 memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init)); 941 controller.ep[0].desc = &ep0_desc; 942 INIT_LIST_HEAD(&controller.ep[0].queue); 943 controller.ep[0].req_primed = false; 944 controller.gadget.ep0 = &controller.ep[0].ep; 945 INIT_LIST_HEAD(&controller.gadget.ep0->ep_list); 946 947 /* Init EP 1..3 */ 948 for (i = 1; i < 4; i++) { 949 memcpy(&controller.ep[i].ep, &ci_ep_init[i], 950 sizeof(*ci_ep_init)); 951 INIT_LIST_HEAD(&controller.ep[i].queue); 952 controller.ep[i].req_primed = false; 953 list_add_tail(&controller.ep[i].ep.ep_list, 954 &controller.gadget.ep_list); 955 } 956 957 /* Init EP 4..n */ 958 for (i = 4; i < NUM_ENDPOINTS; i++) { 959 memcpy(&controller.ep[i].ep, &ci_ep_init[4], 960 sizeof(*ci_ep_init)); 961 INIT_LIST_HEAD(&controller.ep[i].queue); 962 controller.ep[i].req_primed = false; 963 list_add_tail(&controller.ep[i].ep.ep_list, 964 &controller.gadget.ep_list); 965 } 966 967 ci_ep_alloc_request(&controller.ep[0].ep, 0); 968 if (!controller.ep0_req) { 969 free(controller.items_mem); 970 free(controller.epts); 971 return -ENOMEM; 972 } 973 974 return 0; 975 } 976 977 int usb_gadget_register_driver(struct usb_gadget_driver *driver) 978 { 979 int ret; 980 981 if (!driver) 982 return -EINVAL; 983 if (!driver->bind || !driver->setup || !driver->disconnect) 984 return -EINVAL; 985 if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) 986 return -EINVAL; 987 988 #ifdef CONFIG_DM_USB 989 ret = usb_setup_ehci_gadget(&controller.ctrl); 990 #else 991 ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); 992 #endif 993 if (ret) 994 return ret; 995 996 ret = ci_udc_probe(); 997 #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) 998 /* 999 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all 1000 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection 1001 */ 1002 if (!ret) { 1003 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 1004 1005 /* select ULPI phy */ 1006 writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); 1007 } 1008 #endif 1009 1010 ret = driver->bind(&controller.gadget); 1011 if (ret) { 1012 DBG("driver->bind() returned %d\n", ret); 1013 return ret; 1014 } 1015 controller.driver = driver; 1016 1017 return 0; 1018 } 1019 1020 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1021 { 1022 udc_disconnect(); 1023 1024 driver->unbind(&controller.gadget); 1025 controller.driver = NULL; 1026 1027 ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); 1028 free(controller.items_mem); 1029 free(controller.epts); 1030 1031 return 0; 1032 } 1033 1034 bool dfu_usb_get_reset(void) 1035 { 1036 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 1037 1038 return !!(readl(&udc->usbsts) & STS_URI); 1039 } 1040