// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer than 128 bytes, the driver will not be able to flush/invalidate
 * the data cache over separate QH entries. We use 128 bytes because one
 * QH entry is 64 bytes long and there are always two QH list entries for
 * each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN		roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ	sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ		roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ		(NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)

#define EP_MAX_LENGTH_TRANSFER	0x4000
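/*
 * Illustrative sizing (the actual values depend on the platform):
 * assuming ARCH_DMA_MINALIGN == 64 and a 32-byte struct ept_queue_item,
 * ILIST_ALIGN = roundup(64, 32) = 64 and
 * ILIST_ENT_SZ = roundup(32, 64) = 64, so each endpoint's two QTDs
 * occupy 128 bytes of the item list.
 */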
#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
	.enable = ci_ep_enable,
	.disable = ci_ep_disable,
	.queue = ci_ep_queue,
	.dequeue = ci_ep_dequeue,
	.alloc_request = ci_ep_alloc_request,
	.free_request = ci_ep_free_request,
};

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[5] = {
	[0] = {	/* EP 0 */
		.maxpacket = 64,
		.name = "ep0",
		.ops = &ci_ep_ops,
	},
	[1] = {
		.maxpacket = 512,
		.name = "ep1in-bulk",
		.ops = &ci_ep_ops,
	},
	[2] = {
		.maxpacket = 512,
		.name = "ep2out-bulk",
		.ops = &ci_ep_ops,
	},
	[3] = {
		.maxpacket = 512,
		.name = "ep3in-int",
		.ops = &ci_ep_ops,
	},
	[4] = {
		.maxpacket = 512,
		.name = "ep-",
		.ops = &ci_ep_ops,
	},
};

static struct ci_drv controller = {
	.gadget	= {
		.name = "ci_udc",
		.ops = &ci_udc_ops,
		.is_dualspeed = 1,
	},
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
	return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the qTD associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
	int index = (ep_num * 2) + dir_in;
	uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);

	return (struct ept_queue_item *)imem;
}
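/*
 * Worked example of the indexing above: EP1 IN lives at index
 * (1 * 2) + 1 = 3, i.e. the fourth QH in controller.epts and the qTD at
 * byte offset 3 * ILIST_ENT_SZ in controller.items_mem; EP1 OUT is the
 * entry just before it, at index 2.
 */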
/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the QH pair of a particular
 * endpoint.
 */
static void ci_flush_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const unsigned long start = (unsigned long)head;
	const unsigned long end = start + 2 * sizeof(*head);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the QH pair of a particular
 * endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	unsigned long start = (unsigned long)head;
	unsigned long end = start + 2 * sizeof(*head);

	invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the qTD pair of a particular
 * endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const unsigned long start = (unsigned long)item;
	const unsigned long end = start + 2 * ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_flush_td - flush cache over queue item
 * @td:	td pointer
 *
 * This function flushes the cache for a particular transfer descriptor.
 */
static void ci_flush_td(struct ept_queue_item *td)
{
	const unsigned long start = (unsigned long)td;
	const unsigned long end = (unsigned long)td + ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the qTD pair of a particular
 * endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const unsigned long start = (unsigned long)item;
	const unsigned long end = start + 2 * ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}

/**
 * ci_invalidate_td - invalidate cache over queue item
 * @td:	td pointer
 *
 * This function invalidates the cache for a particular transfer
 * descriptor.
 */
static void ci_invalidate_td(struct ept_queue_item *td)
{
	const unsigned long start = (unsigned long)td;
	const unsigned long end = start + ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}
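/*
 * Note: the per-endpoint helpers above (ci_flush_qh/ci_flush_qtd and
 * their invalidate counterparts) always start from the OUT entry
 * (dir_in == 0) and cover two entries, so a single cache operation
 * handles both directions of an endpoint. With 64-byte QH entries this
 * is the 128-byte worst case checked against ARCH_DMA_MINALIGN at the
 * top of this file.
 */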
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num = -1;
	struct ci_req *ci_req;

	if (ci_ep->desc)
		num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	if (num == 0 && controller.ep0_req)
		return &controller.ep0_req->req;

	ci_req = calloc(1, sizeof(*ci_req));
	if (!ci_req)
		return NULL;

	INIT_LIST_HEAD(&ci_req->queue);

	if (num == 0)
		controller.ep0_req = ci_req;

	return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int num = -1;

	if (ci_ep->desc)
		num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	if (num == 0) {
		if (!controller.ep0_req)
			return;
		controller.ep0_req = 0;
	}

	if (ci_req->b_buf)
		free(ci_req->b_buf);
	free(ci_req);
}

static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in;

	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ci_ep->desc = desc;

	if (num) {
		int max = get_unaligned_le16(&desc->wMaxPacketSize);

		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
			max = 64;
		if (ep->maxpacket != max) {
			DBG("%s: from %d to %d\n", __func__,
			    ep->maxpacket, max);
			ep->maxpacket = max;
		}
	}
	ep_enable(num, in, ep->maxpacket);
	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);

	return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

	ci_ep->desc = NULL;
	return 0;
}
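/*
 * The controller DMAs directly from/to the request buffer, and cache
 * maintenance only works on ARCH_DMA_MINALIGN-aligned ranges. Requests
 * whose buffer address or length is unaligned are therefore copied
 * through an aligned bounce buffer (ci_req->b_buf) before the transfer
 * for IN, and back out of it after the transfer for OUT.
 */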
static int ci_bounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	unsigned long addr = (unsigned long)req->buf;
	unsigned long hwaddr;
	uint32_t aligned_used_len;

	/* Input buffer address is not aligned. */
	if (addr & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* Input buffer length is not aligned. */
	if (req->length & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* The buffer is well aligned, only flush cache. */
	ci_req->hw_len = req->length;
	ci_req->hw_buf = req->buf;
	goto flush;

align:
	if (ci_req->b_buf && req->length > ci_req->b_len) {
		free(ci_req->b_buf);
		ci_req->b_buf = 0;
	}
	if (!ci_req->b_buf) {
		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
		if (!ci_req->b_buf)
			return -ENOMEM;
	}
	ci_req->hw_len = ci_req->b_len;
	ci_req->hw_buf = ci_req->b_buf;

	if (in)
		memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
	hwaddr = (unsigned long)ci_req->hw_buf;
	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

	return 0;
}

static void ci_debounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	unsigned long addr = (unsigned long)req->buf;
	unsigned long hwaddr = (unsigned long)ci_req->hw_buf;
	uint32_t aligned_used_len;

	if (in)
		return;

	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

	if (addr == hwaddr)
		return; /* not a bounce */

	memcpy(req->buf, ci_req->hw_buf, req->actual);
}
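/*
 * Each dTD can carry at most EP_MAX_LENGTH_TRANSFER (0x4000) bytes, so
 * larger requests are split into a chain of dTDs by the loop below. For
 * example (illustrative), a 40 KiB (0xa000-byte) request becomes three
 * dTDs of 0x4000, 0x4000 and 0x2000 bytes.
 */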
static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;
	u8 *buf;
	uint32_t len_left, len_this_dtd;
	struct ept_queue_item *dtd, *qtd;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	head->next = (unsigned long)item;
	head->info = 0;

	ci_req->dtd_count = 0;
	buf = ci_req->hw_buf;
	len_left = len;
	dtd = item;

	do {
		len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER);

		dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE;
		dtd->page0 = (unsigned long)buf;
		dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000;
		dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000;
		dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000;
		dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000;

		len_left -= len_this_dtd;
		buf += len_this_dtd;

		if (len_left) {
			qtd = (struct ept_queue_item *)
			      memalign(ILIST_ALIGN, ILIST_ENT_SZ);
			dtd->next = (unsigned long)qtd;
			dtd = qtd;
			memset(dtd, 0, ILIST_ENT_SZ);
		}

		ci_req->dtd_count++;
	} while (len_left);

	item = dtd;
	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN    !a     !b                              !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);
		item->next = (unsigned long)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	item->next = TERMINATE;
	item->info |= INFO_IOC;

	ci_flush_qtd(num);

	item = (struct ept_queue_item *)(unsigned long)head->next;
	while (item->next != TERMINATE) {
		ci_flush_td((struct ept_queue_item *)(unsigned long)item->next);
		item = (struct ept_queue_item *)(unsigned long)item->next;
	}

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}

static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep);
	struct ci_req *ci_req;

	list_for_each_entry(ci_req, &ci_ep->queue, queue) {
		if (&ci_req->req == _req)
			break;
	}

	if (&ci_req->req != _req)
		return -EINVAL;

	list_del_init(&ci_req->queue);

	if (ci_req->req.status == -EINPROGRESS) {
		ci_req->req.status = -ECONNRESET;
		if (ci_req->req.complete)
			ci_req->req.complete(_ep, _req);
	}

	return 0;
}

static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}
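/*
 * ep0 is bidirectional, but ep0_desc carries only one direction at a
 * time. The Setup Stage sets the direction of the Data Stage (see
 * handle_setup() below), and the Status Stage always runs in the
 * opposite direction, hence the flip below between stages.
 */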
"in" : "out", ci_req, ci_req->hw_buf); 583 list_add_tail(&ci_req->queue, &ci_ep->queue); 584 585 if (!ci_ep->req_primed) 586 ci_ep_submit_next_request(ci_ep); 587 588 return 0; 589 } 590 591 static void flip_ep0_direction(void) 592 { 593 if (ep0_desc.bEndpointAddress == USB_DIR_IN) { 594 DBG("%s: Flipping ep0 to OUT\n", __func__); 595 ep0_desc.bEndpointAddress = 0; 596 } else { 597 DBG("%s: Flipping ep0 to IN\n", __func__); 598 ep0_desc.bEndpointAddress = USB_DIR_IN; 599 } 600 } 601 602 static void handle_ep_complete(struct ci_ep *ci_ep) 603 { 604 struct ept_queue_item *item, *next_td; 605 int num, in, len, j; 606 struct ci_req *ci_req; 607 608 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 609 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 610 item = ci_get_qtd(num, in); 611 ci_invalidate_qtd(num); 612 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 613 614 next_td = item; 615 len = 0; 616 for (j = 0; j < ci_req->dtd_count; j++) { 617 ci_invalidate_td(next_td); 618 item = next_td; 619 len += (item->info >> 16) & 0x7fff; 620 if (item->info & 0xff) 621 printf("EP%d/%s FAIL info=%x pg0=%x\n", 622 num, in ? "in" : "out", item->info, item->page0); 623 if (j != ci_req->dtd_count - 1) 624 next_td = (struct ept_queue_item *)(unsigned long) 625 item->next; 626 if (j != 0) 627 free(item); 628 } 629 630 list_del_init(&ci_req->queue); 631 ci_ep->req_primed = false; 632 633 if (!list_empty(&ci_ep->queue)) 634 ci_ep_submit_next_request(ci_ep); 635 636 ci_req->req.actual = ci_req->req.length - len; 637 ci_debounce(ci_req, in); 638 639 DBG("ept%d %s req %p, complete %x\n", 640 num, in ? "in" : "out", ci_req, len); 641 if (num != 0 || controller.ep0_data_phase) 642 ci_req->req.complete(&ci_ep->ep, &ci_req->req); 643 if (num == 0 && controller.ep0_data_phase) { 644 /* 645 * Data Stage is complete, so flip ep0 dir for Status Stage, 646 * which always transfers a packet in the opposite direction. 647 */ 648 DBG("%s: flip ep0 dir for Status Stage\n", __func__); 649 flip_ep0_direction(); 650 controller.ep0_data_phase = false; 651 ci_req->req.length = 0; 652 usb_ep_queue(&ci_ep->ep, &ci_req->req, 0); 653 } 654 } 655 656 #define SETUP(type, request) (((type) << 8) | (request)) 657 658 static void handle_setup(void) 659 { 660 struct ci_ep *ci_ep = &controller.ep[0]; 661 struct ci_req *ci_req; 662 struct usb_request *req; 663 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 664 struct ept_queue_head *head; 665 struct usb_ctrlrequest r; 666 int status = 0; 667 int num, in, _num, _in, i; 668 char *buf; 669 670 ci_req = controller.ep0_req; 671 req = &ci_req->req; 672 head = ci_get_qh(0, 0); /* EP0 OUT */ 673 674 ci_invalidate_qh(0); 675 memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest)); 676 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 677 writel(EPT_RX(0), &udc->epsetupstat); 678 #else 679 writel(EPT_RX(0), &udc->epstat); 680 #endif 681 DBG("handle setup %s, %x, %x index %x value %x length %x\n", 682 reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex, 683 r.wValue, r.wLength); 684 685 /* Set EP0 dir for Data Stage based on Setup Stage data */ 686 if (r.bRequestType & USB_DIR_IN) { 687 DBG("%s: Set ep0 to IN for Data Stage\n", __func__); 688 ep0_desc.bEndpointAddress = USB_DIR_IN; 689 } else { 690 DBG("%s: Set ep0 to OUT for Data Stage\n", __func__); 691 ep0_desc.bEndpointAddress = 0; 692 } 693 if (r.wLength) { 694 controller.ep0_data_phase = true; 695 } else { 696 /* 0 length -> no Data Stage. 
#define SETUP(type, request) (((type) << 8) | (request))

static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
	writel((1 << 16) | (1 << 0), &udc->epctrl[0]);
}

static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}
"High" : "Full"); 818 if (bit == 2) { 819 speed = USB_SPEED_HIGH; 820 max = 512; 821 } 822 controller.gadget.speed = speed; 823 for (i = 1; i < NUM_ENDPOINTS; i++) { 824 if (controller.ep[i].ep.maxpacket > max) 825 controller.ep[i].ep.maxpacket = max; 826 } 827 } 828 829 if (n & STS_UEI) 830 printf("<UEI %x>\n", readl(&udc->epcomp)); 831 832 if ((n & STS_UI) || (n & STS_UEI)) { 833 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 834 n = readl(&udc->epsetupstat); 835 #else 836 n = readl(&udc->epstat); 837 #endif 838 if (n & EPT_RX(0)) 839 handle_setup(); 840 841 n = readl(&udc->epcomp); 842 if (n != 0) 843 writel(n, &udc->epcomp); 844 845 for (i = 0; i < NUM_ENDPOINTS && n; i++) { 846 if (controller.ep[i].desc) { 847 num = controller.ep[i].desc->bEndpointAddress 848 & USB_ENDPOINT_NUMBER_MASK; 849 in = (controller.ep[i].desc->bEndpointAddress 850 & USB_DIR_IN) != 0; 851 bit = (in) ? EPT_TX(num) : EPT_RX(num); 852 if (n & bit) 853 handle_ep_complete(&controller.ep[i]); 854 } 855 } 856 } 857 } 858 859 int usb_gadget_handle_interrupts(int index) 860 { 861 u32 value; 862 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 863 864 value = readl(&udc->usbsts); 865 if (value) 866 udc_irq(); 867 868 return value; 869 } 870 871 void udc_disconnect(void) 872 { 873 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 874 /* disable pullup */ 875 stop_activity(); 876 writel(USBCMD_FS2, &udc->usbcmd); 877 udelay(800); 878 if (controller.driver) 879 controller.driver->disconnect(&controller.gadget); 880 } 881 882 static int ci_pullup(struct usb_gadget *gadget, int is_on) 883 { 884 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 885 if (is_on) { 886 /* RESET */ 887 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd); 888 udelay(200); 889 890 writel((unsigned long)controller.epts, &udc->epinitaddr); 891 892 /* select DEVICE mode */ 893 writel(USBMODE_DEVICE, &udc->usbmode); 894 895 #if !defined(CONFIG_USB_GADGET_DUALSPEED) 896 /* Port force Full-Speed Connect */ 897 setbits_le32(&udc->portsc, PFSC); 898 #endif 899 900 writel(0xffffffff, &udc->epflush); 901 902 /* Turn on the USB connection by enabling the pullup resistor */ 903 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd); 904 } else { 905 udc_disconnect(); 906 } 907 908 return 0; 909 } 910 911 static int ci_udc_probe(void) 912 { 913 struct ept_queue_head *head; 914 int i; 915 916 const int num = 2 * NUM_ENDPOINTS; 917 918 const int eplist_min_align = 4096; 919 const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN); 920 const int eplist_raw_sz = num * sizeof(struct ept_queue_head); 921 const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN); 922 923 /* The QH list must be aligned to 4096 bytes. */ 924 controller.epts = memalign(eplist_align, eplist_sz); 925 if (!controller.epts) 926 return -ENOMEM; 927 memset(controller.epts, 0, eplist_sz); 928 929 controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ); 930 if (!controller.items_mem) { 931 free(controller.epts); 932 return -ENOMEM; 933 } 934 memset(controller.items_mem, 0, ILIST_SZ); 935 936 for (i = 0; i < 2 * NUM_ENDPOINTS; i++) { 937 /* 938 * Configure QH for each endpoint. The structure of the QH list 939 * is such that each two subsequent fields, N and N+1 where N is 940 * even, in the QH list represent QH for one endpoint. The Nth 941 * entry represents OUT configuration and the N+1th entry does 942 * represent IN configuration of the endpoint. 
static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure the QH for each endpoint. The QH list is laid out
		 * so that each pair of subsequent entries, N and N+1 where N
		 * is even, represents one endpoint: the Nth entry holds the
		 * OUT configuration and the N+1th entry holds the IN
		 * configuration of that endpoint.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..3 */
	for (i = 1; i < 4; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[i],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/* Init EP 4..n */
	for (i = 4; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[4],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;
	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

#ifdef CONFIG_DM_USB
	ret = usb_setup_ehci_gadget(&controller.ctrl);
#else
	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
#endif
	if (ret)
		return ret;

	ret = ci_udc_probe();
	if (ret) {
		DBG("udc probe failed, returned %d\n", ret);
		return ret;
	}

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	udc_disconnect();

	driver->unbind(&controller.gadget);
	controller.driver = NULL;

	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
	free(controller.items_mem);
	free(controller.epts);

	return 0;
}

bool dfu_usb_get_reset(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	return !!(readl(&udc->usbsts) & STS_URI);
}
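/*
 * Usage sketch (illustrative, not part of this driver): a gadget driver
 * binds through the standard U-Boot gadget API, e.g.:
 *
 *	ret = usb_gadget_register_driver(&my_gadget_driver);
 *	while (!done)
 *		usb_gadget_handle_interrupts(0);
 *	usb_gadget_unregister_driver(&my_gadget_driver);
 *
 * where my_gadget_driver is a hypothetical struct usb_gadget_driver
 * that supplies the .bind, .setup, .unbind and .disconnect callbacks
 * and a .speed of USB_SPEED_FULL or USB_SPEED_HIGH, as checked in
 * usb_gadget_register_driver() above.
 */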