1 /* 2 * Copyright 2011, Marvell Semiconductor Inc. 3 * Lei Wen <leiwen@marvell.com> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 * 7 * Back ported to the 8xx platform (from the 8260 platform) by 8 * Murray.Jensen@cmst.csiro.au, 27-Jan-01. 9 */ 10 11 #include <common.h> 12 #include <command.h> 13 #include <config.h> 14 #include <net.h> 15 #include <malloc.h> 16 #include <asm/byteorder.h> 17 #include <asm/errno.h> 18 #include <asm/io.h> 19 #include <asm/unaligned.h> 20 #include <linux/types.h> 21 #include <linux/usb/ch9.h> 22 #include <linux/usb/gadget.h> 23 #include <usb/ci_udc.h> 24 #include "../host/ehci.h" 25 #include "ci_udc.h" 26 27 /* 28 * Check if the system has too long cachelines. If the cachelines are 29 * longer then 128b, the driver will not be able flush/invalidate data 30 * cache over separate QH entries. We use 128b because one QH entry is 31 * 64b long and there are always two QH list entries for each endpoint. 32 */ 33 #if ARCH_DMA_MINALIGN > 128 34 #error This driver can not work on systems with caches longer than 128b 35 #endif 36 37 /* 38 * Every QTD must be individually aligned, since we can program any 39 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN, 40 * and the USB HW requires 32-byte alignment. Align to both: 41 */ 42 #define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32) 43 /* Each QTD is this size */ 44 #define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item) 45 /* 46 * Align the size of the QTD too, so we can add this value to each 47 * QTD's address to get another aligned address. 48 */ 49 #define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN) 50 /* For each endpoint, we need 2 QTDs, one for each of IN and OUT */ 51 #define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ) 52 53 #ifndef DEBUG 54 #define DBG(x...) do {} while (0) 55 #else 56 #define DBG(x...) printf(x) 57 static const char *reqname(unsigned r) 58 { 59 switch (r) { 60 case USB_REQ_GET_STATUS: return "GET_STATUS"; 61 case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE"; 62 case USB_REQ_SET_FEATURE: return "SET_FEATURE"; 63 case USB_REQ_SET_ADDRESS: return "SET_ADDRESS"; 64 case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR"; 65 case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR"; 66 case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION"; 67 case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION"; 68 case USB_REQ_GET_INTERFACE: return "GET_INTERFACE"; 69 case USB_REQ_SET_INTERFACE: return "SET_INTERFACE"; 70 default: return "*UNKNOWN*"; 71 } 72 } 73 #endif 74 75 static struct usb_endpoint_descriptor ep0_desc = { 76 .bLength = sizeof(struct usb_endpoint_descriptor), 77 .bDescriptorType = USB_DT_ENDPOINT, 78 .bEndpointAddress = USB_DIR_IN, 79 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 80 }; 81 82 static int ci_pullup(struct usb_gadget *gadget, int is_on); 83 static int ci_ep_enable(struct usb_ep *ep, 84 const struct usb_endpoint_descriptor *desc); 85 static int ci_ep_disable(struct usb_ep *ep); 86 static int ci_ep_queue(struct usb_ep *ep, 87 struct usb_request *req, gfp_t gfp_flags); 88 static struct usb_request * 89 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags); 90 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req); 91 92 static struct usb_gadget_ops ci_udc_ops = { 93 .pullup = ci_pullup, 94 }; 95 96 static struct usb_ep_ops ci_ep_ops = { 97 .enable = ci_ep_enable, 98 .disable = ci_ep_disable, 99 .queue = ci_ep_queue, 100 .alloc_request = ci_ep_alloc_request, 101 .free_request = ci_ep_free_request, 102 }; 103 104 /* Init values for USB endpoints. */ 105 static const struct usb_ep ci_ep_init[2] = { 106 [0] = { /* EP 0 */ 107 .maxpacket = 64, 108 .name = "ep0", 109 .ops = &ci_ep_ops, 110 }, 111 [1] = { /* EP 1..n */ 112 .maxpacket = 512, 113 .name = "ep-", 114 .ops = &ci_ep_ops, 115 }, 116 }; 117 118 static struct ci_drv controller = { 119 .gadget = { 120 .name = "ci_udc", 121 .ops = &ci_udc_ops, 122 .is_dualspeed = 1, 123 }, 124 }; 125 126 /** 127 * ci_get_qh() - return queue head for endpoint 128 * @ep_num: Endpoint number 129 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) 130 * 131 * This function returns the QH associated with particular endpoint 132 * and it's direction. 133 */ 134 static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) 135 { 136 return &controller.epts[(ep_num * 2) + dir_in]; 137 } 138 139 /** 140 * ci_get_qtd() - return queue item for endpoint 141 * @ep_num: Endpoint number 142 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) 143 * 144 * This function returns the QH associated with particular endpoint 145 * and it's direction. 146 */ 147 static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) 148 { 149 int index = (ep_num * 2) + dir_in; 150 uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); 151 return (struct ept_queue_item *)imem; 152 } 153 154 /** 155 * ci_flush_qh - flush cache over queue head 156 * @ep_num: Endpoint number 157 * 158 * This function flushes cache over QH for particular endpoint. 159 */ 160 static void ci_flush_qh(int ep_num) 161 { 162 struct ept_queue_head *head = ci_get_qh(ep_num, 0); 163 const unsigned long start = (unsigned long)head; 164 const unsigned long end = start + 2 * sizeof(*head); 165 166 flush_dcache_range(start, end); 167 } 168 169 /** 170 * ci_invalidate_qh - invalidate cache over queue head 171 * @ep_num: Endpoint number 172 * 173 * This function invalidates cache over QH for particular endpoint. 174 */ 175 static void ci_invalidate_qh(int ep_num) 176 { 177 struct ept_queue_head *head = ci_get_qh(ep_num, 0); 178 unsigned long start = (unsigned long)head; 179 unsigned long end = start + 2 * sizeof(*head); 180 181 invalidate_dcache_range(start, end); 182 } 183 184 /** 185 * ci_flush_qtd - flush cache over queue item 186 * @ep_num: Endpoint number 187 * 188 * This function flushes cache over qTD pair for particular endpoint. 189 */ 190 static void ci_flush_qtd(int ep_num) 191 { 192 struct ept_queue_item *item = ci_get_qtd(ep_num, 0); 193 const unsigned long start = (unsigned long)item; 194 const unsigned long end = start + 2 * ILIST_ENT_SZ; 195 196 flush_dcache_range(start, end); 197 } 198 199 /** 200 * ci_invalidate_qtd - invalidate cache over queue item 201 * @ep_num: Endpoint number 202 * 203 * This function invalidates cache over qTD pair for particular endpoint. 204 */ 205 static void ci_invalidate_qtd(int ep_num) 206 { 207 struct ept_queue_item *item = ci_get_qtd(ep_num, 0); 208 const unsigned long start = (unsigned long)item; 209 const unsigned long end = start + 2 * ILIST_ENT_SZ; 210 211 invalidate_dcache_range(start, end); 212 } 213 214 static struct usb_request * 215 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) 216 { 217 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 218 int num; 219 struct ci_req *ci_req; 220 221 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 222 if (num == 0 && controller.ep0_req) 223 return &controller.ep0_req->req; 224 225 ci_req = calloc(1, sizeof(*ci_req)); 226 if (!ci_req) 227 return NULL; 228 229 INIT_LIST_HEAD(&ci_req->queue); 230 231 if (num == 0) 232 controller.ep0_req = ci_req; 233 234 return &ci_req->req; 235 } 236 237 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) 238 { 239 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 240 struct ci_req *ci_req = container_of(req, struct ci_req, req); 241 int num; 242 243 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 244 if (num == 0) { 245 if (!controller.ep0_req) 246 return; 247 controller.ep0_req = 0; 248 } 249 250 if (ci_req->b_buf) 251 free(ci_req->b_buf); 252 free(ci_req); 253 } 254 255 static void ep_enable(int num, int in, int maxpacket) 256 { 257 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 258 unsigned n; 259 260 n = readl(&udc->epctrl[num]); 261 if (in) 262 n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK); 263 else 264 n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK); 265 266 if (num != 0) { 267 struct ept_queue_head *head = ci_get_qh(num, in); 268 269 head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT; 270 ci_flush_qh(num); 271 } 272 writel(n, &udc->epctrl[num]); 273 } 274 275 static int ci_ep_enable(struct usb_ep *ep, 276 const struct usb_endpoint_descriptor *desc) 277 { 278 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 279 int num, in; 280 num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 281 in = (desc->bEndpointAddress & USB_DIR_IN) != 0; 282 ci_ep->desc = desc; 283 284 if (num) { 285 int max = get_unaligned_le16(&desc->wMaxPacketSize); 286 287 if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) 288 max = 64; 289 if (ep->maxpacket != max) { 290 DBG("%s: from %d to %d\n", __func__, 291 ep->maxpacket, max); 292 ep->maxpacket = max; 293 } 294 } 295 ep_enable(num, in, ep->maxpacket); 296 DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); 297 return 0; 298 } 299 300 static int ci_ep_disable(struct usb_ep *ep) 301 { 302 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 303 304 ci_ep->desc = NULL; 305 return 0; 306 } 307 308 static int ci_bounce(struct ci_req *ci_req, int in) 309 { 310 struct usb_request *req = &ci_req->req; 311 unsigned long addr = (unsigned long)req->buf; 312 unsigned long hwaddr; 313 uint32_t aligned_used_len; 314 315 /* Input buffer address is not aligned. */ 316 if (addr & (ARCH_DMA_MINALIGN - 1)) 317 goto align; 318 319 /* Input buffer length is not aligned. */ 320 if (req->length & (ARCH_DMA_MINALIGN - 1)) 321 goto align; 322 323 /* The buffer is well aligned, only flush cache. */ 324 ci_req->hw_len = req->length; 325 ci_req->hw_buf = req->buf; 326 goto flush; 327 328 align: 329 if (ci_req->b_buf && req->length > ci_req->b_len) { 330 free(ci_req->b_buf); 331 ci_req->b_buf = 0; 332 } 333 if (!ci_req->b_buf) { 334 ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); 335 ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); 336 if (!ci_req->b_buf) 337 return -ENOMEM; 338 } 339 ci_req->hw_len = ci_req->b_len; 340 ci_req->hw_buf = ci_req->b_buf; 341 342 if (in) 343 memcpy(ci_req->hw_buf, req->buf, req->length); 344 345 flush: 346 hwaddr = (unsigned long)ci_req->hw_buf; 347 aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); 348 flush_dcache_range(hwaddr, hwaddr + aligned_used_len); 349 350 return 0; 351 } 352 353 static void ci_debounce(struct ci_req *ci_req, int in) 354 { 355 struct usb_request *req = &ci_req->req; 356 unsigned long addr = (unsigned long)req->buf; 357 unsigned long hwaddr = (unsigned long)ci_req->hw_buf; 358 uint32_t aligned_used_len; 359 360 if (in) 361 return; 362 363 aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); 364 invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); 365 366 if (addr == hwaddr) 367 return; /* not a bounce */ 368 369 memcpy(req->buf, ci_req->hw_buf, req->actual); 370 } 371 372 static void ci_ep_submit_next_request(struct ci_ep *ci_ep) 373 { 374 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 375 struct ept_queue_item *item; 376 struct ept_queue_head *head; 377 int bit, num, len, in; 378 struct ci_req *ci_req; 379 380 ci_ep->req_primed = true; 381 382 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 383 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 384 item = ci_get_qtd(num, in); 385 head = ci_get_qh(num, in); 386 387 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 388 len = ci_req->req.length; 389 390 item->info = INFO_BYTES(len) | INFO_ACTIVE; 391 item->page0 = (unsigned long)ci_req->hw_buf; 392 item->page1 = ((unsigned long)ci_req->hw_buf & 0xfffff000) + 0x1000; 393 item->page2 = ((unsigned long)ci_req->hw_buf & 0xfffff000) + 0x2000; 394 item->page3 = ((unsigned long)ci_req->hw_buf & 0xfffff000) + 0x3000; 395 item->page4 = ((unsigned long)ci_req->hw_buf & 0xfffff000) + 0x4000; 396 397 head->next = (unsigned long)item; 398 head->info = 0; 399 400 /* 401 * When sending the data for an IN transaction, the attached host 402 * knows that all data for the IN is sent when one of the following 403 * occurs: 404 * a) A zero-length packet is transmitted. 405 * b) A packet with length that isn't an exact multiple of the ep's 406 * maxpacket is transmitted. 407 * c) Enough data is sent to exactly fill the host's maximum expected 408 * IN transaction size. 409 * 410 * One of these conditions MUST apply at the end of an IN transaction, 411 * or the transaction will not be considered complete by the host. If 412 * none of (a)..(c) already applies, then we must force (a) to apply 413 * by explicitly sending an extra zero-length packet. 414 */ 415 /* IN !a !b !c */ 416 if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) { 417 /* 418 * Each endpoint has 2 items allocated, even though typically 419 * only 1 is used at a time since either an IN or an OUT but 420 * not both is queued. For an IN transaction, item currently 421 * points at the second of these items, so we know that we 422 * can use the other to transmit the extra zero-length packet. 423 */ 424 struct ept_queue_item *other_item = ci_get_qtd(num, 0); 425 item->next = (unsigned long)other_item; 426 item = other_item; 427 item->info = INFO_ACTIVE; 428 } 429 430 item->next = TERMINATE; 431 item->info |= INFO_IOC; 432 433 ci_flush_qtd(num); 434 435 DBG("ept%d %s queue len %x, req %p, buffer %p\n", 436 num, in ? "in" : "out", len, ci_req, ci_req->hw_buf); 437 ci_flush_qh(num); 438 439 if (in) 440 bit = EPT_TX(num); 441 else 442 bit = EPT_RX(num); 443 444 writel(bit, &udc->epprime); 445 } 446 447 static int ci_ep_queue(struct usb_ep *ep, 448 struct usb_request *req, gfp_t gfp_flags) 449 { 450 struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); 451 struct ci_req *ci_req = container_of(req, struct ci_req, req); 452 int in, ret; 453 int __maybe_unused num; 454 455 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 456 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 457 458 if (!num && ci_ep->req_primed) { 459 /* 460 * The flipping of ep0 between IN and OUT relies on 461 * ci_ep_queue consuming the current IN/OUT setting 462 * immediately. If this is deferred to a later point when the 463 * req is pulled out of ci_req->queue, then the IN/OUT setting 464 * may have been changed since the req was queued, and state 465 * will get out of sync. This condition doesn't occur today, 466 * but could if bugs were introduced later, and this error 467 * check will save a lot of debugging time. 468 */ 469 printf("%s: ep0 transaction already in progress\n", __func__); 470 return -EPROTO; 471 } 472 473 ret = ci_bounce(ci_req, in); 474 if (ret) 475 return ret; 476 477 DBG("ept%d %s pre-queue req %p, buffer %p\n", 478 num, in ? "in" : "out", ci_req, ci_req->hw_buf); 479 list_add_tail(&ci_req->queue, &ci_ep->queue); 480 481 if (!ci_ep->req_primed) 482 ci_ep_submit_next_request(ci_ep); 483 484 return 0; 485 } 486 487 static void flip_ep0_direction(void) 488 { 489 if (ep0_desc.bEndpointAddress == USB_DIR_IN) { 490 DBG("%s: Flipping ep0 to OUT\n", __func__); 491 ep0_desc.bEndpointAddress = 0; 492 } else { 493 DBG("%s: Flipping ep0 to IN\n", __func__); 494 ep0_desc.bEndpointAddress = USB_DIR_IN; 495 } 496 } 497 498 static void handle_ep_complete(struct ci_ep *ci_ep) 499 { 500 struct ept_queue_item *item; 501 int num, in, len; 502 struct ci_req *ci_req; 503 504 num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 505 in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 506 item = ci_get_qtd(num, in); 507 ci_invalidate_qtd(num); 508 509 len = (item->info >> 16) & 0x7fff; 510 if (item->info & 0xff) 511 printf("EP%d/%s FAIL info=%x pg0=%x\n", 512 num, in ? "in" : "out", item->info, item->page0); 513 514 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 515 list_del_init(&ci_req->queue); 516 ci_ep->req_primed = false; 517 518 if (!list_empty(&ci_ep->queue)) 519 ci_ep_submit_next_request(ci_ep); 520 521 ci_req->req.actual = ci_req->req.length - len; 522 ci_debounce(ci_req, in); 523 524 DBG("ept%d %s req %p, complete %x\n", 525 num, in ? "in" : "out", ci_req, len); 526 if (num != 0 || controller.ep0_data_phase) 527 ci_req->req.complete(&ci_ep->ep, &ci_req->req); 528 if (num == 0 && controller.ep0_data_phase) { 529 /* 530 * Data Stage is complete, so flip ep0 dir for Status Stage, 531 * which always transfers a packet in the opposite direction. 532 */ 533 DBG("%s: flip ep0 dir for Status Stage\n", __func__); 534 flip_ep0_direction(); 535 controller.ep0_data_phase = false; 536 ci_req->req.length = 0; 537 usb_ep_queue(&ci_ep->ep, &ci_req->req, 0); 538 } 539 } 540 541 #define SETUP(type, request) (((type) << 8) | (request)) 542 543 static void handle_setup(void) 544 { 545 struct ci_ep *ci_ep = &controller.ep[0]; 546 struct ci_req *ci_req; 547 struct usb_request *req; 548 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 549 struct ept_queue_head *head; 550 struct usb_ctrlrequest r; 551 int status = 0; 552 int num, in, _num, _in, i; 553 char *buf; 554 555 ci_req = controller.ep0_req; 556 req = &ci_req->req; 557 head = ci_get_qh(0, 0); /* EP0 OUT */ 558 559 ci_invalidate_qh(0); 560 memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest)); 561 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 562 writel(EPT_RX(0), &udc->epsetupstat); 563 #else 564 writel(EPT_RX(0), &udc->epstat); 565 #endif 566 DBG("handle setup %s, %x, %x index %x value %x length %x\n", 567 reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex, 568 r.wValue, r.wLength); 569 570 /* Set EP0 dir for Data Stage based on Setup Stage data */ 571 if (r.bRequestType & USB_DIR_IN) { 572 DBG("%s: Set ep0 to IN for Data Stage\n", __func__); 573 ep0_desc.bEndpointAddress = USB_DIR_IN; 574 } else { 575 DBG("%s: Set ep0 to OUT for Data Stage\n", __func__); 576 ep0_desc.bEndpointAddress = 0; 577 } 578 if (r.wLength) { 579 controller.ep0_data_phase = true; 580 } else { 581 /* 0 length -> no Data Stage. Flip dir for Status Stage */ 582 DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__); 583 flip_ep0_direction(); 584 controller.ep0_data_phase = false; 585 } 586 587 list_del_init(&ci_req->queue); 588 ci_ep->req_primed = false; 589 590 switch (SETUP(r.bRequestType, r.bRequest)) { 591 case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE): 592 _num = r.wIndex & 15; 593 _in = !!(r.wIndex & 0x80); 594 595 if ((r.wValue == 0) && (r.wLength == 0)) { 596 req->length = 0; 597 for (i = 0; i < NUM_ENDPOINTS; i++) { 598 struct ci_ep *ep = &controller.ep[i]; 599 600 if (!ep->desc) 601 continue; 602 num = ep->desc->bEndpointAddress 603 & USB_ENDPOINT_NUMBER_MASK; 604 in = (ep->desc->bEndpointAddress 605 & USB_DIR_IN) != 0; 606 if ((num == _num) && (in == _in)) { 607 ep_enable(num, in, ep->ep.maxpacket); 608 usb_ep_queue(controller.gadget.ep0, 609 req, 0); 610 break; 611 } 612 } 613 } 614 return; 615 616 case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS): 617 /* 618 * write address delayed (will take effect 619 * after the next IN txn) 620 */ 621 writel((r.wValue << 25) | (1 << 24), &udc->devaddr); 622 req->length = 0; 623 usb_ep_queue(controller.gadget.ep0, req, 0); 624 return; 625 626 case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS): 627 req->length = 2; 628 buf = (char *)req->buf; 629 buf[0] = 1 << USB_DEVICE_SELF_POWERED; 630 buf[1] = 0; 631 usb_ep_queue(controller.gadget.ep0, req, 0); 632 return; 633 } 634 /* pass request up to the gadget driver */ 635 if (controller.driver) 636 status = controller.driver->setup(&controller.gadget, &r); 637 else 638 status = -ENODEV; 639 640 if (!status) 641 return; 642 DBG("STALL reqname %s type %x value %x, index %x\n", 643 reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex); 644 writel((1<<16) | (1 << 0), &udc->epctrl[0]); 645 } 646 647 static void stop_activity(void) 648 { 649 int i, num, in; 650 struct ept_queue_head *head; 651 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 652 writel(readl(&udc->epcomp), &udc->epcomp); 653 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 654 writel(readl(&udc->epsetupstat), &udc->epsetupstat); 655 #endif 656 writel(readl(&udc->epstat), &udc->epstat); 657 writel(0xffffffff, &udc->epflush); 658 659 /* error out any pending reqs */ 660 for (i = 0; i < NUM_ENDPOINTS; i++) { 661 if (i != 0) 662 writel(0, &udc->epctrl[i]); 663 if (controller.ep[i].desc) { 664 num = controller.ep[i].desc->bEndpointAddress 665 & USB_ENDPOINT_NUMBER_MASK; 666 in = (controller.ep[i].desc->bEndpointAddress 667 & USB_DIR_IN) != 0; 668 head = ci_get_qh(num, in); 669 head->info = INFO_ACTIVE; 670 ci_flush_qh(num); 671 } 672 } 673 } 674 675 void udc_irq(void) 676 { 677 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 678 unsigned n = readl(&udc->usbsts); 679 writel(n, &udc->usbsts); 680 int bit, i, num, in; 681 682 n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI); 683 if (n == 0) 684 return; 685 686 if (n & STS_URI) { 687 DBG("-- reset --\n"); 688 stop_activity(); 689 } 690 if (n & STS_SLI) 691 DBG("-- suspend --\n"); 692 693 if (n & STS_PCI) { 694 int max = 64; 695 int speed = USB_SPEED_FULL; 696 697 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 698 bit = (readl(&udc->hostpc1_devlc) >> 25) & 3; 699 #else 700 bit = (readl(&udc->portsc) >> 26) & 3; 701 #endif 702 DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full"); 703 if (bit == 2) { 704 speed = USB_SPEED_HIGH; 705 max = 512; 706 } 707 controller.gadget.speed = speed; 708 for (i = 1; i < NUM_ENDPOINTS; i++) { 709 if (controller.ep[i].ep.maxpacket > max) 710 controller.ep[i].ep.maxpacket = max; 711 } 712 } 713 714 if (n & STS_UEI) 715 printf("<UEI %x>\n", readl(&udc->epcomp)); 716 717 if ((n & STS_UI) || (n & STS_UEI)) { 718 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 719 n = readl(&udc->epsetupstat); 720 #else 721 n = readl(&udc->epstat); 722 #endif 723 if (n & EPT_RX(0)) 724 handle_setup(); 725 726 n = readl(&udc->epcomp); 727 if (n != 0) 728 writel(n, &udc->epcomp); 729 730 for (i = 0; i < NUM_ENDPOINTS && n; i++) { 731 if (controller.ep[i].desc) { 732 num = controller.ep[i].desc->bEndpointAddress 733 & USB_ENDPOINT_NUMBER_MASK; 734 in = (controller.ep[i].desc->bEndpointAddress 735 & USB_DIR_IN) != 0; 736 bit = (in) ? EPT_TX(num) : EPT_RX(num); 737 if (n & bit) 738 handle_ep_complete(&controller.ep[i]); 739 } 740 } 741 } 742 } 743 744 int usb_gadget_handle_interrupts(int index) 745 { 746 u32 value; 747 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 748 749 value = readl(&udc->usbsts); 750 if (value) 751 udc_irq(); 752 753 return value; 754 } 755 756 void udc_disconnect(void) 757 { 758 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 759 /* disable pullup */ 760 stop_activity(); 761 writel(USBCMD_FS2, &udc->usbcmd); 762 udelay(800); 763 if (controller.driver) 764 controller.driver->disconnect(&controller.gadget); 765 } 766 767 static int ci_pullup(struct usb_gadget *gadget, int is_on) 768 { 769 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 770 if (is_on) { 771 /* RESET */ 772 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd); 773 udelay(200); 774 775 writel((unsigned long)controller.epts, &udc->epinitaddr); 776 777 /* select DEVICE mode */ 778 writel(USBMODE_DEVICE, &udc->usbmode); 779 780 #if !defined(CONFIG_USB_GADGET_DUALSPEED) 781 /* Port force Full-Speed Connect */ 782 setbits_le32(&udc->portsc, PFSC); 783 #endif 784 785 writel(0xffffffff, &udc->epflush); 786 787 /* Turn on the USB connection by enabling the pullup resistor */ 788 writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd); 789 } else { 790 udc_disconnect(); 791 } 792 793 return 0; 794 } 795 796 static int ci_udc_probe(void) 797 { 798 struct ept_queue_head *head; 799 int i; 800 801 const int num = 2 * NUM_ENDPOINTS; 802 803 const int eplist_min_align = 4096; 804 const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN); 805 const int eplist_raw_sz = num * sizeof(struct ept_queue_head); 806 const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN); 807 808 /* The QH list must be aligned to 4096 bytes. */ 809 controller.epts = memalign(eplist_align, eplist_sz); 810 if (!controller.epts) 811 return -ENOMEM; 812 memset(controller.epts, 0, eplist_sz); 813 814 controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ); 815 if (!controller.items_mem) { 816 free(controller.epts); 817 return -ENOMEM; 818 } 819 memset(controller.items_mem, 0, ILIST_SZ); 820 821 for (i = 0; i < 2 * NUM_ENDPOINTS; i++) { 822 /* 823 * Configure QH for each endpoint. The structure of the QH list 824 * is such that each two subsequent fields, N and N+1 where N is 825 * even, in the QH list represent QH for one endpoint. The Nth 826 * entry represents OUT configuration and the N+1th entry does 827 * represent IN configuration of the endpoint. 828 */ 829 head = controller.epts + i; 830 if (i < 2) 831 head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE) 832 | CONFIG_ZLT | CONFIG_IOS; 833 else 834 head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE) 835 | CONFIG_ZLT; 836 head->next = TERMINATE; 837 head->info = 0; 838 839 if (i & 1) { 840 ci_flush_qh(i / 2); 841 ci_flush_qtd(i / 2); 842 } 843 } 844 845 INIT_LIST_HEAD(&controller.gadget.ep_list); 846 847 /* Init EP 0 */ 848 memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init)); 849 controller.ep[0].desc = &ep0_desc; 850 INIT_LIST_HEAD(&controller.ep[0].queue); 851 controller.ep[0].req_primed = false; 852 controller.gadget.ep0 = &controller.ep[0].ep; 853 INIT_LIST_HEAD(&controller.gadget.ep0->ep_list); 854 855 /* Init EP 1..n */ 856 for (i = 1; i < NUM_ENDPOINTS; i++) { 857 memcpy(&controller.ep[i].ep, &ci_ep_init[1], 858 sizeof(*ci_ep_init)); 859 INIT_LIST_HEAD(&controller.ep[i].queue); 860 controller.ep[i].req_primed = false; 861 list_add_tail(&controller.ep[i].ep.ep_list, 862 &controller.gadget.ep_list); 863 } 864 865 ci_ep_alloc_request(&controller.ep[0].ep, 0); 866 if (!controller.ep0_req) { 867 free(controller.items_mem); 868 free(controller.epts); 869 return -ENOMEM; 870 } 871 872 return 0; 873 } 874 875 int usb_gadget_register_driver(struct usb_gadget_driver *driver) 876 { 877 int ret; 878 879 if (!driver) 880 return -EINVAL; 881 if (!driver->bind || !driver->setup || !driver->disconnect) 882 return -EINVAL; 883 if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) 884 return -EINVAL; 885 886 #ifdef CONFIG_DM_USB 887 ret = usb_setup_ehci_gadget(&controller.ctrl); 888 #else 889 ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); 890 #endif 891 if (ret) 892 return ret; 893 894 ret = ci_udc_probe(); 895 #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) 896 /* 897 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all 898 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection 899 */ 900 if (!ret) { 901 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 902 903 /* select ULPI phy */ 904 writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); 905 } 906 #endif 907 908 ret = driver->bind(&controller.gadget); 909 if (ret) { 910 DBG("driver->bind() returned %d\n", ret); 911 return ret; 912 } 913 controller.driver = driver; 914 915 return 0; 916 } 917 918 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 919 { 920 udc_disconnect(); 921 922 driver->unbind(&controller.gadget); 923 controller.driver = NULL; 924 925 ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); 926 free(controller.items_mem); 927 free(controller.epts); 928 929 return 0; 930 } 931 932 bool dfu_usb_get_reset(void) 933 { 934 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 935 936 return !!(readl(&udc->usbsts) & STS_URI); 937 } 938