/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer than 128 bytes, the driver will not be able to flush/invalidate
 * the data cache over separate QH entries. We use 128 bytes because one
 * QH entry is 64 bytes long and there are always two QH list entries for
 * each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif

static struct usb_endpoint_descriptor ep0_out_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static struct usb_endpoint_descriptor ep0_in_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
	.enable = ci_ep_enable,
	.disable = ci_ep_disable,
	.queue = ci_ep_queue,
	.alloc_request = ci_ep_alloc_request,
	.free_request = ci_ep_free_request,
};
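
/*
 * These ops are what the U-Boot gadget framework dispatches to. A rough
 * gadget-side usage sketch (illustrative only; "my_ep", "buffer", "len"
 * and "my_complete" are hypothetical gadget driver names):
 *
 *	struct usb_request *req = usb_ep_alloc_request(my_ep, 0);
 *	req->buf = buffer;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(my_ep, req, 0);
 *
 * usb_ep_queue() ends up in ci_ep_queue() below and my_complete() is
 * called from handle_ep_complete() once the transfer finishes.
 */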

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[2] = {
	[0] = {	/* EP 0 */
		.maxpacket = 64,
		.name = "ep0",
		.ops = &ci_ep_ops,
	},
	[1] = {	/* EP 1..n */
		.maxpacket = 512,
		.name = "ep-",
		.ops = &ci_ep_ops,
	},
};

static struct ci_drv controller = {
	.gadget	= {
		.name = "ci_udc",
		.ops = &ci_udc_ops,
		.is_dualspeed = 1,
	},
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
	return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the qTD associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
	return controller.items[(ep_num * 2) + dir_in];
}

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function flushes cache over the QH pair (OUT and IN) of a
 * particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const uint32_t start = (uint32_t)head;
	const uint32_t end = start + 2 * sizeof(*head);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function invalidates cache over the QH pair (OUT and IN) of a
 * particular endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	uint32_t start = (uint32_t)head;
	uint32_t end = start + 2 * sizeof(*head);

	invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function flushes cache over the qTD pair of a particular endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const uint32_t start = (uint32_t)item;
	const uint32_t end_raw = start + 2 * sizeof(*item);
	const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function invalidates cache over the qTD pair of a particular
 * endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const uint32_t start = (uint32_t)item;
	const uint32_t end_raw = start + 2 * sizeof(*item);
	const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}
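
/*
 * Worked example of the indexing above (assuming the layout built in
 * ci_udc_probe() below): for endpoint 2 IN, ci_get_qh(2, 1) returns
 * &controller.epts[5], the sixth 64-byte queue head in the 4096-byte
 * aligned QH list, and ci_get_qtd(2, 1) returns the IN item of the third
 * cacheline-aligned qTD pair. The flush/invalidate helpers take only the
 * endpoint number because they always cover both the OUT and the IN
 * entry of a pair in one cache operation.
 */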

static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
	struct ci_req *ci_req;

	ci_req = memalign(ARCH_DMA_MINALIGN, sizeof(*ci_req));
	if (!ci_req)
		return NULL;

	INIT_LIST_HEAD(&ci_req->queue);
	ci_req->b_buf = 0;

	return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_req *ci_req;

	ci_req = container_of(req, struct ci_req, req);
	if (ci_req->b_buf)
		free(ci_req->b_buf);
	free(ci_req);
}

static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in;
	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ci_ep->desc = desc;

	if (num) {
		int max = get_unaligned_le16(&desc->wMaxPacketSize);

		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
			max = 64;
		if (ep->maxpacket != max) {
			DBG("%s: from %d to %d\n", __func__,
			    ep->maxpacket, max);
			ep->maxpacket = max;
		}
	}
	ep_enable(num, in, ep->maxpacket);
	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
	return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

	ci_ep->desc = NULL;
	return 0;
}

static int ci_bounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	uint32_t addr = (uint32_t)req->buf;
	uint32_t hwaddr;
	uint32_t aligned_used_len;

	/* Input buffer address is not aligned. */
	if (addr & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* Input buffer length is not aligned. */
	if (req->length & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* The buffer is well aligned, only flush cache. */
	ci_req->hw_len = req->length;
	ci_req->hw_buf = req->buf;
	goto flush;

align:
	if (ci_req->b_buf && req->length > ci_req->b_len) {
		free(ci_req->b_buf);
		ci_req->b_buf = 0;
	}
	if (!ci_req->b_buf) {
		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
		if (!ci_req->b_buf)
			return -ENOMEM;
	}
	ci_req->hw_len = ci_req->b_len;
	ci_req->hw_buf = ci_req->b_buf;

	if (in)
		memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
	hwaddr = (uint32_t)ci_req->hw_buf;
	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

	return 0;
}
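
/*
 * Example of the bounce logic above (assuming a 64-byte ARCH_DMA_MINALIGN
 * and a hypothetical buffer address): a request with req->buf == 0x10000004
 * or req->length == 13 hits one of the "align" checks and is staged through
 * the cacheline-aligned ci_req->b_buf bounce buffer, whereas a request
 * whose buffer address and length are both multiples of 64 is handed to
 * the hardware in place and only flushed here / invalidated in
 * ci_debounce().
 */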

static void ci_debounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	uint32_t addr = (uint32_t)req->buf;
	uint32_t hwaddr = (uint32_t)ci_req->hw_buf;
	uint32_t aligned_used_len;

	if (in)
		return;

	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

	if (addr == hwaddr)
		return; /* not a bounce */

	memcpy(req->buf, ci_req->hw_buf, req->actual);
}

static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	item->next = TERMINATE;
	item->info = INFO_BYTES(len) | INFO_IOC | INFO_ACTIVE;
	item->page0 = (uint32_t)ci_req->hw_buf;
	item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000;
	item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000;
	item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000;
	item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000;
	ci_flush_qtd(num);

	head->next = (unsigned) item;
	head->info = 0;

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}

static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}
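
/*
 * Only one request per endpoint is handed to the hardware at a time:
 * ci_ep_queue() primes the head of the endpoint's list if nothing is in
 * flight (req_primed), and handle_ep_complete() below primes the next
 * queued request once the current qTD retires. The page0..page4 pointers
 * written above cover up to five 4 KiB buffer pages of a single qTD, so
 * one request is limited to what a single qTD can describe.
 */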
"in" : "out", ci_req, ci_req->hw_buf); 406 list_add_tail(&ci_req->queue, &ci_ep->queue); 407 408 if (!ci_ep->req_primed) 409 ci_ep_submit_next_request(ci_ep); 410 411 return 0; 412 } 413 414 static void handle_ep_complete(struct ci_ep *ep) 415 { 416 struct ept_queue_item *item; 417 int num, in, len; 418 struct ci_req *ci_req; 419 420 num = ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 421 in = (ep->desc->bEndpointAddress & USB_DIR_IN) != 0; 422 if (num == 0) 423 ep->desc = &ep0_out_desc; 424 item = ci_get_qtd(num, in); 425 ci_invalidate_qtd(num); 426 427 len = (item->info >> 16) & 0x7fff; 428 if (item->info & 0xff) 429 printf("EP%d/%s FAIL info=%x pg0=%x\n", 430 num, in ? "in" : "out", item->info, item->page0); 431 432 ci_req = list_first_entry(&ep->queue, struct ci_req, queue); 433 list_del_init(&ci_req->queue); 434 ep->req_primed = false; 435 436 if (!list_empty(&ep->queue)) 437 ci_ep_submit_next_request(ep); 438 439 ci_req->req.actual = ci_req->req.length - len; 440 ci_debounce(ci_req, in); 441 442 DBG("ept%d %s req %p, complete %x\n", 443 num, in ? "in" : "out", ci_req, len); 444 ci_req->req.complete(&ep->ep, &ci_req->req); 445 if (num == 0) { 446 ci_req->req.length = 0; 447 usb_ep_queue(&ep->ep, &ci_req->req, 0); 448 ep->desc = &ep0_in_desc; 449 } 450 } 451 452 #define SETUP(type, request) (((type) << 8) | (request)) 453 454 static void handle_setup(void) 455 { 456 struct ci_ep *ci_ep = &controller.ep[0]; 457 struct ci_req *ci_req; 458 struct usb_request *req; 459 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 460 struct ept_queue_head *head; 461 struct usb_ctrlrequest r; 462 int status = 0; 463 int num, in, _num, _in, i; 464 char *buf; 465 466 ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); 467 req = &ci_req->req; 468 head = ci_get_qh(0, 0); /* EP0 OUT */ 469 470 ci_invalidate_qh(0); 471 memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest)); 472 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 473 writel(EPT_RX(0), &udc->epsetupstat); 474 #else 475 writel(EPT_RX(0), &udc->epstat); 476 #endif 477 DBG("handle setup %s, %x, %x index %x value %x\n", reqname(r.bRequest), 478 r.bRequestType, r.bRequest, r.wIndex, r.wValue); 479 480 list_del_init(&ci_req->queue); 481 ci_ep->req_primed = false; 482 483 switch (SETUP(r.bRequestType, r.bRequest)) { 484 case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE): 485 _num = r.wIndex & 15; 486 _in = !!(r.wIndex & 0x80); 487 488 if ((r.wValue == 0) && (r.wLength == 0)) { 489 req->length = 0; 490 for (i = 0; i < NUM_ENDPOINTS; i++) { 491 struct ci_ep *ep = &controller.ep[i]; 492 493 if (!ep->desc) 494 continue; 495 num = ep->desc->bEndpointAddress 496 & USB_ENDPOINT_NUMBER_MASK; 497 in = (ep->desc->bEndpointAddress 498 & USB_DIR_IN) != 0; 499 if ((num == _num) && (in == _in)) { 500 ep_enable(num, in, ep->ep.maxpacket); 501 usb_ep_queue(controller.gadget.ep0, 502 req, 0); 503 break; 504 } 505 } 506 } 507 return; 508 509 case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS): 510 /* 511 * write address delayed (will take effect 512 * after the next IN txn) 513 */ 514 writel((r.wValue << 25) | (1 << 24), &udc->devaddr); 515 req->length = 0; 516 usb_ep_queue(controller.gadget.ep0, req, 0); 517 return; 518 519 case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS): 520 req->length = 2; 521 buf = (char *)req->buf; 522 buf[0] = 1 << USB_DEVICE_SELF_POWERED; 523 buf[1] = 0; 524 usb_ep_queue(controller.gadget.ep0, req, 0); 525 return; 526 } 527 /* pass request up to the gadget driver */ 528 if (controller.driver) 529 status = 
controller.driver->setup(&controller.gadget, &r); 530 else 531 status = -ENODEV; 532 533 if (!status) 534 return; 535 DBG("STALL reqname %s type %x value %x, index %x\n", 536 reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex); 537 writel((1<<16) | (1 << 0), &udc->epctrl[0]); 538 } 539 540 static void stop_activity(void) 541 { 542 int i, num, in; 543 struct ept_queue_head *head; 544 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 545 writel(readl(&udc->epcomp), &udc->epcomp); 546 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 547 writel(readl(&udc->epsetupstat), &udc->epsetupstat); 548 #endif 549 writel(readl(&udc->epstat), &udc->epstat); 550 writel(0xffffffff, &udc->epflush); 551 552 /* error out any pending reqs */ 553 for (i = 0; i < NUM_ENDPOINTS; i++) { 554 if (i != 0) 555 writel(0, &udc->epctrl[i]); 556 if (controller.ep[i].desc) { 557 num = controller.ep[i].desc->bEndpointAddress 558 & USB_ENDPOINT_NUMBER_MASK; 559 in = (controller.ep[i].desc->bEndpointAddress 560 & USB_DIR_IN) != 0; 561 head = ci_get_qh(num, in); 562 head->info = INFO_ACTIVE; 563 ci_flush_qh(num); 564 } 565 } 566 } 567 568 void udc_irq(void) 569 { 570 struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; 571 unsigned n = readl(&udc->usbsts); 572 writel(n, &udc->usbsts); 573 int bit, i, num, in; 574 575 n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI); 576 if (n == 0) 577 return; 578 579 if (n & STS_URI) { 580 DBG("-- reset --\n"); 581 stop_activity(); 582 } 583 if (n & STS_SLI) 584 DBG("-- suspend --\n"); 585 586 if (n & STS_PCI) { 587 int max = 64; 588 int speed = USB_SPEED_FULL; 589 590 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 591 bit = (readl(&udc->hostpc1_devlc) >> 25) & 3; 592 #else 593 bit = (readl(&udc->portsc) >> 26) & 3; 594 #endif 595 DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full"); 596 if (bit == 2) { 597 speed = USB_SPEED_HIGH; 598 max = 512; 599 } 600 controller.gadget.speed = speed; 601 for (i = 1; i < NUM_ENDPOINTS; i++) { 602 if (controller.ep[i].ep.maxpacket > max) 603 controller.ep[i].ep.maxpacket = max; 604 } 605 } 606 607 if (n & STS_UEI) 608 printf("<UEI %x>\n", readl(&udc->epcomp)); 609 610 if ((n & STS_UI) || (n & STS_UEI)) { 611 #ifdef CONFIG_CI_UDC_HAS_HOSTPC 612 n = readl(&udc->epsetupstat); 613 #else 614 n = readl(&udc->epstat); 615 #endif 616 if (n & EPT_RX(0)) 617 handle_setup(); 618 619 n = readl(&udc->epcomp); 620 if (n != 0) 621 writel(n, &udc->epcomp); 622 623 for (i = 0; i < NUM_ENDPOINTS && n; i++) { 624 if (controller.ep[i].desc) { 625 num = controller.ep[i].desc->bEndpointAddress 626 & USB_ENDPOINT_NUMBER_MASK; 627 in = (controller.ep[i].desc->bEndpointAddress 628 & USB_DIR_IN) != 0; 629 bit = (in) ? 

int usb_gadget_handle_interrupts(void)
{
	u32 value;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	value = readl(&udc->usbsts);
	if (value)
		udc_irq();

	return value;
}

static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		writel((unsigned)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
	} else {
		stop_activity();
		writel(USBCMD_FS2, &udc->usbcmd);
		udelay(800);
		if (controller.driver)
			controller.driver->disconnect(gadget);
	}

	return 0;
}

void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	/* disable pullup */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}
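
/*
 * ci_udc_probe() below lays out the DMA structures the controller walks:
 * 2 * NUM_ENDPOINTS 64-byte queue heads in one 4096-byte-aligned block,
 * and NUM_ENDPOINTS cacheline-aligned qTD pairs (OUT at even indices, IN
 * at odd indices), matching the indexing used by ci_get_qh()/ci_get_qtd()
 * at the top of the file.
 */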

static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	uint8_t *imem;
	int i;

	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	const int ilist_align = roundup(ARCH_DMA_MINALIGN, 32);
	const int ilist_ent_raw_sz = 2 * sizeof(struct ept_queue_item);
	const int ilist_ent_sz = roundup(ilist_ent_raw_sz, ARCH_DMA_MINALIGN);
	const int ilist_sz = NUM_ENDPOINTS * ilist_ent_sz;

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	/*
	 * Each qTD item must be 32-byte aligned, and each qTD tuple must be
	 * cacheline aligned. There are two qTD items for each endpoint and
	 * only one of them is used for the endpoint at a time, so we can
	 * group them together.
	 */
	controller.items_mem = memalign(ilist_align, ilist_sz);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ilist_sz);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure QH for each endpoint. The structure of the QH
		 * list is such that each two subsequent entries, N and N+1
		 * where N is even, represent the QHs of one endpoint. The
		 * Nth entry holds the OUT configuration and the (N+1)th
		 * entry holds the IN configuration of the endpoint.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		imem = controller.items_mem + ((i >> 1) * ilist_ent_sz);
		if (i & 1)
			imem += sizeof(struct ept_queue_item);

		controller.items[i] = (struct ept_queue_item *)imem;

		if (i & 1) {
			ci_flush_qh(i - 1);
			ci_flush_qtd(i - 1);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_in_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..n */
	for (i = 1; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[1],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;
	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
	if (ret)
		return ret;

	ret = ci_udc_probe();
#if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS)
	/*
	 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all
	 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection
	 */
	if (!ret) {
		struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

		/* select ULPI phy */
		writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc);
	}
#endif

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	return 0;
}