// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for the Atmel USBA high speed USB device controller
 * [Original from Linux kernel: drivers/usb/gadget/atmel_usba_udc.c]
 *
 * Copyright (C) 2005-2013 Atmel Corporation
 *		      Bo Shen <voice.shen@atmel.com>
 */

#include <common.h>
#include <linux/errno.h>
#include <asm/gpio.h>
#include <asm/hardware.h>
#include <linux/list.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/atmel_usba_udc.h>
#include <malloc.h>

#include "atmel_usba_udc.h"

static int vbus_is_present(struct usba_udc *udc)
{
	/* No Vbus detection: Assume always present */
	return 1;
}

static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;

	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero) {
		req->last_transaction = 0;
	}

	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
	    ep->ep.name, req, transaction_len,
	    req->last_transaction ? ", done" : "");

	memcpy(ep->fifo, req->req.buf + req->req.actual, transaction_len);
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	req->req.actual += transaction_len;
}

static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d), dma: %d\n",
	    ep->ep.name, req, req->req.length, req->using_dma);

	req->req.actual = 0;
	req->submitted = 1;

	next_fifo_transaction(ep, req);
	if (req->last_transaction) {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
		usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
	} else {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
		usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
	}
}

static void submit_next_request(struct usba_ep *ep)
{
	struct usba_request *req;

	if (list_empty(&ep->queue)) {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
		return;
	}

	req = list_entry(ep->queue.next, struct usba_request, queue);
	if (!req->submitted)
		submit_request(ep, req);
}

static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
	ep->state = STATUS_STAGE_IN;
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
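
/*
 * OUT data is drained bank by bank: each busy FIFO bank is copied into the
 * current request at req.actual, and either a short packet or reaching
 * req.length marks the request complete.  As an illustrative example (not
 * from the original sources): with 64-byte banks, a 100-byte request is
 * filled by one full 64-byte bank followed by a 36-byte short packet, at
 * which point the completion callback runs and, on a control endpoint, the
 * status stage is started via send_status().
 */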

static void receive_data(struct usba_ep *ep)
{
	struct usba_udc *udc = ep->udc;
	struct usba_request *req;
	unsigned long status;
	unsigned int bytecount, nr_busy;
	int is_complete = 0;

	status = usba_ep_readl(ep, STA);
	nr_busy = USBA_BFEXT(BUSY_BANKS, status);

	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);

	while (nr_busy > 0) {
		if (list_empty(&ep->queue)) {
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			break;
		}
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

		bytecount = USBA_BFEXT(BYTE_COUNT, status);

		if (status & USBA_SHORT_PACKET)
			is_complete = 1;
		if (req->req.actual + bytecount >= req->req.length) {
			is_complete = 1;
			bytecount = req->req.length - req->req.actual;
		}

		memcpy(req->req.buf + req->req.actual, ep->fifo, bytecount);
		req->req.actual += bytecount;

		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);

		if (is_complete) {
			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
			req->req.status = 0;
			list_del_init(&req->queue);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			spin_lock(&udc->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_unlock(&udc->lock);
		}

		status = usba_ep_readl(ep, STA);
		nr_busy = USBA_BFEXT(BUSY_BANKS, status);

		if (is_complete && ep_is_control(ep)) {
			send_status(udc, ep);
			break;
		}
	}
}

static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	DBG(DBG_GADGET | DBG_REQ, "%s: req %p complete: status %d, actual %u\n",
	    ep->ep.name, req, req->req.status, req->req.actual);

	req->req.complete(&ep->ep, &req->req);
}

static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
	struct usba_request *req, *tmp_req;

	list_for_each_entry_safe(req, tmp_req, list, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, status);
	}
}
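
/*
 * Note on the endpoint size field: EPT_SIZE encodes the FIFO size as a power
 * of two starting at 8 bytes, hence the fls(maxpacket - 1) - 3 computation
 * below.  Worked example (illustration only): for a 512-byte bulk endpoint,
 * fls(511) = 9, so EPT_SIZE = 6, the encoding for a 512-byte FIFO; 8 bytes
 * and below are special-cased to USBA_EPT_SIZE_8.
 */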

static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags = 0, ept_cfg, maxpacket;
	unsigned int nr_trans;

	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);

	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;

	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
	     != ep->index) ||
	    ep->index == 0 ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 ||
	    maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}

	ep->is_isoc = 0;
	ep->is_in = 0;

	if (maxpacket <= 8)
		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
	else
		/* LSB is bit 1, not 0 */
		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);

	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
	    ep->ep.name, ept_cfg, maxpacket);

	if (usb_endpoint_dir_in(desc)) {
		ep->is_in = 1;
		ept_cfg |= USBA_EPT_DIR_IN;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
			    ep->ep.name);
			return -EINVAL;
		}

		/*
		 * Bits 11:12 specify number of _additional_
		 * transactions per microframe.
		 */
		nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
		if (nr_trans > 3)
			return -EINVAL;

		ep->is_isoc = 1;
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);

		/*
		 * Do triple-buffering on high-bandwidth iso endpoints.
		 */
		if (nr_trans > 1 && ep->nr_banks == 3)
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
		else
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_INT:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);

	ep->desc = desc;
	ep->ep.maxpacket = maxpacket;

	usba_ep_writel(ep, CFG, ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);

	usba_writel(udc, INT_ENB,
		    (usba_readl(udc, INT_ENB)
		     | USBA_BF(EPT_INT, 1 << ep->index)));

	spin_unlock_irqrestore(&udc->lock, flags);

	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
	    (unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
	    (unsigned long)usba_readl(udc, INT_ENB));

	return 0;
}

static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags = 0;

	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);

	spin_lock_irqsave(&udc->lock, flags);

	if (!ep->desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		/* REVISIT because this driver disables endpoints in
		 * reset_all_endpoints() before calling disconnect(),
		 * most gadget drivers would trigger this non-error ...
		 */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
			    ep->ep.name);
		return -EINVAL;
	}
	ep->desc = NULL;

	list_splice_init(&ep->queue, &req_list);
	usba_ep_writel(ep, CFG, 0);
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_writel(udc, INT_ENB,
		    usba_readl(udc, INT_ENB) &
		    ~USBA_BF(EPT_INT, 1 << ep->index));

	request_complete_list(ep, &req_list, -ESHUTDOWN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *
usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct usba_request *req;

	DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);

	req = calloc(1, sizeof(struct usba_request));
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void
usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_request *req = to_usba_req(_req);

	DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);

	free(req);
}

static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags = 0;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
	    ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
	    !ep->desc)
		return -ESHUTDOWN;

	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* May have received a reset since last time we checked */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->desc) {
		list_add_tail(&req->queue, &ep->queue);

		if ((!ep_is_control(ep) && ep->is_in) ||
		    (ep_is_control(ep) && (ep->state == DATA_STAGE_IN ||
					   ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);

		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}

static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_request *req = to_usba_req(_req);

	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
	    ep->ep.name, req);

	/*
	 * Errors should stop the queue from advancing until the
	 * completion function returns.
	 */
	list_del_init(&req->queue);

	request_complete(ep, req, -ECONNRESET);

	/* Process the next request if any */
	submit_next_request(ep);

	return 0;
}
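
/*
 * Halt handling: SET_HALT is refused with -EAGAIN while requests are still
 * queued or an IN endpoint still has busy FIFO banks, so a stall never cuts
 * off data that is already in flight.  Clearing a halt also clears the data
 * toggle (USBA_TOGGLE_CLR), as expected for ClearFeature(ENDPOINT_HALT)
 * from the host.
 */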

static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags = 0;
	int ret = 0;

	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
	    value ? "set" : "clear");

	if (!ep->desc) {
		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
		    ep->ep.name);
		return -ENODEV;
	}

	if (ep->is_isoc) {
		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
		    ep->ep.name);
		return -ENOTTY;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * We can't halt IN endpoints while there are still data to be
	 * transferred
	 */
	if (!list_empty(&ep->queue) ||
	    ((value && ep->is_in && (usba_ep_readl(ep, STA) &
				     USBA_BF(BUSY_BANKS, -1L))))) {
		ret = -EAGAIN;
	} else {
		if (value)
			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
		else
			usba_ep_writel(ep, CLR_STA,
				       USBA_FORCE_STALL | USBA_TOGGLE_CLR);
		usba_ep_readl(ep, STA);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}

static int usba_ep_fifo_status(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);

	return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
}

static void usba_ep_fifo_flush(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;

	usba_writel(udc, EPT_RST, 1 << ep->index);
}

static const struct usb_ep_ops usba_ep_ops = {
	.enable		= usba_ep_enable,
	.disable	= usba_ep_disable,
	.alloc_request	= usba_ep_alloc_request,
	.free_request	= usba_ep_free_request,
	.queue		= usba_ep_queue,
	.dequeue	= usba_ep_dequeue,
	.set_halt	= usba_ep_set_halt,
	.fifo_status	= usba_ep_fifo_status,
	.fifo_flush	= usba_ep_fifo_flush,
};

static int usba_udc_get_frame(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);

	return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
}

static int usba_udc_wakeup(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags = 0;
	u32 ctrl;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
		ctrl = usba_readl(udc, CTRL);
		usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}

static int
usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_selfpowered)
		udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
	else
		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame		= usba_udc_get_frame,
	.wakeup			= usba_udc_wakeup,
	.set_selfpowered	= usba_udc_set_selfpowered,
};

static struct usb_endpoint_descriptor usba_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	/* FIXME: I have no idea what to put here */
	.bInterval = 1,
};

/*
 * Called with interrupts disabled and udc->lock held.
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	usba_writel(udc, EPT_RST, ~0UL);

	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}

	/* NOTE: normally, the next call to the gadget driver is in
	 * charge of disabling endpoints... usually disconnect().
	 * The exception would be entering a high speed test mode.
	 *
	 * FIXME remove this code ... and retest thoroughly.
	 */
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		if (ep->desc) {
			spin_unlock(&udc->lock);
			usba_ep_disable(&ep->ep);
			spin_lock(&udc->lock);
		}
	}
}

static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
{
	struct usba_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return to_usba_ep(udc->gadget.ep0);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		u8 bEndpointAddress;

		if (!ep->desc)
			continue;
		bEndpointAddress = ep->desc->bEndpointAddress;
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
		    == (wIndex & USB_ENDPOINT_NUMBER_MASK))
			return ep;
	}

	return NULL;
}

/* Called with interrupts disabled and udc->lock held */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}

static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
{
	if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
		return 1;
	return 0;
}

static inline void set_address(struct usba_udc *udc, unsigned int addr)
{
	u32 regval;

	DBG(DBG_BUS, "setting address %u...\n", addr);
	regval = usba_readl(udc, CTRL);
	regval = USBA_BFINS(DEV_ADDR, addr, regval);
	usba_writel(udc, CTRL, regval);
}
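
/*
 * USB 2.0 test modes: the test selector is carried in the high byte of
 * wIndex of SetFeature(TEST_MODE), which is why the switch below matches
 * 0x0100 (Test_J) through 0x0400 (Test_Packet).  test_packet_buffer holds
 * the standard 53-byte test packet payload defined by the USB 2.0
 * specification; the comments next to the bytes describe the intended J/K
 * signalling pattern on the wire.
 */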

static int do_test_mode(struct usba_udc *udc)
{
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJJJKKKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJKKKKKKK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		DBG(DBG_ALL, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		DBG(DBG_ALL, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/*
		 * Test_SE0_NAK: Force high-speed mode and set up ep0
		 * for Bulk IN transfers
		 */
		ep = &udc->usba_ep[0];
		usba_writel(udc, TST,
			    USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
			       USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
			       | USBA_EPT_DIR_IN
			       | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
			       | USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			DBG(DBG_ALL, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			DBG(DBG_ALL, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet */
		ep = &udc->usba_ep[0];
		usba_ep_writel(ep, CFG,
			       USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
			       | USBA_EPT_DIR_IN
			       | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
			       | USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			DBG(DBG_ALL, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			memcpy(ep->fifo, test_packet_buffer,
			       sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			DBG(DBG_ALL, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		DBG(DBG_ERR, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}

/* Avoid overly long expressions */
static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
{
	if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
		return true;
	return false;
}

static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
{
	if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
		return true;
	return false;
}

static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
{
	if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
		return true;
	return false;
}
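
/*
 * Standard requests that must be answered without involving the gadget
 * driver (GET_STATUS, {SET,CLEAR}_FEATURE, SET_ADDRESS) are handled here;
 * anything else is delegated to the gadget driver's setup() callback with
 * the lock temporarily dropped.  For device GET_STATUS the two meaningful
 * bits are bit 0 (self-powered) and bit 1 (remote wakeup), mirroring
 * udc->devstatus.
 */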

static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
			    struct usb_ctrlrequest *crq)
{
	int retval = 0;

	switch (crq->bRequest) {
	case USB_REQ_GET_STATUS: {
		u16 status;

		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
			status = cpu_to_le16(udc->devstatus);
		} else if (crq->bRequestType
			   == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
			status = cpu_to_le16(0);
		} else if (crq->bRequestType
			   == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
			struct usba_ep *target;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			status = 0;
			if (is_stalled(udc, target))
				status |= cpu_to_le16(1);
		} else {
			goto delegate;
		}

		/* Write directly to the FIFO. No queueing is done. */
		if (crq->wLength != cpu_to_le16(sizeof(status)))
			goto stall;
		ep->state = DATA_STAGE_IN;
		__raw_writew(status, ep->fifo);
		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
		break;
	}

	case USB_REQ_CLEAR_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_remote_wakeup(crq))
				udc->devstatus
					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				/* Can't CLEAR_FEATURE TEST_MODE */
				goto stall;
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0) ||
			    !feature_is_ep_halt(crq))
				goto stall;
			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
			if (target->index != 0)
				usba_ep_writel(target, CLR_STA,
					       USBA_TOGGLE_CLR);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_test_mode(crq)) {
				send_status(udc, ep);
				ep->state = STATUS_STAGE_TEST;
				udc->test_mode = le16_to_cpu(crq->wIndex);
				return 0;
			} else if (feature_is_dev_remote_wakeup(crq)) {
				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			} else {
				goto stall;
			}
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0) ||
			    !feature_is_ep_halt(crq))
				goto stall;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_ADDRESS:
		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
			goto delegate;

		set_address(udc, le16_to_cpu(crq->wValue));
		send_status(udc, ep);
		ep->state = STATUS_STAGE_ADDR;
		break;

	default:
delegate:
		spin_unlock(&udc->lock);
		retval = udc->driver->setup(&udc->gadget, crq);
		spin_lock(&udc->lock);
	}

	return retval;

stall:
	DBG(DBG_ALL, "%s: Invalid setup request: %02x.%02x v%04x i%04x l%d\n",
	    ep->ep.name, crq->bRequestType, crq->bRequest,
	    le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
	    le16_to_cpu(crq->wLength));
	set_protocol_stall(udc, ep);

	return -1;
}
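
/*
 * Control endpoint state machine driven by the handler below (simplified):
 *
 *   WAIT_FOR_SETUP --RX_SETUP--> DATA_STAGE_IN  --TX_COMP--> STATUS_STAGE_OUT
 *                            \-> DATA_STAGE_OUT --RX_RDY---> STATUS_STAGE_IN
 *                            \-> STATUS_STAGE_IN    (OUT requests, wLength 0)
 *   STATUS_STAGE_{OUT,IN,ADDR,TEST} -----------------------> WAIT_FOR_SETUP
 *
 * A SETUP packet received in any other state aborts the pending request and
 * restarts the sequence.
 */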

static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
	    ep->ep.name, ep->state, epstatus, epctrl);

	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate our new address */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
						| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			DBG(DBG_ALL, "%s: TXCOMP: Invalid endpoint state %d\n",
			    ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			DBG(DBG_ALL, "%s: RXRDY: Invalid endpoint state %d\n",
			    ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if (epstatus & USBA_RX_SETUP) {
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this
			 * point. Clean up any pending requests (which
			 * may be successful).
			 */
			int status = -EPROTO;

			/*
			 * RXRDY and TXCOMP are dropped when SETUP
			 * packets arrive. Just pretend we received
			 * the status packet.
			 */
			if (ep->state == STATUS_STAGE_OUT ||
			    ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		if (pkt_len != sizeof(crq)) {
			DBG(DBG_ALL, "udc: Invalid length %u (expected %zu)\n",
			    pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		memcpy(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can
		 * generate or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * The USB 2.0 spec states that "if wLength is
			 * zero, there is no data transfer phase."
			 * However, testusb #14 seems to actually
			 * expect a data phase even if wLength = 0...
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		ret = -1;
		if (ep->index == 0) {
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		} else {
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
		    crq.crq.bRequestType, crq.crq.bRequest,
		    le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}

static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			DBG(DBG_INT, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		}

		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}

	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
	}
}
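
/*
 * Top-level interrupt handler.  In this U-Boot driver it is not hooked up
 * to a hardware interrupt line; usb_gadget_handle_interrupts() further down
 * simply calls it, and the gadget code is expected to invoke that function
 * from its polling loop (general U-Boot gadget usage, mentioned here as
 * background rather than something defined in this file).
 */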

static int usba_udc_irq(struct usba_udc *udc)
{
	u32 status, ep_status;

	spin_lock(&udc->lock);

	status = usba_readl(udc, INT_STA);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
		DBG(DBG_BUS, "Suspend detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
		    udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
		    udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&udc->usba_ep[i]))
					usba_control_irq(udc, &udc->usba_ep[i]);
				else
					usba_ep_irq(udc, &udc->usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0;

		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
		reset_all_endpoints(udc);

		if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
		    udc->driver->disconnect) {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			spin_unlock(&udc->lock);
			udc->driver->disconnect(&udc->gadget);
			spin_lock(&udc->lock);
		}

		if (status & USBA_HIGH_SPEED)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;

		ep0 = &udc->usba_ep[0];
		ep0->desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
			       (USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
			       | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
			       | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
			       USBA_EPT_ENABLE | USBA_RX_SETUP);
		usba_writel(udc, INT_ENB,
			    (usba_readl(udc, INT_ENB)
			    | USBA_BF(EPT_INT, 1)
			    | USBA_DET_SUSPEND
			    | USBA_END_OF_RESUME));

		/*
		 * Unclear why we hit this irregularly, e.g. in usbtest,
		 * but it's clearly harmless...
		 */
		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			DBG(DBG_ALL, "ODD: EP0 configuration is invalid!\n");
	}

	spin_unlock(&udc->lock);

	return 0;
}

static int atmel_usba_start(struct usba_udc *udc)
{
	udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;

	udc->vbus_prev = 0;

	/* If Vbus is present, enable the controller and wait for reset */
	if (vbus_is_present(udc) && udc->vbus_prev == 0) {
		usba_writel(udc, CTRL, USBA_ENABLE_MASK);
		usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
	}

	return 0;
}

static int atmel_usba_stop(struct usba_udc *udc)
{
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	reset_all_endpoints(udc);

	/* This will also disable the DP pullup */
	usba_writel(udc, CTRL, USBA_DISABLE_MASK);

	return 0;
}

static struct usba_udc controller = {
	.regs = (unsigned *)ATMEL_BASE_UDPHS,
	.fifo = (unsigned *)ATMEL_BASE_UDPHS_FIFO,
	.gadget = {
		.ops = &usba_udc_ops,
		.ep_list = LIST_HEAD_INIT(controller.gadget.ep_list),
		.speed = USB_SPEED_HIGH,
		.is_dualspeed = 1,
		.name = "atmel_usba_udc",
	},
};

int usb_gadget_handle_interrupts(int index)
{
	struct usba_udc *udc = &controller;

	return usba_udc_irq(udc);
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = &controller;
	int ret;

	if (!driver || !driver->bind || !driver->setup) {
		printf("bad parameter\n");
		return -EINVAL;
	}

	if (udc->driver) {
		printf("UDC already has a gadget driver\n");
		return -EBUSY;
	}

	atmel_usba_start(udc);

	udc->driver = driver;

	ret = driver->bind(&udc->gadget);
	if (ret) {
		pr_err("driver->bind() returned %d\n", ret);
		udc->driver = NULL;
	}

	return ret;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = &controller;

	if (!driver || !driver->unbind || !driver->disconnect) {
		pr_err("bad parameter\n");
		return -EINVAL;
	}

	driver->disconnect(&udc->gadget);
	driver->unbind(&udc->gadget);
	udc->driver = NULL;

	atmel_usba_stop(udc);

	return 0;
}
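
/*
 * Board/SoC glue: the endpoint layout is not probed from the hardware but is
 * handed in as a struct usba_platform_data (an endpoint table plus num_ep),
 * normally defined by the SoC/board code, which then calls
 * usba_udc_probe(&pdata).  (Illustrative; the exact location and contents of
 * that table are outside this file.)  usba_udc_pdata() below converts the
 * table into the usba_ep array used by the rest of the driver.
 */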

static struct usba_ep *usba_udc_pdata(struct usba_platform_data *pdata,
				      struct usba_udc *udc)
{
	struct usba_ep *eps;
	int i;

	eps = malloc(sizeof(struct usba_ep) * pdata->num_ep);
	if (!eps) {
		pr_err("failed to alloc eps\n");
		return NULL;
	}

	udc->gadget.ep0 = &eps[0].ep;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&eps[0].ep.ep_list);

	for (i = 0; i < pdata->num_ep; i++) {
		struct usba_ep *ep = &eps[i];

		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
		ep->ep.ops = &usba_ep_ops;
		ep->ep.name = pdata->ep[i].name;
		ep->ep.maxpacket = pdata->ep[i].fifo_size;
		ep->fifo_size = ep->ep.maxpacket;
		ep->udc = udc;
		INIT_LIST_HEAD(&ep->queue);
		ep->nr_banks = pdata->ep[i].nr_banks;
		ep->index = pdata->ep[i].index;
		ep->can_dma = pdata->ep[i].can_dma;
		ep->can_isoc = pdata->ep[i].can_isoc;
		if (i)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
	}

	return eps;
}

int usba_udc_probe(struct usba_platform_data *pdata)
{
	struct usba_udc *udc;

	udc = &controller;

	udc->usba_ep = usba_udc_pdata(pdata, udc);
	if (!udc->usba_ep)
		return -ENOMEM;

	return 0;
}