1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Driver for the Atmel USBA high speed USB device controller 4 * 5 * Copyright (C) 2005-2007 Atmel Corporation 6 */ 7 #include <linux/clk.h> 8 #include <linux/clk/at91_pmc.h> 9 #include <linux/module.h> 10 #include <linux/init.h> 11 #include <linux/interrupt.h> 12 #include <linux/io.h> 13 #include <linux/slab.h> 14 #include <linux/device.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/list.h> 17 #include <linux/mfd/syscon.h> 18 #include <linux/platform_device.h> 19 #include <linux/regmap.h> 20 #include <linux/ctype.h> 21 #include <linux/usb/ch9.h> 22 #include <linux/usb/gadget.h> 23 #include <linux/delay.h> 24 #include <linux/of.h> 25 #include <linux/irq.h> 26 #include <linux/gpio/consumer.h> 27 28 #include "atmel_usba_udc.h" 29 #define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \ 30 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING) 31 32 #ifdef CONFIG_USB_GADGET_DEBUG_FS 33 #include <linux/debugfs.h> 34 #include <linux/uaccess.h> 35 36 static int queue_dbg_open(struct inode *inode, struct file *file) 37 { 38 struct usba_ep *ep = inode->i_private; 39 struct usba_request *req, *req_copy; 40 struct list_head *queue_data; 41 42 queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL); 43 if (!queue_data) 44 return -ENOMEM; 45 INIT_LIST_HEAD(queue_data); 46 47 spin_lock_irq(&ep->udc->lock); 48 list_for_each_entry(req, &ep->queue, queue) { 49 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC); 50 if (!req_copy) 51 goto fail; 52 list_add_tail(&req_copy->queue, queue_data); 53 } 54 spin_unlock_irq(&ep->udc->lock); 55 56 file->private_data = queue_data; 57 return 0; 58 59 fail: 60 spin_unlock_irq(&ep->udc->lock); 61 list_for_each_entry_safe(req, req_copy, queue_data, queue) { 62 list_del(&req->queue); 63 kfree(req); 64 } 65 kfree(queue_data); 66 return -ENOMEM; 67 } 68 69 /* 70 * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0 71 * 72 * b: buffer address 73 * l: buffer length 74 * I/i: interrupt/no interrupt 75 * Z/z: zero/no zero 76 * S/s: 
short ok/short not ok 77 * s: status 78 * n: nr_packets 79 * F/f: submitted/not submitted to FIFO 80 * D/d: using/not using DMA 81 * L/l: last transaction/not last transaction 82 */ 83 static ssize_t queue_dbg_read(struct file *file, char __user *buf, 84 size_t nbytes, loff_t *ppos) 85 { 86 struct list_head *queue = file->private_data; 87 struct usba_request *req, *tmp_req; 88 size_t len, remaining, actual = 0; 89 char tmpbuf[38]; 90 91 if (!access_ok(buf, nbytes)) 92 return -EFAULT; 93 94 inode_lock(file_inode(file)); 95 list_for_each_entry_safe(req, tmp_req, queue, queue) { 96 len = snprintf(tmpbuf, sizeof(tmpbuf), 97 "%8p %08x %c%c%c %5d %c%c%c\n", 98 req->req.buf, req->req.length, 99 req->req.no_interrupt ? 'i' : 'I', 100 req->req.zero ? 'Z' : 'z', 101 req->req.short_not_ok ? 's' : 'S', 102 req->req.status, 103 req->submitted ? 'F' : 'f', 104 req->using_dma ? 'D' : 'd', 105 req->last_transaction ? 'L' : 'l'); 106 len = min(len, sizeof(tmpbuf)); 107 if (len > nbytes) 108 break; 109 110 list_del(&req->queue); 111 kfree(req); 112 113 remaining = __copy_to_user(buf, tmpbuf, len); 114 actual += len - remaining; 115 if (remaining) 116 break; 117 118 nbytes -= len; 119 buf += len; 120 } 121 inode_unlock(file_inode(file)); 122 123 return actual; 124 } 125 126 static int queue_dbg_release(struct inode *inode, struct file *file) 127 { 128 struct list_head *queue_data = file->private_data; 129 struct usba_request *req, *tmp_req; 130 131 list_for_each_entry_safe(req, tmp_req, queue_data, queue) { 132 list_del(&req->queue); 133 kfree(req); 134 } 135 kfree(queue_data); 136 return 0; 137 } 138 139 static int regs_dbg_open(struct inode *inode, struct file *file) 140 { 141 struct usba_udc *udc; 142 unsigned int i; 143 u32 *data; 144 int ret = -ENOMEM; 145 146 inode_lock(inode); 147 udc = inode->i_private; 148 data = kmalloc(inode->i_size, GFP_KERNEL); 149 if (!data) 150 goto out; 151 152 spin_lock_irq(&udc->lock); 153 for (i = 0; i < inode->i_size / 4; i++) 154 data[i] = 
readl_relaxed(udc->regs + i * 4); 155 spin_unlock_irq(&udc->lock); 156 157 file->private_data = data; 158 ret = 0; 159 160 out: 161 inode_unlock(inode); 162 163 return ret; 164 } 165 166 static ssize_t regs_dbg_read(struct file *file, char __user *buf, 167 size_t nbytes, loff_t *ppos) 168 { 169 struct inode *inode = file_inode(file); 170 int ret; 171 172 inode_lock(inode); 173 ret = simple_read_from_buffer(buf, nbytes, ppos, 174 file->private_data, 175 file_inode(file)->i_size); 176 inode_unlock(inode); 177 178 return ret; 179 } 180 181 static int regs_dbg_release(struct inode *inode, struct file *file) 182 { 183 kfree(file->private_data); 184 return 0; 185 } 186 187 const struct file_operations queue_dbg_fops = { 188 .owner = THIS_MODULE, 189 .open = queue_dbg_open, 190 .llseek = no_llseek, 191 .read = queue_dbg_read, 192 .release = queue_dbg_release, 193 }; 194 195 const struct file_operations regs_dbg_fops = { 196 .owner = THIS_MODULE, 197 .open = regs_dbg_open, 198 .llseek = generic_file_llseek, 199 .read = regs_dbg_read, 200 .release = regs_dbg_release, 201 }; 202 203 static void usba_ep_init_debugfs(struct usba_udc *udc, 204 struct usba_ep *ep) 205 { 206 struct dentry *ep_root; 207 208 ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root); 209 ep->debugfs_dir = ep_root; 210 211 debugfs_create_file("queue", 0400, ep_root, ep, &queue_dbg_fops); 212 if (ep->can_dma) 213 debugfs_create_u32("dma_status", 0400, ep_root, 214 &ep->last_dma_status); 215 if (ep_is_control(ep)) 216 debugfs_create_u32("state", 0400, ep_root, &ep->state); 217 } 218 219 static void usba_ep_cleanup_debugfs(struct usba_ep *ep) 220 { 221 debugfs_remove_recursive(ep->debugfs_dir); 222 } 223 224 static void usba_init_debugfs(struct usba_udc *udc) 225 { 226 struct dentry *root; 227 struct resource *regs_resource; 228 229 root = debugfs_create_dir(udc->gadget.name, NULL); 230 udc->debugfs_root = root; 231 232 regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM, 233 
CTRL_IOMEM_ID); 234 235 if (regs_resource) { 236 debugfs_create_file_size("regs", 0400, root, udc, 237 ®s_dbg_fops, 238 resource_size(regs_resource)); 239 } 240 241 usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0)); 242 } 243 244 static void usba_cleanup_debugfs(struct usba_udc *udc) 245 { 246 usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0)); 247 debugfs_remove_recursive(udc->debugfs_root); 248 } 249 #else 250 static inline void usba_ep_init_debugfs(struct usba_udc *udc, 251 struct usba_ep *ep) 252 { 253 254 } 255 256 static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep) 257 { 258 259 } 260 261 static inline void usba_init_debugfs(struct usba_udc *udc) 262 { 263 264 } 265 266 static inline void usba_cleanup_debugfs(struct usba_udc *udc) 267 { 268 269 } 270 #endif 271 272 static ushort fifo_mode; 273 274 module_param(fifo_mode, ushort, 0x0); 275 MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode"); 276 277 /* mode 0 - uses autoconfig */ 278 279 /* mode 1 - fits in 8KB, generic max fifo configuration */ 280 static struct usba_fifo_cfg mode_1_cfg[] = { 281 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 282 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, }, 283 { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 1, }, 284 { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 1, }, 285 { .hw_ep_num = 4, .fifo_size = 1024, .nr_banks = 1, }, 286 { .hw_ep_num = 5, .fifo_size = 1024, .nr_banks = 1, }, 287 { .hw_ep_num = 6, .fifo_size = 1024, .nr_banks = 1, }, 288 }; 289 290 /* mode 2 - fits in 8KB, performance max fifo configuration */ 291 static struct usba_fifo_cfg mode_2_cfg[] = { 292 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 293 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 3, }, 294 { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 2, }, 295 { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 2, }, 296 }; 297 298 /* mode 3 - fits in 8KB, mixed fifo configuration */ 299 static struct usba_fifo_cfg mode_3_cfg[] = { 300 { .hw_ep_num = 
0, .fifo_size = 64, .nr_banks = 1, }, 301 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, }, 302 { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, }, 303 { .hw_ep_num = 3, .fifo_size = 512, .nr_banks = 2, }, 304 { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, }, 305 { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, }, 306 { .hw_ep_num = 6, .fifo_size = 512, .nr_banks = 2, }, 307 }; 308 309 /* mode 4 - fits in 8KB, custom fifo configuration */ 310 static struct usba_fifo_cfg mode_4_cfg[] = { 311 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 312 { .hw_ep_num = 1, .fifo_size = 512, .nr_banks = 2, }, 313 { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, }, 314 { .hw_ep_num = 3, .fifo_size = 8, .nr_banks = 2, }, 315 { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, }, 316 { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, }, 317 { .hw_ep_num = 6, .fifo_size = 16, .nr_banks = 2, }, 318 { .hw_ep_num = 7, .fifo_size = 8, .nr_banks = 2, }, 319 { .hw_ep_num = 8, .fifo_size = 8, .nr_banks = 2, }, 320 }; 321 /* Add additional configurations here */ 322 323 static int usba_config_fifo_table(struct usba_udc *udc) 324 { 325 int n; 326 327 switch (fifo_mode) { 328 default: 329 fifo_mode = 0; 330 /* fall through */ 331 case 0: 332 udc->fifo_cfg = NULL; 333 n = 0; 334 break; 335 case 1: 336 udc->fifo_cfg = mode_1_cfg; 337 n = ARRAY_SIZE(mode_1_cfg); 338 break; 339 case 2: 340 udc->fifo_cfg = mode_2_cfg; 341 n = ARRAY_SIZE(mode_2_cfg); 342 break; 343 case 3: 344 udc->fifo_cfg = mode_3_cfg; 345 n = ARRAY_SIZE(mode_3_cfg); 346 break; 347 case 4: 348 udc->fifo_cfg = mode_4_cfg; 349 n = ARRAY_SIZE(mode_4_cfg); 350 break; 351 } 352 DBG(DBG_HW, "Setup fifo_mode %d\n", fifo_mode); 353 354 return n; 355 } 356 357 static inline u32 usba_int_enb_get(struct usba_udc *udc) 358 { 359 return udc->int_enb_cache; 360 } 361 362 static inline void usba_int_enb_set(struct usba_udc *udc, u32 mask) 363 { 364 u32 val; 365 366 val = udc->int_enb_cache | mask; 367 usba_writel(udc, INT_ENB, 
val);
	udc->int_enb_cache = val;
}

/*
 * Clear bits in INT_ENB, keeping the software cache in sync.
 * Caller must hold udc->lock (consistent with usba_int_enb_set()).
 */
static inline void usba_int_enb_clear(struct usba_udc *udc, u32 mask)
{
	u32 val;

	val = udc->int_enb_cache & ~mask;
	usba_writel(udc, INT_ENB, val);
	udc->int_enb_cache = val;
}

/* Sample the Vbus GPIO; without one, Vbus is assumed always present. */
static int vbus_is_present(struct usba_udc *udc)
{
	if (udc->vbus_pin)
		return gpiod_get_value(udc->vbus_pin);

	/* No Vbus detection: Assume always present */
	return 1;
}

/* Apply the SoC-specific bias-toggle erratum workaround, if any. */
static void toggle_bias(struct usba_udc *udc, int is_on)
{
	if (udc->errata && udc->errata->toggle_bias)
		udc->errata->toggle_bias(udc, is_on);
}

/* One-shot bias pulse (erratum workaround); consumes bias_pulse_needed. */
static void generate_bias_pulse(struct usba_udc *udc)
{
	if (!udc->bias_pulse_needed)
		return;

	if (udc->errata && udc->errata->pulse_bias)
		udc->errata->pulse_bias(udc);

	udc->bias_pulse_needed = false;
}

/*
 * Copy the next chunk of an IN request into the endpoint FIFO (PIO path)
 * and mark the bank ready. Sets req->last_transaction when this chunk
 * finishes the request; a full-maxpacket final chunk with req.zero set
 * still needs a trailing ZLP, so it is NOT the last transaction.
 */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;

	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
		req->last_transaction = 0;

	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
		ep->ep.name, req, transaction_len,
		req->last_transaction ? 
", done" : ""); 421 422 memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len); 423 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); 424 req->req.actual += transaction_len; 425 } 426 427 static void submit_request(struct usba_ep *ep, struct usba_request *req) 428 { 429 DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n", 430 ep->ep.name, req, req->req.length); 431 432 req->req.actual = 0; 433 req->submitted = 1; 434 435 if (req->using_dma) { 436 if (req->req.length == 0) { 437 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 438 return; 439 } 440 441 if (req->req.zero) 442 usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET); 443 else 444 usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET); 445 446 usba_dma_writel(ep, ADDRESS, req->req.dma); 447 usba_dma_writel(ep, CONTROL, req->ctrl); 448 } else { 449 next_fifo_transaction(ep, req); 450 if (req->last_transaction) { 451 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); 452 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); 453 } else { 454 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); 455 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 456 } 457 } 458 } 459 460 static void submit_next_request(struct usba_ep *ep) 461 { 462 struct usba_request *req; 463 464 if (list_empty(&ep->queue)) { 465 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY); 466 return; 467 } 468 469 req = list_entry(ep->queue.next, struct usba_request, queue); 470 if (!req->submitted) 471 submit_request(ep, req); 472 } 473 474 static void send_status(struct usba_udc *udc, struct usba_ep *ep) 475 { 476 ep->state = STATUS_STAGE_IN; 477 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); 478 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); 479 } 480 481 static void receive_data(struct usba_ep *ep) 482 { 483 struct usba_udc *udc = ep->udc; 484 struct usba_request *req; 485 unsigned long status; 486 unsigned int bytecount, nr_busy; 487 int is_complete = 0; 488 489 status = usba_ep_readl(ep, STA); 490 nr_busy = USBA_BFEXT(BUSY_BANKS, status); 491 492 
DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy); 493 494 while (nr_busy > 0) { 495 if (list_empty(&ep->queue)) { 496 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 497 break; 498 } 499 req = list_entry(ep->queue.next, 500 struct usba_request, queue); 501 502 bytecount = USBA_BFEXT(BYTE_COUNT, status); 503 504 if (status & (1 << 31)) 505 is_complete = 1; 506 if (req->req.actual + bytecount >= req->req.length) { 507 is_complete = 1; 508 bytecount = req->req.length - req->req.actual; 509 } 510 511 memcpy_fromio(req->req.buf + req->req.actual, 512 ep->fifo, bytecount); 513 req->req.actual += bytecount; 514 515 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); 516 517 if (is_complete) { 518 DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name); 519 req->req.status = 0; 520 list_del_init(&req->queue); 521 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 522 spin_unlock(&udc->lock); 523 usb_gadget_giveback_request(&ep->ep, &req->req); 524 spin_lock(&udc->lock); 525 } 526 527 status = usba_ep_readl(ep, STA); 528 nr_busy = USBA_BFEXT(BUSY_BANKS, status); 529 530 if (is_complete && ep_is_control(ep)) { 531 send_status(udc, ep); 532 break; 533 } 534 } 535 } 536 537 static void 538 request_complete(struct usba_ep *ep, struct usba_request *req, int status) 539 { 540 struct usba_udc *udc = ep->udc; 541 542 WARN_ON(!list_empty(&req->queue)); 543 544 if (req->req.status == -EINPROGRESS) 545 req->req.status = status; 546 547 if (req->using_dma) 548 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in); 549 550 DBG(DBG_GADGET | DBG_REQ, 551 "%s: req %p complete: status %d, actual %u\n", 552 ep->ep.name, req, req->req.status, req->req.actual); 553 554 spin_unlock(&udc->lock); 555 usb_gadget_giveback_request(&ep->ep, &req->req); 556 spin_lock(&udc->lock); 557 } 558 559 static void 560 request_complete_list(struct usba_ep *ep, struct list_head *list, int status) 561 { 562 struct usba_request *req, *tmp_req; 563 564 list_for_each_entry_safe(req, tmp_req, list, queue) { 565 
list_del_init(&req->queue); 566 request_complete(ep, req, status); 567 } 568 } 569 570 static int 571 usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 572 { 573 struct usba_ep *ep = to_usba_ep(_ep); 574 struct usba_udc *udc = ep->udc; 575 unsigned long flags, maxpacket; 576 unsigned int nr_trans; 577 578 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); 579 580 maxpacket = usb_endpoint_maxp(desc); 581 582 if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index) 583 || ep->index == 0 584 || desc->bDescriptorType != USB_DT_ENDPOINT 585 || maxpacket == 0 586 || maxpacket > ep->fifo_size) { 587 DBG(DBG_ERR, "ep_enable: Invalid argument"); 588 return -EINVAL; 589 } 590 591 ep->is_isoc = 0; 592 ep->is_in = 0; 593 594 DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", 595 ep->ep.name, ep->ept_cfg, maxpacket); 596 597 if (usb_endpoint_dir_in(desc)) { 598 ep->is_in = 1; 599 ep->ept_cfg |= USBA_EPT_DIR_IN; 600 } 601 602 switch (usb_endpoint_type(desc)) { 603 case USB_ENDPOINT_XFER_CONTROL: 604 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL); 605 break; 606 case USB_ENDPOINT_XFER_ISOC: 607 if (!ep->can_isoc) { 608 DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n", 609 ep->ep.name); 610 return -EINVAL; 611 } 612 613 /* 614 * Bits 11:12 specify number of _additional_ 615 * transactions per microframe. 
616 */ 617 nr_trans = usb_endpoint_maxp_mult(desc); 618 if (nr_trans > 3) 619 return -EINVAL; 620 621 ep->is_isoc = 1; 622 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO); 623 ep->ept_cfg |= USBA_BF(NB_TRANS, nr_trans); 624 625 break; 626 case USB_ENDPOINT_XFER_BULK: 627 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK); 628 break; 629 case USB_ENDPOINT_XFER_INT: 630 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT); 631 break; 632 } 633 634 spin_lock_irqsave(&ep->udc->lock, flags); 635 636 ep->ep.desc = desc; 637 ep->ep.maxpacket = maxpacket; 638 639 usba_ep_writel(ep, CFG, ep->ept_cfg); 640 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); 641 642 if (ep->can_dma) { 643 u32 ctrl; 644 645 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index) | 646 USBA_BF(DMA_INT, 1 << ep->index)); 647 ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA; 648 usba_ep_writel(ep, CTL_ENB, ctrl); 649 } else { 650 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index)); 651 } 652 653 spin_unlock_irqrestore(&udc->lock, flags); 654 655 DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index, 656 (unsigned long)usba_ep_readl(ep, CFG)); 657 DBG(DBG_HW, "INT_ENB after init: %#08lx\n", 658 (unsigned long)usba_int_enb_get(udc)); 659 660 return 0; 661 } 662 663 static int usba_ep_disable(struct usb_ep *_ep) 664 { 665 struct usba_ep *ep = to_usba_ep(_ep); 666 struct usba_udc *udc = ep->udc; 667 LIST_HEAD(req_list); 668 unsigned long flags; 669 670 DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name); 671 672 spin_lock_irqsave(&udc->lock, flags); 673 674 if (!ep->ep.desc) { 675 spin_unlock_irqrestore(&udc->lock, flags); 676 /* REVISIT because this driver disables endpoints in 677 * reset_all_endpoints() before calling disconnect(), 678 * most gadget drivers would trigger this non-error ... 
679 */ 680 if (udc->gadget.speed != USB_SPEED_UNKNOWN) 681 DBG(DBG_ERR, "ep_disable: %s not enabled\n", 682 ep->ep.name); 683 return -EINVAL; 684 } 685 ep->ep.desc = NULL; 686 687 list_splice_init(&ep->queue, &req_list); 688 if (ep->can_dma) { 689 usba_dma_writel(ep, CONTROL, 0); 690 usba_dma_writel(ep, ADDRESS, 0); 691 usba_dma_readl(ep, STATUS); 692 } 693 usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE); 694 usba_int_enb_clear(udc, USBA_BF(EPT_INT, 1 << ep->index)); 695 696 request_complete_list(ep, &req_list, -ESHUTDOWN); 697 698 spin_unlock_irqrestore(&udc->lock, flags); 699 700 return 0; 701 } 702 703 static struct usb_request * 704 usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 705 { 706 struct usba_request *req; 707 708 DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags); 709 710 req = kzalloc(sizeof(*req), gfp_flags); 711 if (!req) 712 return NULL; 713 714 INIT_LIST_HEAD(&req->queue); 715 716 return &req->req; 717 } 718 719 static void 720 usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) 721 { 722 struct usba_request *req = to_usba_req(_req); 723 724 DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req); 725 726 kfree(req); 727 } 728 729 static int queue_dma(struct usba_udc *udc, struct usba_ep *ep, 730 struct usba_request *req, gfp_t gfp_flags) 731 { 732 unsigned long flags; 733 int ret; 734 735 DBG(DBG_DMA, "%s: req l/%u d/%pad %c%c%c\n", 736 ep->ep.name, req->req.length, &req->req.dma, 737 req->req.zero ? 'Z' : 'z', 738 req->req.short_not_ok ? 'S' : 's', 739 req->req.no_interrupt ? 
'I' : 'i'); 740 741 if (req->req.length > 0x10000) { 742 /* Lengths from 0 to 65536 (inclusive) are supported */ 743 DBG(DBG_ERR, "invalid request length %u\n", req->req.length); 744 return -EINVAL; 745 } 746 747 ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in); 748 if (ret) 749 return ret; 750 751 req->using_dma = 1; 752 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) 753 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE 754 | USBA_DMA_END_BUF_EN; 755 756 if (!ep->is_in) 757 req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; 758 759 /* 760 * Add this request to the queue and submit for DMA if 761 * possible. Check if we're still alive first -- we may have 762 * received a reset since last time we checked. 763 */ 764 ret = -ESHUTDOWN; 765 spin_lock_irqsave(&udc->lock, flags); 766 if (ep->ep.desc) { 767 if (list_empty(&ep->queue)) 768 submit_request(ep, req); 769 770 list_add_tail(&req->queue, &ep->queue); 771 ret = 0; 772 } 773 spin_unlock_irqrestore(&udc->lock, flags); 774 775 return ret; 776 } 777 778 static int 779 usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 780 { 781 struct usba_request *req = to_usba_req(_req); 782 struct usba_ep *ep = to_usba_ep(_ep); 783 struct usba_udc *udc = ep->udc; 784 unsigned long flags; 785 int ret; 786 787 DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n", 788 ep->ep.name, req, _req->length); 789 790 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || 791 !ep->ep.desc) 792 return -ESHUTDOWN; 793 794 req->submitted = 0; 795 req->using_dma = 0; 796 req->last_transaction = 0; 797 798 _req->status = -EINPROGRESS; 799 _req->actual = 0; 800 801 if (ep->can_dma) 802 return queue_dma(udc, ep, req, gfp_flags); 803 804 /* May have received a reset since last time we checked */ 805 ret = -ESHUTDOWN; 806 spin_lock_irqsave(&udc->lock, flags); 807 if (ep->ep.desc) { 808 list_add_tail(&req->queue, &ep->queue); 809 810 if ((!ep_is_control(ep) && ep->is_in) || 811 
(ep_is_control(ep) 812 && (ep->state == DATA_STAGE_IN 813 || ep->state == STATUS_STAGE_IN))) 814 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 815 else 816 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); 817 ret = 0; 818 } 819 spin_unlock_irqrestore(&udc->lock, flags); 820 821 return ret; 822 } 823 824 static void 825 usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status) 826 { 827 req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status); 828 } 829 830 static int stop_dma(struct usba_ep *ep, u32 *pstatus) 831 { 832 unsigned int timeout; 833 u32 status; 834 835 /* 836 * Stop the DMA controller. When writing both CH_EN 837 * and LINK to 0, the other bits are not affected. 838 */ 839 usba_dma_writel(ep, CONTROL, 0); 840 841 /* Wait for the FIFO to empty */ 842 for (timeout = 40; timeout; --timeout) { 843 status = usba_dma_readl(ep, STATUS); 844 if (!(status & USBA_DMA_CH_EN)) 845 break; 846 udelay(1); 847 } 848 849 if (pstatus) 850 *pstatus = status; 851 852 if (timeout == 0) { 853 dev_err(&ep->udc->pdev->dev, 854 "%s: timed out waiting for DMA FIFO to empty\n", 855 ep->ep.name); 856 return -ETIMEDOUT; 857 } 858 859 return 0; 860 } 861 862 static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 863 { 864 struct usba_ep *ep = to_usba_ep(_ep); 865 struct usba_udc *udc = ep->udc; 866 struct usba_request *req; 867 unsigned long flags; 868 u32 status; 869 870 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", 871 ep->ep.name, req); 872 873 spin_lock_irqsave(&udc->lock, flags); 874 875 list_for_each_entry(req, &ep->queue, queue) { 876 if (&req->req == _req) 877 break; 878 } 879 880 if (&req->req != _req) { 881 spin_unlock_irqrestore(&udc->lock, flags); 882 return -EINVAL; 883 } 884 885 if (req->using_dma) { 886 /* 887 * If this request is currently being transferred, 888 * stop the DMA controller and reset the FIFO. 
889 */ 890 if (ep->queue.next == &req->queue) { 891 status = usba_dma_readl(ep, STATUS); 892 if (status & USBA_DMA_CH_EN) 893 stop_dma(ep, &status); 894 895 #ifdef CONFIG_USB_GADGET_DEBUG_FS 896 ep->last_dma_status = status; 897 #endif 898 899 usba_writel(udc, EPT_RST, 1 << ep->index); 900 901 usba_update_req(ep, req, status); 902 } 903 } 904 905 /* 906 * Errors should stop the queue from advancing until the 907 * completion function returns. 908 */ 909 list_del_init(&req->queue); 910 911 request_complete(ep, req, -ECONNRESET); 912 913 /* Process the next request if any */ 914 submit_next_request(ep); 915 spin_unlock_irqrestore(&udc->lock, flags); 916 917 return 0; 918 } 919 920 static int usba_ep_set_halt(struct usb_ep *_ep, int value) 921 { 922 struct usba_ep *ep = to_usba_ep(_ep); 923 struct usba_udc *udc = ep->udc; 924 unsigned long flags; 925 int ret = 0; 926 927 DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name, 928 value ? "set" : "clear"); 929 930 if (!ep->ep.desc) { 931 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n", 932 ep->ep.name); 933 return -ENODEV; 934 } 935 if (ep->is_isoc) { 936 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n", 937 ep->ep.name); 938 return -ENOTTY; 939 } 940 941 spin_lock_irqsave(&udc->lock, flags); 942 943 /* 944 * We can't halt IN endpoints while there are still data to be 945 * transferred 946 */ 947 if (!list_empty(&ep->queue) 948 || ((value && ep->is_in && (usba_ep_readl(ep, STA) 949 & USBA_BF(BUSY_BANKS, -1L))))) { 950 ret = -EAGAIN; 951 } else { 952 if (value) 953 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); 954 else 955 usba_ep_writel(ep, CLR_STA, 956 USBA_FORCE_STALL | USBA_TOGGLE_CLR); 957 usba_ep_readl(ep, STA); 958 } 959 960 spin_unlock_irqrestore(&udc->lock, flags); 961 962 return ret; 963 } 964 965 static int usba_ep_fifo_status(struct usb_ep *_ep) 966 { 967 struct usba_ep *ep = to_usba_ep(_ep); 968 969 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); 970 } 971 972 static void 
usba_ep_fifo_flush(struct usb_ep *_ep) 973 { 974 struct usba_ep *ep = to_usba_ep(_ep); 975 struct usba_udc *udc = ep->udc; 976 977 usba_writel(udc, EPT_RST, 1 << ep->index); 978 } 979 980 static const struct usb_ep_ops usba_ep_ops = { 981 .enable = usba_ep_enable, 982 .disable = usba_ep_disable, 983 .alloc_request = usba_ep_alloc_request, 984 .free_request = usba_ep_free_request, 985 .queue = usba_ep_queue, 986 .dequeue = usba_ep_dequeue, 987 .set_halt = usba_ep_set_halt, 988 .fifo_status = usba_ep_fifo_status, 989 .fifo_flush = usba_ep_fifo_flush, 990 }; 991 992 static int usba_udc_get_frame(struct usb_gadget *gadget) 993 { 994 struct usba_udc *udc = to_usba_udc(gadget); 995 996 return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM)); 997 } 998 999 static int usba_udc_wakeup(struct usb_gadget *gadget) 1000 { 1001 struct usba_udc *udc = to_usba_udc(gadget); 1002 unsigned long flags; 1003 u32 ctrl; 1004 int ret = -EINVAL; 1005 1006 spin_lock_irqsave(&udc->lock, flags); 1007 if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { 1008 ctrl = usba_readl(udc, CTRL); 1009 usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP); 1010 ret = 0; 1011 } 1012 spin_unlock_irqrestore(&udc->lock, flags); 1013 1014 return ret; 1015 } 1016 1017 static int 1018 usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered) 1019 { 1020 struct usba_udc *udc = to_usba_udc(gadget); 1021 unsigned long flags; 1022 1023 gadget->is_selfpowered = (is_selfpowered != 0); 1024 spin_lock_irqsave(&udc->lock, flags); 1025 if (is_selfpowered) 1026 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED; 1027 else 1028 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED); 1029 spin_unlock_irqrestore(&udc->lock, flags); 1030 1031 return 0; 1032 } 1033 1034 static int atmel_usba_start(struct usb_gadget *gadget, 1035 struct usb_gadget_driver *driver); 1036 static int atmel_usba_stop(struct usb_gadget *gadget); 1037 1038 static struct usb_ep *atmel_usba_match_ep(struct usb_gadget *gadget, 1039 struct 
usb_endpoint_descriptor *desc, 1040 struct usb_ss_ep_comp_descriptor *ep_comp) 1041 { 1042 struct usb_ep *_ep; 1043 struct usba_ep *ep; 1044 1045 /* Look at endpoints until an unclaimed one looks usable */ 1046 list_for_each_entry(_ep, &gadget->ep_list, ep_list) { 1047 if (usb_gadget_ep_match_desc(gadget, _ep, desc, ep_comp)) 1048 goto found_ep; 1049 } 1050 /* Fail */ 1051 return NULL; 1052 1053 found_ep: 1054 1055 if (fifo_mode == 0) { 1056 /* Optimize hw fifo size based on ep type and other info */ 1057 ep = to_usba_ep(_ep); 1058 1059 switch (usb_endpoint_type(desc)) { 1060 case USB_ENDPOINT_XFER_CONTROL: 1061 break; 1062 1063 case USB_ENDPOINT_XFER_ISOC: 1064 ep->fifo_size = 1024; 1065 ep->nr_banks = 2; 1066 break; 1067 1068 case USB_ENDPOINT_XFER_BULK: 1069 ep->fifo_size = 512; 1070 ep->nr_banks = 1; 1071 break; 1072 1073 case USB_ENDPOINT_XFER_INT: 1074 if (desc->wMaxPacketSize == 0) 1075 ep->fifo_size = 1076 roundup_pow_of_two(_ep->maxpacket_limit); 1077 else 1078 ep->fifo_size = 1079 roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize)); 1080 ep->nr_banks = 1; 1081 break; 1082 } 1083 1084 /* It might be a little bit late to set this */ 1085 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size); 1086 1087 /* Generate ept_cfg basd on FIFO size and number of banks */ 1088 if (ep->fifo_size <= 8) 1089 ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); 1090 else 1091 /* LSB is bit 1, not 0 */ 1092 ep->ept_cfg = 1093 USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3); 1094 1095 ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks); 1096 1097 ep->udc->configured_ep++; 1098 } 1099 1100 return _ep; 1101 } 1102 1103 static const struct usb_gadget_ops usba_udc_ops = { 1104 .get_frame = usba_udc_get_frame, 1105 .wakeup = usba_udc_wakeup, 1106 .set_selfpowered = usba_udc_set_selfpowered, 1107 .udc_start = atmel_usba_start, 1108 .udc_stop = atmel_usba_stop, 1109 .match_ep = atmel_usba_match_ep, 1110 }; 1111 1112 static struct usb_endpoint_descriptor usba_ep0_desc = { 1113 .bLength = 
USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	/* FIXME: I have no idea what to put here */
	.bInterval = 1,
};

/* Template copied into each controller instance's gadget at probe time. */
static struct usb_gadget usba_gadget_template = {
	.ops = &usba_udc_ops,
	.max_speed = USB_SPEED_HIGH,
	.name = "atmel_usba_udc",
};

/*
 * Called with interrupts disabled and udc->lock held.
 * Resets every endpoint FIFO and fails all requests still queued on ep0
 * with -ECONNRESET.
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	usba_writel(udc, EPT_RST, ~0UL);

	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}
}

/*
 * Map a wIndex value from a SETUP packet to the matching enabled
 * endpoint. Endpoint 0 always matches; other endpoints must agree on
 * both direction bit and endpoint number. Returns NULL when no enabled
 * endpoint matches.
 */
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
{
	struct usba_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return to_usba_ep(udc->gadget.ep0);

	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
		u8 bEndpointAddress;

		if (!ep->ep.desc)
			continue;
		bEndpointAddress = ep->ep.desc->bEndpointAddress;
		/* Direction bits must match (XOR leaves USB_DIR_IN set on mismatch) */
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
				== (wIndex & USB_ENDPOINT_NUMBER_MASK))
			return ep;
	}

	return NULL;
}

/* Called with interrupts disabled and udc->lock held */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}

/* Non-zero when the endpoint currently has FORCE_STALL asserted. */
static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
{
	if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
		return 1;
	return 0;
}

static inline void set_address(struct usba_udc *udc, unsigned 
int addr)
{
        u32 regval;

        DBG(DBG_BUS, "setting address %u...\n", addr);
        regval = usba_readl(udc, CTRL);
        regval = USBA_BFINS(DEV_ADDR, addr, regval);
        usba_writel(udc, CTRL, regval);
}

/*
 * Enter the USB 2.0 high-speed test mode previously latched in
 * udc->test_mode (the wIndex of the SET_FEATURE/TEST_MODE request).
 * Returns 0 on success or -EINVAL for an unknown test selector.
 */
static int do_test_mode(struct usba_udc *udc)
{
        /* Test packet as defined by the USB 2.0 specification (7.1.20) */
        static const char test_packet_buffer[] = {
                /* JKJKJKJK * 9 */
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                /* JJKKJJKK * 8 */
                0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
                /* JJJJKKKK * 8 */
                0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
                /* JJJJJJJKKKKKKK * 8 */
                0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                /* JJJJJJJK * 8 */
                0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
                /* {JKKKKKKK * 10}, JK */
                0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
        };
        struct usba_ep *ep;
        struct device *dev = &udc->pdev->dev;
        int test_mode;

        test_mode = udc->test_mode;

        /* Start from a clean slate */
        reset_all_endpoints(udc);

        switch (test_mode) {
        case 0x0100:
                /* Test_J */
                usba_writel(udc, TST, USBA_TST_J_MODE);
                dev_info(dev, "Entering Test_J mode...\n");
                break;
        case 0x0200:
                /* Test_K */
                usba_writel(udc, TST, USBA_TST_K_MODE);
                dev_info(dev, "Entering Test_K mode...\n");
                break;
        case 0x0300:
                /*
                 * Test_SE0_NAK: Force high-speed mode and set up ep0
                 * for Bulk IN transfers
                 */
                ep = &udc->usba_ep[0];
                usba_writel(udc, TST,
                                USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
                usba_ep_writel(ep, CFG,
                                USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
                                | USBA_EPT_DIR_IN
                                | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
                                | USBA_BF(BK_NUMBER, 1));
                /* EPT_MAPPED confirms the controller accepted the config */
                if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
                        set_protocol_stall(udc, ep);
                        dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
                } else {
                        usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
                        dev_info(dev, "Entering Test_SE0_NAK mode...\n");
                }
                break;
        case 0x0400:
                /* Test_Packet */
                ep = &udc->usba_ep[0];
                usba_ep_writel(ep, CFG,
                                USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
                                | USBA_EPT_DIR_IN
                                | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
                                | USBA_BF(BK_NUMBER, 1));
                if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
                        set_protocol_stall(udc, ep);
                        dev_err(dev, "Test_Packet: ep0 not mapped\n");
                } else {
                        usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
                        usba_writel(udc, TST, USBA_TST_PKT_MODE);
                        /* Load the spec-defined pattern and kick the TX */
                        memcpy_toio(ep->fifo, test_packet_buffer,
                                        sizeof(test_packet_buffer));
                        usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
                        dev_info(dev, "Entering Test_Packet mode...\n");
                }
                break;
        default:
                dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
                return -EINVAL;
        }

        return 0;
}

/* Avoid overly long expressions */
static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
{
        if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
                return true;
        return false;
}

static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
{
        if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
                return true;
        return false;
}

static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
{
        if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
                return true;
        return false;
}

/*
 * Handle a SETUP packet on ep0 that the controller driver answers
 * itself: GET_STATUS, CLEAR_FEATURE, SET_FEATURE and SET_ADDRESS.
 * Anything else (and unknown recipients) is delegated to the function
 * driver's setup() callback.  Returns a negative value to request a
 * protocol STALL.  Called with udc->lock held; the lock is dropped
 * around the delegated setup() call.
 */
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
                struct usb_ctrlrequest *crq)
{
        int retval = 0;

        switch (crq->bRequest) {
        case USB_REQ_GET_STATUS: {
                u16 status;

                if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
                        status = cpu_to_le16(udc->devstatus);
                } else if (crq->bRequestType
                                == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
                        /* Interface status is always zero per USB 2.0 ch9 */
                        status = cpu_to_le16(0);
                } else if (crq->bRequestType
                                == (USB_DIR_IN |
USB_RECIP_ENDPOINT)) {
                        struct usba_ep *target;

                        target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
                        if (!target)
                                goto stall;

                        /* Bit 0 of the endpoint status is the halt flag */
                        status = 0;
                        if (is_stalled(udc, target))
                                status |= cpu_to_le16(1);
                } else
                        goto delegate;

                /* Write directly to the FIFO. No queueing is done. */
                if (crq->wLength != cpu_to_le16(sizeof(status)))
                        goto stall;
                ep->state = DATA_STAGE_IN;
                writew_relaxed(status, ep->fifo);
                usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
                break;
        }

        case USB_REQ_CLEAR_FEATURE: {
                if (crq->bRequestType == USB_RECIP_DEVICE) {
                        if (feature_is_dev_remote_wakeup(crq))
                                udc->devstatus
                                        &= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
                        else
                                /* Can't CLEAR_FEATURE TEST_MODE */
                                goto stall;
                } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
                        struct usba_ep *target;

                        if (crq->wLength != cpu_to_le16(0)
                                        || !feature_is_ep_halt(crq))
                                goto stall;
                        target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
                        if (!target)
                                goto stall;

                        usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
                        /* Clearing HALT also resets the data toggle,
                         * except on ep0 which has no toggle */
                        if (target->index != 0)
                                usba_ep_writel(target, CLR_STA,
                                                USBA_TOGGLE_CLR);
                } else {
                        goto delegate;
                }

                send_status(udc, ep);
                break;
        }

        case USB_REQ_SET_FEATURE: {
                if (crq->bRequestType == USB_RECIP_DEVICE) {
                        if (feature_is_dev_test_mode(crq)) {
                                /* Test mode is entered only after the
                                 * status stage completes; latch the
                                 * selector for the TXCOMP handler */
                                send_status(udc, ep);
                                ep->state = STATUS_STAGE_TEST;
                                udc->test_mode = le16_to_cpu(crq->wIndex);
                                return 0;
                        } else if (feature_is_dev_remote_wakeup(crq)) {
                                udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
                        } else {
                                goto stall;
                        }
                } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
                        struct usba_ep *target;

                        if (crq->wLength != cpu_to_le16(0)
                                        || !feature_is_ep_halt(crq))
                                goto stall;

                        target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
                        if (!target)
                                goto stall;

                        usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
                } else
                        goto delegate;

                send_status(udc, ep);
                break;
        }

        case USB_REQ_SET_ADDRESS:
                if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
                        goto delegate;

                /* The address only takes effect once the status stage
                 * completes (FADDR_EN is set in the TXCOMP handler) */
                set_address(udc, le16_to_cpu(crq->wValue));
                send_status(udc, ep);
                ep->state = STATUS_STAGE_ADDR;
                break;

        default:
delegate:
                spin_unlock(&udc->lock);
                retval = udc->driver->setup(&udc->gadget, crq);
                spin_lock(&udc->lock);
        }

        return retval;

stall:
        pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
                "halting endpoint...\n",
                ep->ep.name, crq->bRequestType, crq->bRequest,
                le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
                le16_to_cpu(crq->wLength));
        set_protocol_stall(udc, ep);
        return -1;
}

/*
 * Interrupt handler for a control endpoint.  Runs the ep0 state
 * machine: pushes IN data while TX_PK_RDY is armed, advances through
 * the TXCOMP/RXRDY status stages and decodes incoming SETUP packets.
 * Called with udc->lock held from the top-level IRQ handler.
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
        struct usba_request *req;
        u32 epstatus;
        u32 epctrl;

restart:
        epstatus = usba_ep_readl(ep, STA);
        epctrl = usba_ep_readl(ep, CTL);

        DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
                        ep->ep.name, ep->state, epstatus, epctrl);

        req = NULL;
        if (!list_empty(&ep->queue))
                req = list_entry(ep->queue.next,
                                struct usba_request, queue);

        if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
                /*
                 * NOTE(review): req is dereferenced without a NULL
                 * check here; this appears to rely on the TX_PK_RDY
                 * interrupt only being enabled while a request is
                 * queued — confirm against submit_request()/queue paths.
                 */
                if (req->submitted)
                        next_fifo_transaction(ep, req);
                else
                        submit_request(ep, req);

                if (req->last_transaction) {
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
                        usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
                }
                goto restart;
        }
        if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
                usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

                switch (ep->state) {
                case DATA_STAGE_IN:
                        /* IN data sent; expect the host's zero-length
                         * OUT status packet next */
                        usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
                        usba_ep_writel(ep, CTL_DIS,
USBA_TX_COMPLETE);
                        ep->state = STATUS_STAGE_OUT;
                        break;
                case STATUS_STAGE_ADDR:
                        /* Activate our new address */
                        usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
                                        | USBA_FADDR_EN));
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
                        ep->state = WAIT_FOR_SETUP;
                        break;
                case STATUS_STAGE_IN:
                        /* Status stage of an OUT transfer finished;
                         * retire the current request */
                        if (req) {
                                list_del_init(&req->queue);
                                request_complete(ep, req, 0);
                                submit_next_request(ep);
                        }
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
                        ep->state = WAIT_FOR_SETUP;
                        break;
                case STATUS_STAGE_TEST:
                        /* SET_FEATURE(TEST_MODE) status stage done:
                         * now actually enter the test mode */
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
                        ep->state = WAIT_FOR_SETUP;
                        if (do_test_mode(udc))
                                set_protocol_stall(udc, ep);
                        break;
                default:
                        pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
                                "halting endpoint...\n",
                                ep->ep.name, ep->state);
                        set_protocol_stall(udc, ep);
                        break;
                }

                goto restart;
        }
        if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
                switch (ep->state) {
                case STATUS_STAGE_OUT:
                        /* Host's zero-length status packet received;
                         * the control transfer is complete */
                        usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
                        usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

                        if (req) {
                                list_del_init(&req->queue);
                                request_complete(ep, req, 0);
                        }
                        ep->state = WAIT_FOR_SETUP;
                        break;

                case DATA_STAGE_OUT:
                        receive_data(ep);
                        break;

                default:
                        usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
                        usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
                        pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
                                "halting endpoint...\n",
                                ep->ep.name, ep->state);
                        set_protocol_stall(udc, ep);
                        break;
                }

                goto restart;
        }
        if (epstatus & USBA_RX_SETUP) {
                union {
                        struct usb_ctrlrequest crq;
                        unsigned long data[2];
                } crq;
                unsigned int pkt_len;
                int ret;

                if (ep->state != WAIT_FOR_SETUP) {
                        /*
                         * Didn't expect a SETUP packet at this
                         * point. Clean up any pending requests (which
                         * may be successful).
                         */
                        int status = -EPROTO;

                        /*
                         * RXRDY and TXCOMP are dropped when SETUP
                         * packets arrive. Just pretend we received
                         * the status packet.
                         */
                        if (ep->state == STATUS_STAGE_OUT
                                        || ep->state == STATUS_STAGE_IN) {
                                usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
                                status = 0;
                        }

                        if (req) {
                                list_del_init(&req->queue);
                                request_complete(ep, req, status);
                        }
                }

                pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
                DBG(DBG_HW, "Packet length: %u\n", pkt_len);
                if (pkt_len != sizeof(crq)) {
                        pr_warn("udc: Invalid packet length %u (expected %zu)\n",
                                pkt_len, sizeof(crq));
                        set_protocol_stall(udc, ep);
                        return;
                }

                DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
                memcpy_fromio(crq.data, ep->fifo, sizeof(crq));

                /* Free up one bank in the FIFO so that we can
                 * generate or receive a reply right away. */
                usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

                /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
                        ep->state, crq.crq.bRequestType,
                        crq.crq.bRequest); */

                if (crq.crq.bRequestType & USB_DIR_IN) {
                        /*
                         * The USB 2.0 spec states that "if wLength is
                         * zero, there is no data transfer phase."
                         * However, testusb #14 seems to actually
                         * expect a data phase even if wLength = 0...
                         */
                        ep->state = DATA_STAGE_IN;
                } else {
                        if (crq.crq.wLength != cpu_to_le16(0))
                                ep->state = DATA_STAGE_OUT;
                        else
                                ep->state = STATUS_STAGE_IN;
                }

                ret = -1;
                if (ep->index == 0)
                        ret = handle_ep0_setup(udc, ep, &crq.crq);
                else {
                        /* Non-default control endpoints go straight to
                         * the function driver */
                        spin_unlock(&udc->lock);
                        ret = udc->driver->setup(&udc->gadget, &crq.crq);
                        spin_lock(&udc->lock);
                }

                DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
                        crq.crq.bRequestType, crq.crq.bRequest,
                        le16_to_cpu(crq.crq.wLength), ep->state, ret);

                if (ret < 0) {
                        /* Let the host know that we failed */
                        set_protocol_stall(udc, ep);
                }
        }
}

/*
 * Interrupt handler for non-control endpoints: pushes queued PIO
 * transactions while the FIFO has room (TX_PK_RDY clear) and pulls
 * received OUT data on RX_BK_RDY.  Called with udc->lock held.
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
        struct usba_request *req;
        u32 epstatus;
        u32 epctrl;

        epstatus = usba_ep_readl(ep, STA);
        epctrl = usba_ep_readl(ep, CTL);

        DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

        while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
                DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

                if (list_empty(&ep->queue)) {
                        dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
                        return;
                }

                req = list_entry(ep->queue.next, struct usba_request, queue);

                if (req->using_dma) {
                        /* Send a zero-length packet */
                        usba_ep_writel(ep, SET_STA,
                                        USBA_TX_PK_RDY);
                        usba_ep_writel(ep, CTL_DIS,
                                        USBA_TX_PK_RDY);
                        list_del_init(&req->queue);
                        submit_next_request(ep);
                        request_complete(ep, req, 0);
                } else {
                        if (req->submitted)
                                next_fifo_transaction(ep, req);
                        else
                                submit_request(ep, req);

                        if (req->last_transaction) {
                                list_del_init(&req->queue);
                                submit_next_request(ep);
                                request_complete(ep, req, 0);
                        }
                }

                /* Re-sample the registers for the loop condition */
                epstatus = usba_ep_readl(ep, STA);
                epctrl =
usba_ep_readl(ep, CTL);
        }
        if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
                DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
                receive_data(ep);
        }
}

/*
 * Per-channel DMA completion handler: retires the request at the head
 * of the endpoint queue once the channel signals end-of-transfer or
 * end-of-buffer.  Called with udc->lock held.
 */
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
        struct usba_request *req;
        u32 status, control, pending;

        status = usba_dma_readl(ep, STATUS);
        control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
        ep->last_dma_status = status;
#endif
        pending = status & control;
        DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);

        if (status & USBA_DMA_CH_EN) {
                dev_err(&udc->pdev->dev,
                        "DMA_CH_EN is set after transfer is finished!\n");
                dev_err(&udc->pdev->dev,
                        "status=%#08x, pending=%#08x, control=%#08x\n",
                        status, pending, control);

                /*
                 * try to pretend nothing happened. We might have to
                 * do something here...
                 */
        }

        if (list_empty(&ep->queue))
                /* Might happen if a reset comes along at the right moment */
                return;

        if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
                req = list_entry(ep->queue.next, struct usba_request, queue);
                usba_update_req(ep, req, status);

                list_del_init(&req->queue);
                submit_next_request(ep);
                request_complete(ep, req, 0);
        }
}

/* Forward declarations: used by the IRQ handler below, defined later */
static int start_clock(struct usba_udc *udc);
static void stop_clock(struct usba_udc *udc);

/*
 * Top-level device interrupt handler: dispatches suspend/wakeup/resume
 * events, per-channel DMA and per-endpoint interrupts, and performs
 * the full (re)initialisation required after a bus reset.
 */
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
        struct usba_udc *udc = devid;
        u32 status, int_enb;
        u32 dma_status;
        u32 ep_status;

        spin_lock(&udc->lock);

        /* Only look at enabled sources (plus the speed status bit) */
        int_enb = usba_int_enb_get(udc);
        status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
        DBG(DBG_INT, "irq, status=%#08x\n", status);

        if (status & USBA_DET_SUSPEND) {
                /* Swap DET_SUSPEND for WAKE_UP so we are told when the
                 * bus becomes active again */
                usba_writel(udc, INT_CLR, USBA_DET_SUSPEND|USBA_WAKE_UP);
                usba_int_enb_set(udc, USBA_WAKE_UP);
                usba_int_enb_clear(udc, USBA_DET_SUSPEND);
                udc->suspended = true;
                toggle_bias(udc, 0);
                udc->bias_pulse_needed = true;
                stop_clock(udc);
                DBG(DBG_BUS, "Suspend detected\n");
                if (udc->gadget.speed != USB_SPEED_UNKNOWN
                                && udc->driver && udc->driver->suspend) {
                        spin_unlock(&udc->lock);
                        udc->driver->suspend(&udc->gadget);
                        spin_lock(&udc->lock);
                }
        }

        if (status & USBA_WAKE_UP) {
                start_clock(udc);
                toggle_bias(udc, 1);
                usba_writel(udc, INT_CLR, USBA_WAKE_UP);
                DBG(DBG_BUS, "Wake Up CPU detected\n");
        }

        if (status & USBA_END_OF_RESUME) {
                udc->suspended = false;
                usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
                /* Swap WAKE_UP back for DET_SUSPEND */
                usba_int_enb_clear(udc, USBA_WAKE_UP);
                usba_int_enb_set(udc, USBA_DET_SUSPEND);
                generate_bias_pulse(udc);
                DBG(DBG_BUS, "Resume detected\n");
                if (udc->gadget.speed != USB_SPEED_UNKNOWN
                                && udc->driver && udc->driver->resume) {
                        spin_unlock(&udc->lock);
                        udc->driver->resume(&udc->gadget);
                        spin_lock(&udc->lock);
                }
        }

        dma_status = USBA_BFEXT(DMA_INT, status);
        if (dma_status) {
                int i;

                /* Bus activity implies we are no longer suspended */
                usba_int_enb_set(udc, USBA_DET_SUSPEND);

                for (i = 1; i <= USBA_NR_DMAS; i++)
                        if (dma_status & (1 << i))
                                usba_dma_irq(udc, &udc->usba_ep[i]);
        }

        ep_status = USBA_BFEXT(EPT_INT, status);
        if (ep_status) {
                int i;

                usba_int_enb_set(udc, USBA_DET_SUSPEND);

                for (i = 0; i < udc->num_ep; i++)
                        if (ep_status & (1 << i)) {
                                if (ep_is_control(&udc->usba_ep[i]))
                                        usba_control_irq(udc, &udc->usba_ep[i]);
                                else
                                        usba_ep_irq(udc, &udc->usba_ep[i]);
                        }
        }

        if (status & USBA_END_OF_RESET) {
                struct usba_ep *ep0, *ep;
                int i, n;

                usba_writel(udc, INT_CLR,
                        USBA_END_OF_RESET|USBA_END_OF_RESUME
                        |USBA_DET_SUSPEND|USBA_WAKE_UP);
                generate_bias_pulse(udc);
                reset_all_endpoints(udc);

                /* Notify the function driver of the disconnect-like
                 * transition before re-advertising a speed */
                if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
                        udc->gadget.speed = USB_SPEED_UNKNOWN;
                        spin_unlock(&udc->lock);
                        usb_gadget_udc_reset(&udc->gadget, udc->driver);
                        spin_lock(&udc->lock);
                }

                if (status & USBA_HIGH_SPEED)
                        udc->gadget.speed = USB_SPEED_HIGH;
                else
                        udc->gadget.speed = USB_SPEED_FULL;
                DBG(DBG_BUS, "%s bus reset detected\n",
                        usb_speed_string(udc->gadget.speed));

                /* Reconfigure ep0 and arm it for SETUP packets */
                ep0 = &udc->usba_ep[0];
                ep0->ep.desc = &usba_ep0_desc;
                ep0->state = WAIT_FOR_SETUP;
                usba_ep_writel(ep0, CFG,
                                (USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
                                | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
                                | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
                usba_ep_writel(ep0, CTL_ENB,
                                USBA_EPT_ENABLE | USBA_RX_SETUP);

                /* If we get reset while suspended... */
                udc->suspended = false;
                usba_int_enb_clear(udc, USBA_WAKE_UP);

                usba_int_enb_set(udc, USBA_BF(EPT_INT, 1) |
                                      USBA_DET_SUSPEND | USBA_END_OF_RESUME);

                /*
                 * Unclear why we hit this irregularly, e.g. in usbtest,
                 * but it's clearly harmless...
                 */
                if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
                        dev_err(&udc->pdev->dev,
                                "ODD: EP0 configuration is invalid!\n");

                /* Preallocate other endpoints */
                n = fifo_mode ?
udc->num_ep : udc->configured_ep;
                for (i = 1; i < n; i++) {
                        ep = &udc->usba_ep[i];
                        usba_ep_writel(ep, CFG, ep->ept_cfg);
                        if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
                                dev_err(&udc->pdev->dev,
                                        "ODD: EP%d configuration is invalid!\n", i);
                }
        }

        spin_unlock(&udc->lock);

        return IRQ_HANDLED;
}

/*
 * Enable the peripheral (pclk) and host (hclk) clocks, keeping the
 * device awake while they run.  Idempotent via udc->clocked.
 * Returns 0 or a clk_prepare_enable() error.
 */
static int start_clock(struct usba_udc *udc)
{
        int ret;

        if (udc->clocked)
                return 0;

        pm_stay_awake(&udc->pdev->dev);

        ret = clk_prepare_enable(udc->pclk);
        if (ret)
                return ret;
        ret = clk_prepare_enable(udc->hclk);
        if (ret) {
                /* Undo the pclk enable so the clocks stay balanced */
                clk_disable_unprepare(udc->pclk);
                return ret;
        }

        udc->clocked = true;
        return 0;
}

/* Counterpart of start_clock(); idempotent via udc->clocked */
static void stop_clock(struct usba_udc *udc)
{
        if (!udc->clocked)
                return;

        clk_disable_unprepare(udc->hclk);
        clk_disable_unprepare(udc->pclk);

        udc->clocked = false;

        pm_relax(&udc->pdev->dev);
}

/*
 * Power up the controller: start clocks, enable the transceiver and
 * arm only the END_OF_RESET interrupt so enumeration can begin.
 * Skipped (after clock start) while the controller is suspended.
 */
static int usba_start(struct usba_udc *udc)
{
        unsigned long flags;
        int ret;

        ret = start_clock(udc);
        if (ret)
                return ret;

        if (udc->suspended)
                return 0;

        spin_lock_irqsave(&udc->lock, flags);
        toggle_bias(udc, 1);
        usba_writel(udc, CTRL, USBA_ENABLE_MASK);
        /* Clear all requested and pending interrupts... */
        usba_writel(udc, INT_ENB, 0);
        udc->int_enb_cache = 0;
        usba_writel(udc, INT_CLR,
                USBA_END_OF_RESET|USBA_END_OF_RESUME
                |USBA_DET_SUSPEND|USBA_WAKE_UP);
        /* ...and enable just 'reset' IRQ to get us started */
        usba_int_enb_set(udc, USBA_END_OF_RESET);
        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}

/*
 * Power the controller down: fail outstanding ep0 requests, drop the
 * DP pullup and stop the clocks.  No-op while suspended.
 */
static void usba_stop(struct usba_udc *udc)
{
        unsigned long flags;

        if (udc->suspended)
                return;

        spin_lock_irqsave(&udc->lock, flags);
        udc->gadget.speed = USB_SPEED_UNKNOWN;
        reset_all_endpoints(udc);

        /* This will also disable the DP pullup */
        toggle_bias(udc, 0);
        usba_writel(udc, CTRL, USBA_DISABLE_MASK);
        spin_unlock_irqrestore(&udc->lock, flags);

        stop_clock(udc);
}

/*
 * Threaded handler for the Vbus GPIO interrupt: starts or stops the
 * controller when the Vbus state actually changes, and notifies the
 * function driver on disconnect.
 */
static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
{
        struct usba_udc *udc = devid;
        int vbus;

        /* debounce */
        udelay(10);

        mutex_lock(&udc->vbus_mutex);

        vbus = vbus_is_present(udc);
        if (vbus != udc->vbus_prev) {
                if (vbus) {
                        usba_start(udc);
                } else {
                        udc->suspended = false;
                        usba_stop(udc);

                        if (udc->driver->disconnect)
                                udc->driver->disconnect(&udc->gadget);
                }
                udc->vbus_prev = vbus;
        }

        mutex_unlock(&udc->vbus_mutex);
        return IRQ_HANDLED;
}

/*
 * usb_gadget_ops.udc_start: bind the function driver, enable the Vbus
 * IRQ if we have one, and power up immediately when Vbus is already
 * present.
 */
static int atmel_usba_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
{
        int ret;
        struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);
        udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
        udc->driver = driver;
        spin_unlock_irqrestore(&udc->lock, flags);

        mutex_lock(&udc->vbus_mutex);

        if (udc->vbus_pin)
                enable_irq(gpiod_to_irq(udc->vbus_pin));

        /* If Vbus is present, enable the controller and wait for reset
         */
        udc->vbus_prev = vbus_is_present(udc);
        if (udc->vbus_prev) {
                ret = usba_start(udc);
                if (ret)
                        goto err;
        }

        mutex_unlock(&udc->vbus_mutex);
        return 0;

err:
        /* Undo everything done above so the gadget stays unbound */
        if (udc->vbus_pin)
                disable_irq(gpiod_to_irq(udc->vbus_pin));

        mutex_unlock(&udc->vbus_mutex);

        spin_lock_irqsave(&udc->lock, flags);
        udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
        udc->driver = NULL;
        spin_unlock_irqrestore(&udc->lock, flags);
        return ret;
}

/*
 * usb_gadget_ops.udc_stop: disable the Vbus IRQ, power the controller
 * down and unbind the function driver.
 */
static int atmel_usba_stop(struct usb_gadget *gadget)
{
        struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);

        if (udc->vbus_pin)
                disable_irq(gpiod_to_irq(udc->vbus_pin));

        if (fifo_mode == 0)
                udc->configured_ep = 1;

        udc->suspended = false;
        usba_stop(udc);

        udc->driver = NULL;

        return 0;
}

/* at91sam9rl erratum workaround: switch the UTMI bias on/off via PMC */
static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
{
        regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
                           is_on ? AT91_PMC_BIASEN : 0);
}

/* at91sam9g45 erratum workaround: pulse the UTMI bias (off then on) */
static void at91sam9g45_pulse_bias(struct usba_udc *udc)
{
        regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
        regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
                           AT91_PMC_BIASEN);
}

static const struct usba_udc_errata at91sam9rl_errata = {
        .toggle_bias = at91sam9rl_toggle_bias,
};

static const struct usba_udc_errata at91sam9g45_errata = {
        .pulse_bias = at91sam9g45_pulse_bias,
};

static const struct of_device_id atmel_udc_dt_ids[] = {
        { .compatible = "atmel,at91sam9rl-udc", .data = &at91sam9rl_errata },
        { .compatible = "atmel,at91sam9g45-udc", .data = &at91sam9g45_errata },
        { .compatible = "atmel,sama5d3-udc" },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);

/*
 * Parse the device-tree node: resolve errata and the PMC syscon,
 * discover the Vbus GPIO and build the endpoint array, either from DT
 * child nodes (fifo_mode == 0) or from the built-in FIFO table.
 * Returns the endpoint array or an ERR_PTR.
 */
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
                                                    struct usba_udc *udc)
{
        u32 val;
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match;
        struct device_node *pp;
        int i, ret;
        struct usba_ep *eps, *ep;

        match = of_match_node(atmel_udc_dt_ids, np);
        if (!match)
                return ERR_PTR(-EINVAL);

        udc->errata = match->data;
        /* Try the PMC variants in turn; only errata handling needs it */
        udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
        if (IS_ERR(udc->pmc))
                udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
        if (IS_ERR(udc->pmc))
                udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
        if (udc->errata && IS_ERR(udc->pmc))
                return ERR_CAST(udc->pmc);

        udc->num_ep = 0;

        udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus",
                                                GPIOD_IN);

        if (fifo_mode == 0) {
                /* One endpoint per DT child node */
                pp = NULL;
                while ((pp = of_get_next_child(np, pp)))
                        udc->num_ep++;
                udc->configured_ep = 1;
        } else {
                udc->num_ep = usba_config_fifo_table(udc);
        }

        eps =
devm_kcalloc(&pdev->dev, udc->num_ep, sizeof(struct usba_ep),
                           GFP_KERNEL);
        if (!eps)
                return ERR_PTR(-ENOMEM);

        udc->gadget.ep0 = &eps[0].ep;

        INIT_LIST_HEAD(&eps[0].ep.ep_list);

        pp = NULL;
        i = 0;
        while ((pp = of_get_next_child(np, pp)) && i < udc->num_ep) {
                ep = &eps[i];

                ret = of_property_read_u32(pp, "reg", &val);
                if (ret) {
                        dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
                        goto err;
                }
                /* In fifo_mode the table, not DT, decides the hw endpoint */
                ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : val;

                ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
                if (ret) {
                        dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
                        goto err;
                }
                if (fifo_mode) {
                        /* DT may only shrink the table's FIFO size */
                        if (val < udc->fifo_cfg[i].fifo_size) {
                                dev_warn(&pdev->dev,
                                         "Using max fifo-size value from DT\n");
                                ep->fifo_size = val;
                        } else {
                                ep->fifo_size = udc->fifo_cfg[i].fifo_size;
                        }
                } else {
                        ep->fifo_size = val;
                }

                ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
                if (ret) {
                        dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
                        goto err;
                }
                if (fifo_mode) {
                        /* Likewise, DT may only shrink the bank count */
                        if (val < udc->fifo_cfg[i].nr_banks) {
                                dev_warn(&pdev->dev,
                                         "Using max nb-banks value from DT\n");
                                ep->nr_banks = val;
                        } else {
                                ep->nr_banks = udc->fifo_cfg[i].nr_banks;
                        }
                } else {
                        ep->nr_banks = val;
                }

                ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
                ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");

                sprintf(ep->name, "ep%d", ep->index);
                ep->ep.name = ep->name;

                ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
                ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
                ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
                ep->ep.ops = &usba_ep_ops;
                usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
                ep->udc = udc;
                INIT_LIST_HEAD(&ep->queue);

                if (ep->index == 0) {
                        ep->ep.caps.type_control = true;
                } else {
                        ep->ep.caps.type_iso = ep->can_isoc;
                        ep->ep.caps.type_bulk = true;
                        ep->ep.caps.type_int = true;
                }

                ep->ep.caps.dir_in = true;
                ep->ep.caps.dir_out = true;

                if (fifo_mode != 0) {
                        /*
                         * Generate ept_cfg based on FIFO size and
                         * banks number
                         */
                        if (ep->fifo_size <= 8)
                                ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
                        else
                                /* LSB is bit 1, not 0 */
                                ep->ept_cfg =
                                  USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);

                        ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
                }

                /* ep0 is exposed via gadget.ep0, not the ep_list */
                if (i)
                        list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

                i++;
        }

        if (i == 0) {
                dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
                ret = -EINVAL;
                goto err;
        }

        return eps;
err:
        return ERR_PTR(ret);
}

/*
 * Platform probe: map the register and FIFO resources, fetch clocks
 * and IRQs, quiesce the controller, parse the DT endpoint layout and
 * register the gadget with the UDC core.
 */
static int usba_udc_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct clk *pclk, *hclk;
        struct usba_udc *udc;
        int irq, ret, i;

        udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
        if (!udc)
                return -ENOMEM;

        udc->gadget = usba_gadget_template;
        INIT_LIST_HEAD(&udc->gadget.ep_list);

        res = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
        udc->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(udc->regs))
                return PTR_ERR(udc->regs);
        dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n",
                 res, udc->regs);

        res = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
        udc->fifo = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(udc->fifo))
                return PTR_ERR(udc->fifo);
        dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        pclk = devm_clk_get(&pdev->dev, "pclk");
        if (IS_ERR(pclk))
                return
PTR_ERR(pclk);
        hclk = devm_clk_get(&pdev->dev, "hclk");
        if (IS_ERR(hclk))
                return PTR_ERR(hclk);

        spin_lock_init(&udc->lock);
        mutex_init(&udc->vbus_mutex);
        udc->pdev = pdev;
        udc->pclk = pclk;
        udc->hclk = hclk;

        platform_set_drvdata(pdev, udc);

        /* Make sure we start from a clean slate */
        ret = clk_prepare_enable(pclk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
                return ret;
        }

        usba_writel(udc, CTRL, USBA_DISABLE_MASK);
        clk_disable_unprepare(pclk);

        udc->usba_ep = atmel_udc_of_init(pdev, udc);

        toggle_bias(udc, 0);

        if (IS_ERR(udc->usba_ep))
                return PTR_ERR(udc->usba_ep);

        ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0,
                                "atmel_usba_udc", udc);
        if (ret) {
                dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
                        irq, ret);
                return ret;
        }
        udc->irq = irq;

        if (udc->vbus_pin) {
                /* Keep the Vbus IRQ masked until udc_start() */
                irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN);
                ret = devm_request_threaded_irq(&pdev->dev,
                                gpiod_to_irq(udc->vbus_pin), NULL,
                                usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
                                "atmel_usba_udc", udc);
                if (ret) {
                        /* Best effort: continue without Vbus sensing */
                        udc->vbus_pin = NULL;
                        dev_warn(&udc->pdev->dev,
                                 "failed to request vbus irq; "
                                 "assuming always on\n");
                }
        }

        ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
        if (ret)
                return ret;
        device_init_wakeup(&pdev->dev, 1);

        usba_init_debugfs(udc);
        for (i = 1; i < udc->num_ep; i++)
                usba_ep_init_debugfs(udc, &udc->usba_ep[i]);

        return 0;
}

/* Platform remove: unregister the gadget and tear down debugfs */
static int usba_udc_remove(struct platform_device *pdev)
{
        struct usba_udc *udc;
        int i;

        udc = platform_get_drvdata(pdev);

        device_init_wakeup(&pdev->dev, 0);
        usb_del_gadget_udc(&udc->gadget);

        for (i = 1; i < udc->num_ep; i++)
                usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
        usba_cleanup_debugfs(udc);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: stop the controller unless it must stay alive as a
 * wakeup source, in which case only arm the wake IRQs.
 */
static int usba_udc_suspend(struct device *dev)
{
        struct usba_udc *udc = dev_get_drvdata(dev);

        /* Not started */
        if (!udc->driver)
                return 0;

        mutex_lock(&udc->vbus_mutex);

        if (!device_may_wakeup(dev)) {
                udc->suspended = false;
                usba_stop(udc);
                goto out;
        }

        /*
         * Device may wake up. We stay clocked if we failed
         * to request vbus irq, assuming always on.
         */
        if (udc->vbus_pin) {
                /* FIXME: right to stop here...??? */
                usba_stop(udc);
                enable_irq_wake(gpiod_to_irq(udc->vbus_pin));
        }

        enable_irq_wake(udc->irq);

out:
        mutex_unlock(&udc->vbus_mutex);
        return 0;
}

/*
 * System resume: disarm the wake IRQs and restart the controller when
 * Vbus is present.
 */
static int usba_udc_resume(struct device *dev)
{
        struct usba_udc *udc = dev_get_drvdata(dev);

        /* Not started */
        if (!udc->driver)
                return 0;

        if (device_may_wakeup(dev)) {
                if (udc->vbus_pin)
                        disable_irq_wake(gpiod_to_irq(udc->vbus_pin));

                disable_irq_wake(udc->irq);
        }

        /* If Vbus is present, enable the controller and wait for reset */
        mutex_lock(&udc->vbus_mutex);
        udc->vbus_prev = vbus_is_present(udc);
        if (udc->vbus_prev)
                usba_start(udc);
        mutex_unlock(&udc->vbus_mutex);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);

static struct platform_driver udc_driver = {
        .remove         = usba_udc_remove,
        .driver         = {
                .name           = "atmel_usba_udc",
                .pm             = &usba_udc_pm_ops,
                .of_match_table = atmel_udc_dt_ids,
        },
};

module_platform_driver_probe(udc_driver, usba_udc_probe);

MODULE_DESCRIPTION("Atmel USBA UDC driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usba_udc");