1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Handles the Intel 27x USB Device Controller (UDC) 4 * 5 * Inspired by original driver by Frank Becker, David Brownell, and others. 6 * Copyright (C) 2008 Robert Jarzmik 7 */ 8 #include <linux/module.h> 9 #include <linux/kernel.h> 10 #include <linux/types.h> 11 #include <linux/errno.h> 12 #include <linux/err.h> 13 #include <linux/platform_device.h> 14 #include <linux/delay.h> 15 #include <linux/list.h> 16 #include <linux/interrupt.h> 17 #include <linux/proc_fs.h> 18 #include <linux/clk.h> 19 #include <linux/irq.h> 20 #include <linux/gpio.h> 21 #include <linux/gpio/consumer.h> 22 #include <linux/slab.h> 23 #include <linux/prefetch.h> 24 #include <linux/byteorder/generic.h> 25 #include <linux/platform_data/pxa2xx_udc.h> 26 #include <linux/of_device.h> 27 #include <linux/of_gpio.h> 28 29 #include <linux/usb.h> 30 #include <linux/usb/ch9.h> 31 #include <linux/usb/gadget.h> 32 #include <linux/usb/phy.h> 33 34 #include "pxa27x_udc.h" 35 36 /* 37 * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x 38 * series processors. 39 * 40 * Such controller drivers work with a gadget driver. The gadget driver 41 * returns descriptors, implements configuration and data protocols used 42 * by the host to interact with this device, and allocates endpoints to 43 * the different protocol interfaces. The controller driver virtualizes 44 * usb hardware so that the gadget drivers will be more portable. 45 * 46 * This UDC hardware wants to implement a bit too much USB protocol. The 47 * biggest issues are: that the endpoints have to be set up before the 48 * controller can be enabled (minor, and not uncommon); and each endpoint 49 * can only have one configuration, interface and alternative interface 50 * number (major, and very unusual). Once set up, these cannot be changed 51 * without a controller reset. 52 * 53 * The workaround is to setup all combinations necessary for the gadgets which 54 * will work with this driver. This is done in pxa_udc structure, statically. 55 * See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep. 56 * (You could modify this if needed. Some drivers have a "fifo_mode" module 57 * parameter to facilitate such changes.) 58 * 59 * The combinations have been tested with these gadgets : 60 * - zero gadget 61 * - file storage gadget 62 * - ether gadget 63 * 64 * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is 65 * made of UDC's double buffering either. USB "On-The-Go" is not implemented. 66 * 67 * All the requests are handled the same way : 68 * - the drivers tries to handle the request directly to the IO 69 * - if the IO fifo is not big enough, the remaining is send/received in 70 * interrupt handling. 71 */ 72 73 #define DRIVER_VERSION "2008-04-18" 74 #define DRIVER_DESC "PXA 27x USB Device Controller driver" 75 76 static const char driver_name[] = "pxa27x_udc"; 77 static struct pxa_udc *the_controller; 78 79 static void handle_ep(struct pxa_ep *ep); 80 81 /* 82 * Debug filesystem 83 */ 84 #ifdef CONFIG_USB_GADGET_DEBUG_FS 85 86 #include <linux/debugfs.h> 87 #include <linux/uaccess.h> 88 #include <linux/seq_file.h> 89 90 static int state_dbg_show(struct seq_file *s, void *p) 91 { 92 struct pxa_udc *udc = s->private; 93 u32 tmp; 94 95 if (!udc->driver) 96 return -ENODEV; 97 98 /* basic device status */ 99 seq_printf(s, DRIVER_DESC "\n" 100 "%s version: %s\n" 101 "Gadget driver: %s\n", 102 driver_name, DRIVER_VERSION, 103 udc->driver ? 
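/*
 * Request flow overview (illustrative summary of the handlers below): a
 * gadget driver queues a request through usb_ep_queue(), which lands in
 * pxa_ep_queue(); as much data as fits is moved through the fifo with
 * write_fifo()/read_fifo(), the remainder is transferred from the endpoint
 * interrupt via handle_ep(), and req_done() finally hands the request back
 * to the gadget with usb_gadget_giveback_request().
 */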
udc->driver->driver.name : "(none)"); 104 105 tmp = udc_readl(udc, UDCCR); 106 seq_printf(s, 107 "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), con=%d,inter=%d,altinter=%d\n", 108 tmp, 109 (tmp & UDCCR_OEN) ? " oen":"", 110 (tmp & UDCCR_AALTHNP) ? " aalthnp":"", 111 (tmp & UDCCR_AHNP) ? " rem" : "", 112 (tmp & UDCCR_BHNP) ? " rstir" : "", 113 (tmp & UDCCR_DWRE) ? " dwre" : "", 114 (tmp & UDCCR_SMAC) ? " smac" : "", 115 (tmp & UDCCR_EMCE) ? " emce" : "", 116 (tmp & UDCCR_UDR) ? " udr" : "", 117 (tmp & UDCCR_UDA) ? " uda" : "", 118 (tmp & UDCCR_UDE) ? " ude" : "", 119 (tmp & UDCCR_ACN) >> UDCCR_ACN_S, 120 (tmp & UDCCR_AIN) >> UDCCR_AIN_S, 121 (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S); 122 /* registers for device and ep0 */ 123 seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n", 124 udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1)); 125 seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n", 126 udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1)); 127 seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR)); 128 seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, reconfig=%lu\n", 129 udc->stats.irqs_reset, udc->stats.irqs_suspend, 130 udc->stats.irqs_resume, udc->stats.irqs_reconfig); 131 132 return 0; 133 } 134 135 static int queues_dbg_show(struct seq_file *s, void *p) 136 { 137 struct pxa_udc *udc = s->private; 138 struct pxa_ep *ep; 139 struct pxa27x_request *req; 140 int i, maxpkt; 141 142 if (!udc->driver) 143 return -ENODEV; 144 145 /* dump endpoint queues */ 146 for (i = 0; i < NR_PXA_ENDPOINTS; i++) { 147 ep = &udc->pxa_ep[i]; 148 maxpkt = ep->fifo_size; 149 seq_printf(s, "%-12s max_pkt=%d %s\n", 150 EPNAME(ep), maxpkt, "pio"); 151 152 if (list_empty(&ep->queue)) { 153 seq_puts(s, "\t(nothing queued)\n"); 154 continue; 155 } 156 157 list_for_each_entry(req, &ep->queue, queue) { 158 seq_printf(s, "\treq %p len %d/%d buf %p\n", 159 &req->req, req->req.actual, 160 req->req.length, req->req.buf); 161 } 162 } 163 164 return 0; 165 } 166 167 static int eps_dbg_show(struct seq_file *s, void *p) 168 { 169 struct pxa_udc *udc = s->private; 170 struct pxa_ep *ep; 171 int i; 172 u32 tmp; 173 174 if (!udc->driver) 175 return -ENODEV; 176 177 ep = &udc->pxa_ep[0]; 178 tmp = udc_ep_readl(ep, UDCCSR); 179 seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n", 180 tmp, 181 (tmp & UDCCSR0_SA) ? " sa" : "", 182 (tmp & UDCCSR0_RNE) ? " rne" : "", 183 (tmp & UDCCSR0_FST) ? " fst" : "", 184 (tmp & UDCCSR0_SST) ? " sst" : "", 185 (tmp & UDCCSR0_DME) ? " dme" : "", 186 (tmp & UDCCSR0_IPR) ? " ipr" : "", 187 (tmp & UDCCSR0_OPC) ? " opc" : ""); 188 for (i = 0; i < NR_PXA_ENDPOINTS; i++) { 189 ep = &udc->pxa_ep[i]; 190 tmp = i? 
udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR); 191 seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n", 192 EPNAME(ep), 193 ep->stats.in_bytes, ep->stats.in_ops, 194 ep->stats.out_bytes, ep->stats.out_ops, 195 ep->stats.irqs, 196 tmp, udc_ep_readl(ep, UDCCSR), 197 udc_ep_readl(ep, UDCBCR)); 198 } 199 200 return 0; 201 } 202 203 static int eps_dbg_open(struct inode *inode, struct file *file) 204 { 205 return single_open(file, eps_dbg_show, inode->i_private); 206 } 207 208 static int queues_dbg_open(struct inode *inode, struct file *file) 209 { 210 return single_open(file, queues_dbg_show, inode->i_private); 211 } 212 213 static int state_dbg_open(struct inode *inode, struct file *file) 214 { 215 return single_open(file, state_dbg_show, inode->i_private); 216 } 217 218 static const struct file_operations state_dbg_fops = { 219 .owner = THIS_MODULE, 220 .open = state_dbg_open, 221 .llseek = seq_lseek, 222 .read = seq_read, 223 .release = single_release, 224 }; 225 226 static const struct file_operations queues_dbg_fops = { 227 .owner = THIS_MODULE, 228 .open = queues_dbg_open, 229 .llseek = seq_lseek, 230 .read = seq_read, 231 .release = single_release, 232 }; 233 234 static const struct file_operations eps_dbg_fops = { 235 .owner = THIS_MODULE, 236 .open = eps_dbg_open, 237 .llseek = seq_lseek, 238 .read = seq_read, 239 .release = single_release, 240 }; 241 242 static void pxa_init_debugfs(struct pxa_udc *udc) 243 { 244 struct dentry *root, *state, *queues, *eps; 245 246 root = debugfs_create_dir(udc->gadget.name, NULL); 247 if (IS_ERR(root) || !root) 248 goto err_root; 249 250 state = debugfs_create_file("udcstate", 0400, root, udc, 251 &state_dbg_fops); 252 if (!state) 253 goto err_state; 254 queues = debugfs_create_file("queues", 0400, root, udc, 255 &queues_dbg_fops); 256 if (!queues) 257 goto err_queues; 258 eps = debugfs_create_file("epstate", 0400, root, udc, 259 &eps_dbg_fops); 260 if (!eps) 261 goto err_eps; 262 263 udc->debugfs_root = root; 264 udc->debugfs_state = state; 265 udc->debugfs_queues = queues; 266 udc->debugfs_eps = eps; 267 return; 268 err_eps: 269 debugfs_remove(eps); 270 err_queues: 271 debugfs_remove(queues); 272 err_state: 273 debugfs_remove(root); 274 err_root: 275 dev_err(udc->dev, "debugfs is not available\n"); 276 } 277 278 static void pxa_cleanup_debugfs(struct pxa_udc *udc) 279 { 280 debugfs_remove(udc->debugfs_eps); 281 debugfs_remove(udc->debugfs_queues); 282 debugfs_remove(udc->debugfs_state); 283 debugfs_remove(udc->debugfs_root); 284 udc->debugfs_eps = NULL; 285 udc->debugfs_queues = NULL; 286 udc->debugfs_state = NULL; 287 udc->debugfs_root = NULL; 288 } 289 290 #else 291 static inline void pxa_init_debugfs(struct pxa_udc *udc) 292 { 293 } 294 295 static inline void pxa_cleanup_debugfs(struct pxa_udc *udc) 296 { 297 } 298 #endif 299 300 /** 301 * is_match_usb_pxa - check if usb_ep and pxa_ep match 302 * @udc_usb_ep: usb endpoint 303 * @ep: pxa endpoint 304 * @config: configuration required in pxa_ep 305 * @interface: interface required in pxa_ep 306 * @altsetting: altsetting required in pxa_ep 307 * 308 * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise 309 */ 310 static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep, 311 int config, int interface, int altsetting) 312 { 313 if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr) 314 return 0; 315 if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in) 316 return 0; 317 if 
(usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
		return 0;
	if ((ep->config != config) || (ep->interface != interface)
			|| (ep->alternate != altsetting))
		return 0;
	return 1;
}

/**
 * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
 * @udc: pxa udc
 * @udc_usb_ep: udc_usb_ep structure
 *
 * Match udc_usb_ep against all available pxa_ep, to see if one matches.
 * This is necessary because of the strong pxa hardware restriction requiring
 * that once pxa endpoints are initialized, their configuration is frozen, and
 * no change can be made to their address, direction, or in which
 * configuration, interface or altsetting they are active ... which differs
 * from more usual models where endpoints are roughly just addressable fifos,
 * and configuration events are left up to gadget drivers (like all control
 * messages).
 *
 * Note that there is still a blurry point here :
 * - we rely on the UDCCR register fields "active interface" and "active
 *   altsetting". This makes no sense with regard to the USB spec, where
 *   multiple interfaces are active at the same time.
 * - if we knew for sure that the pxa can handle multiple interfaces at the
 *   same time, assuming Intel's Developer Guide is wrong, this function
 *   should be reviewed, and a cache of (iface, altsetting) couples should
 *   be kept in the pxa_udc structure. In that case this function would match
 *   against the cache of couples instead of the "last altsetting" set up.
 *
 * Returns the matched pxa_ep structure or NULL if none found
 */
static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
				  struct udc_usb_ep *udc_usb_ep)
{
	int i;
	struct pxa_ep *ep;
	int cfg = udc->config;
	int iface = udc->last_interface;
	int alt = udc->last_alternate;

	if (udc_usb_ep == &udc->udc_usb_ep[0])
		return &udc->pxa_ep[0];

	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
		ep = &udc->pxa_ep[i];
		if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
			return ep;
	}
	return NULL;
}

/**
 * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
 * @udc: pxa udc
 *
 * Context: in_interrupt()
 *
 * Updates the pxa_ep field in all udc_usb_ep structures, if this field was
 * previously set up (and is not NULL). The update is necessary if a
 * configuration change or altsetting change was issued by the USB host.
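 *
 * For instance (illustrative values only): after the host selects
 * configuration 1, interface 0, altsetting 1, udc->config, udc->last_interface
 * and udc->last_alternate become 1, 0 and 1, and a request queued on the
 * usb_ep bound to "address 1, bulk IN" is routed by find_pxa_ep() to the
 * pxa_ep declared with addr=1, dir_in=1, config=1, interface=0, alternate=1.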
379 */ 380 static void update_pxa_ep_matches(struct pxa_udc *udc) 381 { 382 int i; 383 struct udc_usb_ep *udc_usb_ep; 384 385 for (i = 1; i < NR_USB_ENDPOINTS; i++) { 386 udc_usb_ep = &udc->udc_usb_ep[i]; 387 if (udc_usb_ep->pxa_ep) 388 udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep); 389 } 390 } 391 392 /** 393 * pio_irq_enable - Enables irq generation for one endpoint 394 * @ep: udc endpoint 395 */ 396 static void pio_irq_enable(struct pxa_ep *ep) 397 { 398 struct pxa_udc *udc = ep->dev; 399 int index = EPIDX(ep); 400 u32 udcicr0 = udc_readl(udc, UDCICR0); 401 u32 udcicr1 = udc_readl(udc, UDCICR1); 402 403 if (index < 16) 404 udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2))); 405 else 406 udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2))); 407 } 408 409 /** 410 * pio_irq_disable - Disables irq generation for one endpoint 411 * @ep: udc endpoint 412 */ 413 static void pio_irq_disable(struct pxa_ep *ep) 414 { 415 struct pxa_udc *udc = ep->dev; 416 int index = EPIDX(ep); 417 u32 udcicr0 = udc_readl(udc, UDCICR0); 418 u32 udcicr1 = udc_readl(udc, UDCICR1); 419 420 if (index < 16) 421 udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2))); 422 else 423 udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2))); 424 } 425 426 /** 427 * udc_set_mask_UDCCR - set bits in UDCCR 428 * @udc: udc device 429 * @mask: bits to set in UDCCR 430 * 431 * Sets bits in UDCCR, leaving DME and FST bits as they were. 432 */ 433 static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask) 434 { 435 u32 udccr = udc_readl(udc, UDCCR); 436 udc_writel(udc, UDCCR, 437 (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS)); 438 } 439 440 /** 441 * udc_clear_mask_UDCCR - clears bits in UDCCR 442 * @udc: udc device 443 * @mask: bit to clear in UDCCR 444 * 445 * Clears bits in UDCCR, leaving DME and FST bits as they were. 446 */ 447 static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask) 448 { 449 u32 udccr = udc_readl(udc, UDCCR); 450 udc_writel(udc, UDCCR, 451 (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS)); 452 } 453 454 /** 455 * ep_write_UDCCSR - set bits in UDCCSR 456 * @udc: udc device 457 * @mask: bits to set in UDCCR 458 * 459 * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*). 460 * 461 * A specific case is applied to ep0 : the ACM bit is always set to 1, for 462 * SET_INTERFACE and SET_CONFIGURATION. 463 */ 464 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask) 465 { 466 if (is_ep0(ep)) 467 mask |= UDCCSR0_ACM; 468 udc_ep_writel(ep, UDCCSR, mask); 469 } 470 471 /** 472 * ep_count_bytes_remain - get how many bytes in udc endpoint 473 * @ep: udc endpoint 474 * 475 * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP) 476 */ 477 static int ep_count_bytes_remain(struct pxa_ep *ep) 478 { 479 if (ep->dir_in) 480 return -EOPNOTSUPP; 481 return udc_ep_readl(ep, UDCBCR) & 0x3ff; 482 } 483 484 /** 485 * ep_is_empty - checks if ep has byte ready for reading 486 * @ep: udc endpoint 487 * 488 * If endpoint is the control endpoint, checks if there are bytes in the 489 * control endpoint fifo. If endpoint is a data endpoint, checks if bytes 490 * are ready for reading on OUT endpoint. 
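 *
 * As an example of intended use, read_packet() below relies on this to detect
 * a zero length packet: when epout_has_pkt() reports a completed packet but
 * ep_is_empty() is also true, no data is read and the count is forced to 0.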
491 * 492 * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint 493 */ 494 static int ep_is_empty(struct pxa_ep *ep) 495 { 496 int ret; 497 498 if (!is_ep0(ep) && ep->dir_in) 499 return -EOPNOTSUPP; 500 if (is_ep0(ep)) 501 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE); 502 else 503 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE); 504 return ret; 505 } 506 507 /** 508 * ep_is_full - checks if ep has place to write bytes 509 * @ep: udc endpoint 510 * 511 * If endpoint is not the control endpoint and is an IN endpoint, checks if 512 * there is place to write bytes into the endpoint. 513 * 514 * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint 515 */ 516 static int ep_is_full(struct pxa_ep *ep) 517 { 518 if (is_ep0(ep)) 519 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR); 520 if (!ep->dir_in) 521 return -EOPNOTSUPP; 522 return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF)); 523 } 524 525 /** 526 * epout_has_pkt - checks if OUT endpoint fifo has a packet available 527 * @ep: pxa endpoint 528 * 529 * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep. 530 */ 531 static int epout_has_pkt(struct pxa_ep *ep) 532 { 533 if (!is_ep0(ep) && ep->dir_in) 534 return -EOPNOTSUPP; 535 if (is_ep0(ep)) 536 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC); 537 return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC); 538 } 539 540 /** 541 * set_ep0state - Set ep0 automata state 542 * @dev: udc device 543 * @state: state 544 */ 545 static void set_ep0state(struct pxa_udc *udc, int state) 546 { 547 struct pxa_ep *ep = &udc->pxa_ep[0]; 548 char *old_stname = EP0_STNAME(udc); 549 550 udc->ep0state = state; 551 ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname, 552 EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR), 553 udc_ep_readl(ep, UDCBCR)); 554 } 555 556 /** 557 * ep0_idle - Put control endpoint into idle state 558 * @dev: udc device 559 */ 560 static void ep0_idle(struct pxa_udc *dev) 561 { 562 set_ep0state(dev, WAIT_FOR_SETUP); 563 } 564 565 /** 566 * inc_ep_stats_reqs - Update ep stats counts 567 * @ep: physical endpoint 568 * @req: usb request 569 * @is_in: ep direction (USB_DIR_IN or 0) 570 * 571 */ 572 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in) 573 { 574 if (is_in) 575 ep->stats.in_ops++; 576 else 577 ep->stats.out_ops++; 578 } 579 580 /** 581 * inc_ep_stats_bytes - Update ep stats counts 582 * @ep: physical endpoint 583 * @count: bytes transferred on endpoint 584 * @is_in: ep direction (USB_DIR_IN or 0) 585 */ 586 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in) 587 { 588 if (is_in) 589 ep->stats.in_bytes += count; 590 else 591 ep->stats.out_bytes += count; 592 } 593 594 /** 595 * pxa_ep_setup - Sets up an usb physical endpoint 596 * @ep: pxa27x physical endpoint 597 * 598 * Find the physical pxa27x ep, and setup its UDCCR 599 */ 600 static void pxa_ep_setup(struct pxa_ep *ep) 601 { 602 u32 new_udccr; 603 604 new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN) 605 | ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN) 606 | ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN) 607 | ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN) 608 | ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET) 609 | ((ep->dir_in) ? 
UDCCONR_ED : 0) 610 | ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS) 611 | UDCCONR_EE; 612 613 udc_ep_writel(ep, UDCCR, new_udccr); 614 } 615 616 /** 617 * pxa_eps_setup - Sets up all usb physical endpoints 618 * @dev: udc device 619 * 620 * Setup all pxa physical endpoints, except ep0 621 */ 622 static void pxa_eps_setup(struct pxa_udc *dev) 623 { 624 unsigned int i; 625 626 dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev); 627 628 for (i = 1; i < NR_PXA_ENDPOINTS; i++) 629 pxa_ep_setup(&dev->pxa_ep[i]); 630 } 631 632 /** 633 * pxa_ep_alloc_request - Allocate usb request 634 * @_ep: usb endpoint 635 * @gfp_flags: 636 * 637 * For the pxa27x, these can just wrap kmalloc/kfree. gadget drivers 638 * must still pass correctly initialized endpoints, since other controller 639 * drivers may care about how it's currently set up (dma issues etc). 640 */ 641 static struct usb_request * 642 pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 643 { 644 struct pxa27x_request *req; 645 646 req = kzalloc(sizeof *req, gfp_flags); 647 if (!req) 648 return NULL; 649 650 INIT_LIST_HEAD(&req->queue); 651 req->in_use = 0; 652 req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 653 654 return &req->req; 655 } 656 657 /** 658 * pxa_ep_free_request - Free usb request 659 * @_ep: usb endpoint 660 * @_req: usb request 661 * 662 * Wrapper around kfree to free _req 663 */ 664 static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) 665 { 666 struct pxa27x_request *req; 667 668 req = container_of(_req, struct pxa27x_request, req); 669 WARN_ON(!list_empty(&req->queue)); 670 kfree(req); 671 } 672 673 /** 674 * ep_add_request - add a request to the endpoint's queue 675 * @ep: usb endpoint 676 * @req: usb request 677 * 678 * Context: ep->lock held 679 * 680 * Queues the request in the endpoint's queue, and enables the interrupts 681 * on the endpoint. 682 */ 683 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req) 684 { 685 if (unlikely(!req)) 686 return; 687 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, 688 req->req.length, udc_ep_readl(ep, UDCCSR)); 689 690 req->in_use = 1; 691 list_add_tail(&req->queue, &ep->queue); 692 pio_irq_enable(ep); 693 } 694 695 /** 696 * ep_del_request - removes a request from the endpoint's queue 697 * @ep: usb endpoint 698 * @req: usb request 699 * 700 * Context: ep->lock held 701 * 702 * Unqueue the request from the endpoint's queue. If there are no more requests 703 * on the endpoint, and if it's not the control endpoint, interrupts are 704 * disabled on the endpoint. 705 */ 706 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req) 707 { 708 if (unlikely(!req)) 709 return; 710 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, 711 req->req.length, udc_ep_readl(ep, UDCCSR)); 712 713 list_del_init(&req->queue); 714 req->in_use = 0; 715 if (!is_ep0(ep) && list_empty(&ep->queue)) 716 pio_irq_disable(ep); 717 } 718 719 /** 720 * req_done - Complete an usb request 721 * @ep: pxa physical endpoint 722 * @req: pxa request 723 * @status: usb request status sent to gadget API 724 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 725 * 726 * Context: ep->lock held if flags not NULL, else ep->lock released 727 * 728 * Retire a pxa27x usb request. Endpoint must be locked. 
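 *
 * The @pflags convention matters here: when called with the endpoint lock
 * held (e.g. nuke() passes &flags), the lock is dropped around the gadget
 * completion callback and re-acquired afterwards; when called unlocked
 * (e.g. pxa_ep_dequeue() passes NULL), only local irqs are masked around
 * usb_gadget_giveback_request().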
729 */ 730 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status, 731 unsigned long *pflags) 732 { 733 unsigned long flags; 734 735 ep_del_request(ep, req); 736 if (likely(req->req.status == -EINPROGRESS)) 737 req->req.status = status; 738 else 739 status = req->req.status; 740 741 if (status && status != -ESHUTDOWN) 742 ep_dbg(ep, "complete req %p stat %d len %u/%u\n", 743 &req->req, status, 744 req->req.actual, req->req.length); 745 746 if (pflags) 747 spin_unlock_irqrestore(&ep->lock, *pflags); 748 local_irq_save(flags); 749 usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req); 750 local_irq_restore(flags); 751 if (pflags) 752 spin_lock_irqsave(&ep->lock, *pflags); 753 } 754 755 /** 756 * ep_end_out_req - Ends endpoint OUT request 757 * @ep: physical endpoint 758 * @req: pxa request 759 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 760 * 761 * Context: ep->lock held or released (see req_done()) 762 * 763 * Ends endpoint OUT request (completes usb request). 764 */ 765 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, 766 unsigned long *pflags) 767 { 768 inc_ep_stats_reqs(ep, !USB_DIR_IN); 769 req_done(ep, req, 0, pflags); 770 } 771 772 /** 773 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) 774 * @ep: physical endpoint 775 * @req: pxa request 776 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 777 * 778 * Context: ep->lock held or released (see req_done()) 779 * 780 * Ends control endpoint OUT request (completes usb request), and puts 781 * control endpoint into idle state 782 */ 783 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, 784 unsigned long *pflags) 785 { 786 set_ep0state(ep->dev, OUT_STATUS_STAGE); 787 ep_end_out_req(ep, req, pflags); 788 ep0_idle(ep->dev); 789 } 790 791 /** 792 * ep_end_in_req - Ends endpoint IN request 793 * @ep: physical endpoint 794 * @req: pxa request 795 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 796 * 797 * Context: ep->lock held or released (see req_done()) 798 * 799 * Ends endpoint IN request (completes usb request). 800 */ 801 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, 802 unsigned long *pflags) 803 { 804 inc_ep_stats_reqs(ep, USB_DIR_IN); 805 req_done(ep, req, 0, pflags); 806 } 807 808 /** 809 * ep0_end_in_req - Ends control endpoint IN request (ends data stage) 810 * @ep: physical endpoint 811 * @req: pxa request 812 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 813 * 814 * Context: ep->lock held or released (see req_done()) 815 * 816 * Ends control endpoint IN request (completes usb request), and puts 817 * control endpoint into status state 818 */ 819 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, 820 unsigned long *pflags) 821 { 822 set_ep0state(ep->dev, IN_STATUS_STAGE); 823 ep_end_in_req(ep, req, pflags); 824 } 825 826 /** 827 * nuke - Dequeue all requests 828 * @ep: pxa endpoint 829 * @status: usb request status 830 * 831 * Context: ep->lock released 832 * 833 * Dequeues all requests on an endpoint. As a side effect, interrupts will be 834 * disabled on that endpoint (because no more requests). 
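 *
 * Typical uses in this driver: nuke(ep, -ESHUTDOWN) when an endpoint is
 * disabled, nuke(ep, -EPROTO) when a new SETUP packet arrives on ep0, and
 * nuke(ep, -EPIPE) when a stall condition is cleared.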
835 */ 836 static void nuke(struct pxa_ep *ep, int status) 837 { 838 struct pxa27x_request *req; 839 unsigned long flags; 840 841 spin_lock_irqsave(&ep->lock, flags); 842 while (!list_empty(&ep->queue)) { 843 req = list_entry(ep->queue.next, struct pxa27x_request, queue); 844 req_done(ep, req, status, &flags); 845 } 846 spin_unlock_irqrestore(&ep->lock, flags); 847 } 848 849 /** 850 * read_packet - transfer 1 packet from an OUT endpoint into request 851 * @ep: pxa physical endpoint 852 * @req: usb request 853 * 854 * Takes bytes from OUT endpoint and transfers them info the usb request. 855 * If there is less space in request than bytes received in OUT endpoint, 856 * bytes are left in the OUT endpoint. 857 * 858 * Returns how many bytes were actually transferred 859 */ 860 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req) 861 { 862 u32 *buf; 863 int bytes_ep, bufferspace, count, i; 864 865 bytes_ep = ep_count_bytes_remain(ep); 866 bufferspace = req->req.length - req->req.actual; 867 868 buf = (u32 *)(req->req.buf + req->req.actual); 869 prefetchw(buf); 870 871 if (likely(!ep_is_empty(ep))) 872 count = min(bytes_ep, bufferspace); 873 else /* zlp */ 874 count = 0; 875 876 for (i = count; i > 0; i -= 4) 877 *buf++ = udc_ep_readl(ep, UDCDR); 878 req->req.actual += count; 879 880 ep_write_UDCCSR(ep, UDCCSR_PC); 881 882 return count; 883 } 884 885 /** 886 * write_packet - transfer 1 packet from request into an IN endpoint 887 * @ep: pxa physical endpoint 888 * @req: usb request 889 * @max: max bytes that fit into endpoint 890 * 891 * Takes bytes from usb request, and transfers them into the physical 892 * endpoint. If there are no bytes to transfer, doesn't write anything 893 * to physical endpoint. 894 * 895 * Returns how many bytes were actually transferred. 896 */ 897 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req, 898 unsigned int max) 899 { 900 int length, count, remain, i; 901 u32 *buf; 902 u8 *buf_8; 903 904 buf = (u32 *)(req->req.buf + req->req.actual); 905 prefetch(buf); 906 907 length = min(req->req.length - req->req.actual, max); 908 req->req.actual += length; 909 910 remain = length & 0x3; 911 count = length & ~(0x3); 912 for (i = count; i > 0 ; i -= 4) 913 udc_ep_writel(ep, UDCDR, *buf++); 914 915 buf_8 = (u8 *)buf; 916 for (i = remain; i > 0; i--) 917 udc_ep_writeb(ep, UDCDR, *buf_8++); 918 919 ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain, 920 udc_ep_readl(ep, UDCCSR)); 921 922 return length; 923 } 924 925 /** 926 * read_fifo - Transfer packets from OUT endpoint into usb request 927 * @ep: pxa physical endpoint 928 * @req: usb request 929 * 930 * Context: callable when in_interrupt() 931 * 932 * Unload as many packets as possible from the fifo we use for usb OUT 933 * transfers and put them into the request. Caller should have made sure 934 * there's at least one packet ready. 935 * Doesn't complete the request, that's the caller's job 936 * 937 * Returns 1 if the request completed, 0 otherwise 938 */ 939 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 940 { 941 int count, is_short, completed = 0; 942 943 while (epout_has_pkt(ep)) { 944 count = read_packet(ep, req); 945 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); 946 947 is_short = (count < ep->fifo_size); 948 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", 949 udc_ep_readl(ep, UDCCSR), count, is_short ? 
"/S" : "", 950 &req->req, req->req.actual, req->req.length); 951 952 /* completion */ 953 if (is_short || req->req.actual == req->req.length) { 954 completed = 1; 955 break; 956 } 957 /* finished that packet. the next one may be waiting... */ 958 } 959 return completed; 960 } 961 962 /** 963 * write_fifo - transfer packets from usb request into an IN endpoint 964 * @ep: pxa physical endpoint 965 * @req: pxa usb request 966 * 967 * Write to an IN endpoint fifo, as many packets as possible. 968 * irqs will use this to write the rest later. 969 * caller guarantees at least one packet buffer is ready (or a zlp). 970 * Doesn't complete the request, that's the caller's job 971 * 972 * Returns 1 if request fully transferred, 0 if partial transfer 973 */ 974 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 975 { 976 unsigned max; 977 int count, is_short, is_last = 0, completed = 0, totcount = 0; 978 u32 udccsr; 979 980 max = ep->fifo_size; 981 do { 982 udccsr = udc_ep_readl(ep, UDCCSR); 983 if (udccsr & UDCCSR_PC) { 984 ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n", 985 udccsr); 986 ep_write_UDCCSR(ep, UDCCSR_PC); 987 } 988 if (udccsr & UDCCSR_TRN) { 989 ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n", 990 udccsr); 991 ep_write_UDCCSR(ep, UDCCSR_TRN); 992 } 993 994 count = write_packet(ep, req, max); 995 inc_ep_stats_bytes(ep, count, USB_DIR_IN); 996 totcount += count; 997 998 /* last packet is usually short (or a zlp) */ 999 if (unlikely(count < max)) { 1000 is_last = 1; 1001 is_short = 1; 1002 } else { 1003 if (likely(req->req.length > req->req.actual) 1004 || req->req.zero) 1005 is_last = 0; 1006 else 1007 is_last = 1; 1008 /* interrupt/iso maxpacket may not fill the fifo */ 1009 is_short = unlikely(max < ep->fifo_size); 1010 } 1011 1012 if (is_short) 1013 ep_write_UDCCSR(ep, UDCCSR_SP); 1014 1015 /* requests complete when all IN data is in the FIFO */ 1016 if (is_last) { 1017 completed = 1; 1018 break; 1019 } 1020 } while (!ep_is_full(ep)); 1021 1022 ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n", 1023 totcount, is_last ? "/L" : "", is_short ? "/S" : "", 1024 req->req.length - req->req.actual, &req->req); 1025 1026 return completed; 1027 } 1028 1029 /** 1030 * read_ep0_fifo - Transfer packets from control endpoint into usb request 1031 * @ep: control endpoint 1032 * @req: pxa usb request 1033 * 1034 * Special ep0 version of the above read_fifo. Reads as many bytes from control 1035 * endpoint as can be read, and stores them into usb request (limited by request 1036 * maximum length). 1037 * 1038 * Returns 0 if usb request only partially filled, 1 if fully filled 1039 */ 1040 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 1041 { 1042 int count, is_short, completed = 0; 1043 1044 while (epout_has_pkt(ep)) { 1045 count = read_packet(ep, req); 1046 ep_write_UDCCSR(ep, UDCCSR0_OPC); 1047 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); 1048 1049 is_short = (count < ep->fifo_size); 1050 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", 1051 udc_ep_readl(ep, UDCCSR), count, is_short ? 
"/S" : "", 1052 &req->req, req->req.actual, req->req.length); 1053 1054 if (is_short || req->req.actual >= req->req.length) { 1055 completed = 1; 1056 break; 1057 } 1058 } 1059 1060 return completed; 1061 } 1062 1063 /** 1064 * write_ep0_fifo - Send a request to control endpoint (ep0 in) 1065 * @ep: control endpoint 1066 * @req: request 1067 * 1068 * Context: callable when in_interrupt() 1069 * 1070 * Sends a request (or a part of the request) to the control endpoint (ep0 in). 1071 * If the request doesn't fit, the remaining part will be sent from irq. 1072 * The request is considered fully written only if either : 1073 * - last write transferred all remaining bytes, but fifo was not fully filled 1074 * - last write was a 0 length write 1075 * 1076 * Returns 1 if request fully written, 0 if request only partially sent 1077 */ 1078 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 1079 { 1080 unsigned count; 1081 int is_last, is_short; 1082 1083 count = write_packet(ep, req, EP0_FIFO_SIZE); 1084 inc_ep_stats_bytes(ep, count, USB_DIR_IN); 1085 1086 is_short = (count < EP0_FIFO_SIZE); 1087 is_last = ((count == 0) || (count < EP0_FIFO_SIZE)); 1088 1089 /* Sends either a short packet or a 0 length packet */ 1090 if (unlikely(is_short)) 1091 ep_write_UDCCSR(ep, UDCCSR0_IPR); 1092 1093 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n", 1094 count, is_short ? "/S" : "", is_last ? "/L" : "", 1095 req->req.length - req->req.actual, 1096 &req->req, udc_ep_readl(ep, UDCCSR)); 1097 1098 return is_last; 1099 } 1100 1101 /** 1102 * pxa_ep_queue - Queue a request into an IN endpoint 1103 * @_ep: usb endpoint 1104 * @_req: usb request 1105 * @gfp_flags: flags 1106 * 1107 * Context: normally called when !in_interrupt, but callable when in_interrupt() 1108 * in the special case of ep0 setup : 1109 * (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue) 1110 * 1111 * Returns 0 if succedeed, error otherwise 1112 */ 1113 static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, 1114 gfp_t gfp_flags) 1115 { 1116 struct udc_usb_ep *udc_usb_ep; 1117 struct pxa_ep *ep; 1118 struct pxa27x_request *req; 1119 struct pxa_udc *dev; 1120 unsigned long flags; 1121 int rc = 0; 1122 int is_first_req; 1123 unsigned length; 1124 int recursion_detected; 1125 1126 req = container_of(_req, struct pxa27x_request, req); 1127 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1128 1129 if (unlikely(!_req || !_req->complete || !_req->buf)) 1130 return -EINVAL; 1131 1132 if (unlikely(!_ep)) 1133 return -EINVAL; 1134 1135 ep = udc_usb_ep->pxa_ep; 1136 if (unlikely(!ep)) 1137 return -EINVAL; 1138 1139 dev = ep->dev; 1140 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) { 1141 ep_dbg(ep, "bogus device state\n"); 1142 return -ESHUTDOWN; 1143 } 1144 1145 /* iso is always one packet per request, that's the only way 1146 * we can report per-packet status. that also helps with dma. 1147 */ 1148 if (unlikely(EPXFERTYPE_is_ISO(ep) 1149 && req->req.length > ep->fifo_size)) 1150 return -EMSGSIZE; 1151 1152 spin_lock_irqsave(&ep->lock, flags); 1153 recursion_detected = ep->in_handle_ep; 1154 1155 is_first_req = list_empty(&ep->queue); 1156 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", 1157 _req, is_first_req ? 
"yes" : "no", 1158 _req->length, _req->buf); 1159 1160 if (!ep->enabled) { 1161 _req->status = -ESHUTDOWN; 1162 rc = -ESHUTDOWN; 1163 goto out_locked; 1164 } 1165 1166 if (req->in_use) { 1167 ep_err(ep, "refusing to queue req %p (already queued)\n", req); 1168 goto out_locked; 1169 } 1170 1171 length = _req->length; 1172 _req->status = -EINPROGRESS; 1173 _req->actual = 0; 1174 1175 ep_add_request(ep, req); 1176 spin_unlock_irqrestore(&ep->lock, flags); 1177 1178 if (is_ep0(ep)) { 1179 switch (dev->ep0state) { 1180 case WAIT_ACK_SET_CONF_INTERF: 1181 if (length == 0) { 1182 ep_end_in_req(ep, req, NULL); 1183 } else { 1184 ep_err(ep, "got a request of %d bytes while" 1185 "in state WAIT_ACK_SET_CONF_INTERF\n", 1186 length); 1187 ep_del_request(ep, req); 1188 rc = -EL2HLT; 1189 } 1190 ep0_idle(ep->dev); 1191 break; 1192 case IN_DATA_STAGE: 1193 if (!ep_is_full(ep)) 1194 if (write_ep0_fifo(ep, req)) 1195 ep0_end_in_req(ep, req, NULL); 1196 break; 1197 case OUT_DATA_STAGE: 1198 if ((length == 0) || !epout_has_pkt(ep)) 1199 if (read_ep0_fifo(ep, req)) 1200 ep0_end_out_req(ep, req, NULL); 1201 break; 1202 default: 1203 ep_err(ep, "odd state %s to send me a request\n", 1204 EP0_STNAME(ep->dev)); 1205 ep_del_request(ep, req); 1206 rc = -EL2HLT; 1207 break; 1208 } 1209 } else { 1210 if (!recursion_detected) 1211 handle_ep(ep); 1212 } 1213 1214 out: 1215 return rc; 1216 out_locked: 1217 spin_unlock_irqrestore(&ep->lock, flags); 1218 goto out; 1219 } 1220 1221 /** 1222 * pxa_ep_dequeue - Dequeue one request 1223 * @_ep: usb endpoint 1224 * @_req: usb request 1225 * 1226 * Return 0 if no error, -EINVAL or -ECONNRESET otherwise 1227 */ 1228 static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1229 { 1230 struct pxa_ep *ep; 1231 struct udc_usb_ep *udc_usb_ep; 1232 struct pxa27x_request *req; 1233 unsigned long flags; 1234 int rc = -EINVAL; 1235 1236 if (!_ep) 1237 return rc; 1238 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1239 ep = udc_usb_ep->pxa_ep; 1240 if (!ep || is_ep0(ep)) 1241 return rc; 1242 1243 spin_lock_irqsave(&ep->lock, flags); 1244 1245 /* make sure it's actually queued on this endpoint */ 1246 list_for_each_entry(req, &ep->queue, queue) { 1247 if (&req->req == _req) { 1248 rc = 0; 1249 break; 1250 } 1251 } 1252 1253 spin_unlock_irqrestore(&ep->lock, flags); 1254 if (!rc) 1255 req_done(ep, req, -ECONNRESET, NULL); 1256 return rc; 1257 } 1258 1259 /** 1260 * pxa_ep_set_halt - Halts operations on one endpoint 1261 * @_ep: usb endpoint 1262 * @value: 1263 * 1264 * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise 1265 */ 1266 static int pxa_ep_set_halt(struct usb_ep *_ep, int value) 1267 { 1268 struct pxa_ep *ep; 1269 struct udc_usb_ep *udc_usb_ep; 1270 unsigned long flags; 1271 int rc; 1272 1273 1274 if (!_ep) 1275 return -EINVAL; 1276 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1277 ep = udc_usb_ep->pxa_ep; 1278 if (!ep || is_ep0(ep)) 1279 return -EINVAL; 1280 1281 if (value == 0) { 1282 /* 1283 * This path (reset toggle+halt) is needed to implement 1284 * SET_INTERFACE on normal hardware. but it can't be 1285 * done from software on the PXA UDC, and the hardware 1286 * forgets to do it as part of SET_INTERFACE automagic. 
1287 */ 1288 ep_dbg(ep, "only host can clear halt\n"); 1289 return -EROFS; 1290 } 1291 1292 spin_lock_irqsave(&ep->lock, flags); 1293 1294 rc = -EAGAIN; 1295 if (ep->dir_in && (ep_is_full(ep) || !list_empty(&ep->queue))) 1296 goto out; 1297 1298 /* FST, FEF bits are the same for control and non control endpoints */ 1299 rc = 0; 1300 ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF); 1301 if (is_ep0(ep)) 1302 set_ep0state(ep->dev, STALL); 1303 1304 out: 1305 spin_unlock_irqrestore(&ep->lock, flags); 1306 return rc; 1307 } 1308 1309 /** 1310 * pxa_ep_fifo_status - Get how many bytes in physical endpoint 1311 * @_ep: usb endpoint 1312 * 1313 * Returns number of bytes in OUT fifos. Broken for IN fifos. 1314 */ 1315 static int pxa_ep_fifo_status(struct usb_ep *_ep) 1316 { 1317 struct pxa_ep *ep; 1318 struct udc_usb_ep *udc_usb_ep; 1319 1320 if (!_ep) 1321 return -ENODEV; 1322 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1323 ep = udc_usb_ep->pxa_ep; 1324 if (!ep || is_ep0(ep)) 1325 return -ENODEV; 1326 1327 if (ep->dir_in) 1328 return -EOPNOTSUPP; 1329 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep)) 1330 return 0; 1331 else 1332 return ep_count_bytes_remain(ep) + 1; 1333 } 1334 1335 /** 1336 * pxa_ep_fifo_flush - Flushes one endpoint 1337 * @_ep: usb endpoint 1338 * 1339 * Discards all data in one endpoint(IN or OUT), except control endpoint. 1340 */ 1341 static void pxa_ep_fifo_flush(struct usb_ep *_ep) 1342 { 1343 struct pxa_ep *ep; 1344 struct udc_usb_ep *udc_usb_ep; 1345 unsigned long flags; 1346 1347 if (!_ep) 1348 return; 1349 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1350 ep = udc_usb_ep->pxa_ep; 1351 if (!ep || is_ep0(ep)) 1352 return; 1353 1354 spin_lock_irqsave(&ep->lock, flags); 1355 1356 if (unlikely(!list_empty(&ep->queue))) 1357 ep_dbg(ep, "called while queue list not empty\n"); 1358 ep_dbg(ep, "called\n"); 1359 1360 /* for OUT, just read and discard the FIFO contents. */ 1361 if (!ep->dir_in) { 1362 while (!ep_is_empty(ep)) 1363 udc_ep_readl(ep, UDCDR); 1364 } else { 1365 /* most IN status is the same, but ISO can't stall */ 1366 ep_write_UDCCSR(ep, 1367 UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN 1368 | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST)); 1369 } 1370 1371 spin_unlock_irqrestore(&ep->lock, flags); 1372 } 1373 1374 /** 1375 * pxa_ep_enable - Enables usb endpoint 1376 * @_ep: usb endpoint 1377 * @desc: usb endpoint descriptor 1378 * 1379 * Nothing much to do here, as ep configuration is done once and for all 1380 * before udc is enabled. After udc enable, no physical endpoint configuration 1381 * can be changed. 1382 * Function makes sanity checks and flushes the endpoint. 
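 *
 * A minimal sketch of the gadget-side path (assuming the usual gadget core
 * helpers): the function driver picks an endpoint with usb_ep_autoconfig(),
 * then calls usb_ep_enable() on it, which ends up here through
 * pxa_ep_ops.enable with the chosen descriptor.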
1383 */ 1384 static int pxa_ep_enable(struct usb_ep *_ep, 1385 const struct usb_endpoint_descriptor *desc) 1386 { 1387 struct pxa_ep *ep; 1388 struct udc_usb_ep *udc_usb_ep; 1389 struct pxa_udc *udc; 1390 1391 if (!_ep || !desc) 1392 return -EINVAL; 1393 1394 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1395 if (udc_usb_ep->pxa_ep) { 1396 ep = udc_usb_ep->pxa_ep; 1397 ep_warn(ep, "usb_ep %s already enabled, doing nothing\n", 1398 _ep->name); 1399 } else { 1400 ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep); 1401 } 1402 1403 if (!ep || is_ep0(ep)) { 1404 dev_err(udc_usb_ep->dev->dev, 1405 "unable to match pxa_ep for ep %s\n", 1406 _ep->name); 1407 return -EINVAL; 1408 } 1409 1410 if ((desc->bDescriptorType != USB_DT_ENDPOINT) 1411 || (ep->type != usb_endpoint_type(desc))) { 1412 ep_err(ep, "type mismatch\n"); 1413 return -EINVAL; 1414 } 1415 1416 if (ep->fifo_size < usb_endpoint_maxp(desc)) { 1417 ep_err(ep, "bad maxpacket\n"); 1418 return -ERANGE; 1419 } 1420 1421 udc_usb_ep->pxa_ep = ep; 1422 udc = ep->dev; 1423 1424 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) { 1425 ep_err(ep, "bogus device state\n"); 1426 return -ESHUTDOWN; 1427 } 1428 1429 ep->enabled = 1; 1430 1431 /* flush fifo (mostly for OUT buffers) */ 1432 pxa_ep_fifo_flush(_ep); 1433 1434 ep_dbg(ep, "enabled\n"); 1435 return 0; 1436 } 1437 1438 /** 1439 * pxa_ep_disable - Disable usb endpoint 1440 * @_ep: usb endpoint 1441 * 1442 * Same as for pxa_ep_enable, no physical endpoint configuration can be 1443 * changed. 1444 * Function flushes the endpoint and related requests. 1445 */ 1446 static int pxa_ep_disable(struct usb_ep *_ep) 1447 { 1448 struct pxa_ep *ep; 1449 struct udc_usb_ep *udc_usb_ep; 1450 1451 if (!_ep) 1452 return -EINVAL; 1453 1454 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1455 ep = udc_usb_ep->pxa_ep; 1456 if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) 1457 return -EINVAL; 1458 1459 ep->enabled = 0; 1460 nuke(ep, -ESHUTDOWN); 1461 1462 pxa_ep_fifo_flush(_ep); 1463 udc_usb_ep->pxa_ep = NULL; 1464 1465 ep_dbg(ep, "disabled\n"); 1466 return 0; 1467 } 1468 1469 static const struct usb_ep_ops pxa_ep_ops = { 1470 .enable = pxa_ep_enable, 1471 .disable = pxa_ep_disable, 1472 1473 .alloc_request = pxa_ep_alloc_request, 1474 .free_request = pxa_ep_free_request, 1475 1476 .queue = pxa_ep_queue, 1477 .dequeue = pxa_ep_dequeue, 1478 1479 .set_halt = pxa_ep_set_halt, 1480 .fifo_status = pxa_ep_fifo_status, 1481 .fifo_flush = pxa_ep_fifo_flush, 1482 }; 1483 1484 /** 1485 * dplus_pullup - Connect or disconnect pullup resistor to D+ pin 1486 * @udc: udc device 1487 * @on: 0 if disconnect pullup resistor, 1 otherwise 1488 * Context: any 1489 * 1490 * Handle D+ pullup resistor, make the device visible to the usb bus, and 1491 * declare it as a full speed usb device 1492 */ 1493 static void dplus_pullup(struct pxa_udc *udc, int on) 1494 { 1495 if (udc->gpiod) { 1496 gpiod_set_value(udc->gpiod, on); 1497 } else if (udc->udc_command) { 1498 if (on) 1499 udc->udc_command(PXA2XX_UDC_CMD_CONNECT); 1500 else 1501 udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 1502 } 1503 udc->pullup_on = on; 1504 } 1505 1506 /** 1507 * pxa_udc_get_frame - Returns usb frame number 1508 * @_gadget: usb gadget 1509 */ 1510 static int pxa_udc_get_frame(struct usb_gadget *_gadget) 1511 { 1512 struct pxa_udc *udc = to_gadget_udc(_gadget); 1513 1514 return (udc_readl(udc, UDCFNR) & 0x7ff); 1515 } 1516 1517 /** 1518 * pxa_udc_wakeup - Force udc device out of suspend 1519 * @_gadget: usb gadget 1520 * 1521 * Returns 
0 if successful, error code otherwise 1522 */ 1523 static int pxa_udc_wakeup(struct usb_gadget *_gadget) 1524 { 1525 struct pxa_udc *udc = to_gadget_udc(_gadget); 1526 1527 /* host may not have enabled remote wakeup */ 1528 if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0) 1529 return -EHOSTUNREACH; 1530 udc_set_mask_UDCCR(udc, UDCCR_UDR); 1531 return 0; 1532 } 1533 1534 static void udc_enable(struct pxa_udc *udc); 1535 static void udc_disable(struct pxa_udc *udc); 1536 1537 /** 1538 * should_enable_udc - Tells if UDC should be enabled 1539 * @udc: udc device 1540 * Context: any 1541 * 1542 * The UDC should be enabled if : 1543 1544 * - the pullup resistor is connected 1545 * - and a gadget driver is bound 1546 * - and vbus is sensed (or no vbus sense is available) 1547 * 1548 * Returns 1 if UDC should be enabled, 0 otherwise 1549 */ 1550 static int should_enable_udc(struct pxa_udc *udc) 1551 { 1552 int put_on; 1553 1554 put_on = ((udc->pullup_on) && (udc->driver)); 1555 put_on &= ((udc->vbus_sensed) || (IS_ERR_OR_NULL(udc->transceiver))); 1556 return put_on; 1557 } 1558 1559 /** 1560 * should_disable_udc - Tells if UDC should be disabled 1561 * @udc: udc device 1562 * Context: any 1563 * 1564 * The UDC should be disabled if : 1565 * - the pullup resistor is not connected 1566 * - or no gadget driver is bound 1567 * - or no vbus is sensed (when vbus sesing is available) 1568 * 1569 * Returns 1 if UDC should be disabled 1570 */ 1571 static int should_disable_udc(struct pxa_udc *udc) 1572 { 1573 int put_off; 1574 1575 put_off = ((!udc->pullup_on) || (!udc->driver)); 1576 put_off |= ((!udc->vbus_sensed) && (!IS_ERR_OR_NULL(udc->transceiver))); 1577 return put_off; 1578 } 1579 1580 /** 1581 * pxa_udc_pullup - Offer manual D+ pullup control 1582 * @_gadget: usb gadget using the control 1583 * @is_active: 0 if disconnect, else connect D+ pullup resistor 1584 * Context: !in_interrupt() 1585 * 1586 * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup 1587 */ 1588 static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active) 1589 { 1590 struct pxa_udc *udc = to_gadget_udc(_gadget); 1591 1592 if (!udc->gpiod && !udc->udc_command) 1593 return -EOPNOTSUPP; 1594 1595 dplus_pullup(udc, is_active); 1596 1597 if (should_enable_udc(udc)) 1598 udc_enable(udc); 1599 if (should_disable_udc(udc)) 1600 udc_disable(udc); 1601 return 0; 1602 } 1603 1604 /** 1605 * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc 1606 * @_gadget: usb gadget 1607 * @is_active: 0 if should disable the udc, 1 if should enable 1608 * 1609 * Enables the udc, and optionnaly activates D+ pullup resistor. Or disables the 1610 * udc, and deactivates D+ pullup resistor. 1611 * 1612 * Returns 0 1613 */ 1614 static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active) 1615 { 1616 struct pxa_udc *udc = to_gadget_udc(_gadget); 1617 1618 udc->vbus_sensed = is_active; 1619 if (should_enable_udc(udc)) 1620 udc_enable(udc); 1621 if (should_disable_udc(udc)) 1622 udc_disable(udc); 1623 1624 return 0; 1625 } 1626 1627 /** 1628 * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed 1629 * @_gadget: usb gadget 1630 * @mA: current drawn 1631 * 1632 * Context: !in_interrupt() 1633 * 1634 * Called after a configuration was chosen by a USB host, to inform how much 1635 * current can be drawn by the device from VBus line. 
1636 * 1637 * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc 1638 */ 1639 static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA) 1640 { 1641 struct pxa_udc *udc; 1642 1643 udc = to_gadget_udc(_gadget); 1644 if (!IS_ERR_OR_NULL(udc->transceiver)) 1645 return usb_phy_set_power(udc->transceiver, mA); 1646 return -EOPNOTSUPP; 1647 } 1648 1649 /** 1650 * pxa_udc_phy_event - Called by phy upon VBus event 1651 * @nb: notifier block 1652 * @action: phy action, is vbus connect or disconnect 1653 * @data: the usb_gadget structure in pxa_udc 1654 * 1655 * Called by the USB Phy when a cable connect or disconnect is sensed. 1656 * 1657 * Returns 0 1658 */ 1659 static int pxa_udc_phy_event(struct notifier_block *nb, unsigned long action, 1660 void *data) 1661 { 1662 struct usb_gadget *gadget = data; 1663 1664 switch (action) { 1665 case USB_EVENT_VBUS: 1666 usb_gadget_vbus_connect(gadget); 1667 return NOTIFY_OK; 1668 case USB_EVENT_NONE: 1669 usb_gadget_vbus_disconnect(gadget); 1670 return NOTIFY_OK; 1671 default: 1672 return NOTIFY_DONE; 1673 } 1674 } 1675 1676 static struct notifier_block pxa27x_udc_phy = { 1677 .notifier_call = pxa_udc_phy_event, 1678 }; 1679 1680 static int pxa27x_udc_start(struct usb_gadget *g, 1681 struct usb_gadget_driver *driver); 1682 static int pxa27x_udc_stop(struct usb_gadget *g); 1683 1684 static const struct usb_gadget_ops pxa_udc_ops = { 1685 .get_frame = pxa_udc_get_frame, 1686 .wakeup = pxa_udc_wakeup, 1687 .pullup = pxa_udc_pullup, 1688 .vbus_session = pxa_udc_vbus_session, 1689 .vbus_draw = pxa_udc_vbus_draw, 1690 .udc_start = pxa27x_udc_start, 1691 .udc_stop = pxa27x_udc_stop, 1692 }; 1693 1694 /** 1695 * udc_disable - disable udc device controller 1696 * @udc: udc device 1697 * Context: any 1698 * 1699 * Disables the udc device : disables clocks, udc interrupts, control endpoint 1700 * interrupts. 1701 */ 1702 static void udc_disable(struct pxa_udc *udc) 1703 { 1704 if (!udc->enabled) 1705 return; 1706 1707 udc_writel(udc, UDCICR0, 0); 1708 udc_writel(udc, UDCICR1, 0); 1709 1710 udc_clear_mask_UDCCR(udc, UDCCR_UDE); 1711 1712 ep0_idle(udc); 1713 udc->gadget.speed = USB_SPEED_UNKNOWN; 1714 clk_disable(udc->clk); 1715 1716 udc->enabled = 0; 1717 } 1718 1719 /** 1720 * udc_init_data - Initialize udc device data structures 1721 * @dev: udc device 1722 * 1723 * Initializes gadget endpoint list, endpoints locks. No action is taken 1724 * on the hardware. 1725 */ 1726 static void udc_init_data(struct pxa_udc *dev) 1727 { 1728 int i; 1729 struct pxa_ep *ep; 1730 1731 /* device/ep0 records init */ 1732 INIT_LIST_HEAD(&dev->gadget.ep_list); 1733 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 1734 dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0]; 1735 dev->gadget.quirk_altset_not_supp = 1; 1736 ep0_idle(dev); 1737 1738 /* PXA endpoints init */ 1739 for (i = 0; i < NR_PXA_ENDPOINTS; i++) { 1740 ep = &dev->pxa_ep[i]; 1741 1742 ep->enabled = is_ep0(ep); 1743 INIT_LIST_HEAD(&ep->queue); 1744 spin_lock_init(&ep->lock); 1745 } 1746 1747 /* USB endpoints init */ 1748 for (i = 1; i < NR_USB_ENDPOINTS; i++) { 1749 list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list, 1750 &dev->gadget.ep_list); 1751 usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep, 1752 dev->udc_usb_ep[i].usb_ep.maxpacket); 1753 } 1754 } 1755 1756 /** 1757 * udc_enable - Enables the udc device 1758 * @dev: udc device 1759 * 1760 * Enables the udc device : enables clocks, udc interrupts, control endpoint 1761 * interrupts, sets usb as UDC client and setups endpoints. 
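 *
 * udc_enable() is not called directly from the gadget API; callers go through
 * the policy helpers, as in pxa_udc_pullup(), pxa_udc_vbus_session() and
 * pxa27x_udc_start() :
 *   if (should_enable_udc(udc))
 *           udc_enable(udc);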
1762 */ 1763 static void udc_enable(struct pxa_udc *udc) 1764 { 1765 if (udc->enabled) 1766 return; 1767 1768 clk_enable(udc->clk); 1769 udc_writel(udc, UDCICR0, 0); 1770 udc_writel(udc, UDCICR1, 0); 1771 udc_clear_mask_UDCCR(udc, UDCCR_UDE); 1772 1773 ep0_idle(udc); 1774 udc->gadget.speed = USB_SPEED_FULL; 1775 memset(&udc->stats, 0, sizeof(udc->stats)); 1776 1777 pxa_eps_setup(udc); 1778 udc_set_mask_UDCCR(udc, UDCCR_UDE); 1779 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM); 1780 udelay(2); 1781 if (udc_readl(udc, UDCCR) & UDCCR_EMCE) 1782 dev_err(udc->dev, "Configuration errors, udc disabled\n"); 1783 1784 /* 1785 * Caller must be able to sleep in order to cope with startup transients 1786 */ 1787 msleep(100); 1788 1789 /* enable suspend/resume and reset irqs */ 1790 udc_writel(udc, UDCICR1, 1791 UDCICR1_IECC | UDCICR1_IERU 1792 | UDCICR1_IESU | UDCICR1_IERS); 1793 1794 /* enable ep0 irqs */ 1795 pio_irq_enable(&udc->pxa_ep[0]); 1796 1797 udc->enabled = 1; 1798 } 1799 1800 /** 1801 * pxa27x_start - Register gadget driver 1802 * @driver: gadget driver 1803 * @bind: bind function 1804 * 1805 * When a driver is successfully registered, it will receive control requests 1806 * including set_configuration(), which enables non-control requests. Then 1807 * usb traffic follows until a disconnect is reported. Then a host may connect 1808 * again, or the driver might get unbound. 1809 * 1810 * Note that the udc is not automatically enabled. Check function 1811 * should_enable_udc(). 1812 * 1813 * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise 1814 */ 1815 static int pxa27x_udc_start(struct usb_gadget *g, 1816 struct usb_gadget_driver *driver) 1817 { 1818 struct pxa_udc *udc = to_pxa(g); 1819 int retval; 1820 1821 /* first hook up the driver ... */ 1822 udc->driver = driver; 1823 1824 if (!IS_ERR_OR_NULL(udc->transceiver)) { 1825 retval = otg_set_peripheral(udc->transceiver->otg, 1826 &udc->gadget); 1827 if (retval) { 1828 dev_err(udc->dev, "can't bind to transceiver\n"); 1829 goto fail; 1830 } 1831 } 1832 1833 if (should_enable_udc(udc)) 1834 udc_enable(udc); 1835 return 0; 1836 1837 fail: 1838 udc->driver = NULL; 1839 return retval; 1840 } 1841 1842 /** 1843 * stop_activity - Stops udc endpoints 1844 * @udc: udc device 1845 * @driver: gadget driver 1846 * 1847 * Disables all udc endpoints (even control endpoint), report disconnect to 1848 * the gadget user. 
1849 */ 1850 static void stop_activity(struct pxa_udc *udc) 1851 { 1852 int i; 1853 1854 udc->gadget.speed = USB_SPEED_UNKNOWN; 1855 1856 for (i = 0; i < NR_USB_ENDPOINTS; i++) 1857 pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep); 1858 } 1859 1860 /** 1861 * pxa27x_udc_stop - Unregister the gadget driver 1862 * @driver: gadget driver 1863 * 1864 * Returns 0 if no error, -ENODEV, -EINVAL otherwise 1865 */ 1866 static int pxa27x_udc_stop(struct usb_gadget *g) 1867 { 1868 struct pxa_udc *udc = to_pxa(g); 1869 1870 stop_activity(udc); 1871 udc_disable(udc); 1872 1873 udc->driver = NULL; 1874 1875 if (!IS_ERR_OR_NULL(udc->transceiver)) 1876 return otg_set_peripheral(udc->transceiver->otg, NULL); 1877 return 0; 1878 } 1879 1880 /** 1881 * handle_ep0_ctrl_req - handle control endpoint control request 1882 * @udc: udc device 1883 * @req: control request 1884 */ 1885 static void handle_ep0_ctrl_req(struct pxa_udc *udc, 1886 struct pxa27x_request *req) 1887 { 1888 struct pxa_ep *ep = &udc->pxa_ep[0]; 1889 union { 1890 struct usb_ctrlrequest r; 1891 u32 word[2]; 1892 } u; 1893 int i; 1894 int have_extrabytes = 0; 1895 unsigned long flags; 1896 1897 nuke(ep, -EPROTO); 1898 spin_lock_irqsave(&ep->lock, flags); 1899 1900 /* 1901 * In the PXA320 manual, in the section about Back-to-Back setup 1902 * packets, it describes this situation. The solution is to set OPC to 1903 * get rid of the status packet, and then continue with the setup 1904 * packet. Generalize to pxa27x CPUs. 1905 */ 1906 if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0)) 1907 ep_write_UDCCSR(ep, UDCCSR0_OPC); 1908 1909 /* read SETUP packet */ 1910 for (i = 0; i < 2; i++) { 1911 if (unlikely(ep_is_empty(ep))) 1912 goto stall; 1913 u.word[i] = udc_ep_readl(ep, UDCDR); 1914 } 1915 1916 have_extrabytes = !ep_is_empty(ep); 1917 while (!ep_is_empty(ep)) { 1918 i = udc_ep_readl(ep, UDCDR); 1919 ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i); 1920 } 1921 1922 ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1923 u.r.bRequestType, u.r.bRequest, 1924 le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex), 1925 le16_to_cpu(u.r.wLength)); 1926 if (unlikely(have_extrabytes)) 1927 goto stall; 1928 1929 if (u.r.bRequestType & USB_DIR_IN) 1930 set_ep0state(udc, IN_DATA_STAGE); 1931 else 1932 set_ep0state(udc, OUT_DATA_STAGE); 1933 1934 /* Tell UDC to enter Data Stage */ 1935 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); 1936 1937 spin_unlock_irqrestore(&ep->lock, flags); 1938 i = udc->driver->setup(&udc->gadget, &u.r); 1939 spin_lock_irqsave(&ep->lock, flags); 1940 if (i < 0) 1941 goto stall; 1942 out: 1943 spin_unlock_irqrestore(&ep->lock, flags); 1944 return; 1945 stall: 1946 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", 1947 udc_ep_readl(ep, UDCCSR), i); 1948 ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF); 1949 set_ep0state(udc, STALL); 1950 goto out; 1951 } 1952 1953 /** 1954 * handle_ep0 - Handle control endpoint data transfers 1955 * @udc: udc device 1956 * @fifo_irq: 1 if triggered by fifo service type irq 1957 * @opc_irq: 1 if triggered by output packet complete type irq 1958 * 1959 * Context : when in_interrupt() or with ep->lock held 1960 * 1961 * Tries to transfer all pending request data into the endpoint and/or 1962 * transfer all pending data in the endpoint into usb requests. 1963 * Handles states of ep0 automata. 1964 * 1965 * PXA27x hardware handles several standard usb control requests without 1966 * driver notification. 
/**
 * handle_ep0 - Handle control endpoint data transfers
 * @udc: udc device
 * @fifo_irq: 1 if triggered by fifo service type irq
 * @opc_irq: 1 if triggered by output packet complete type irq
 *
 * Context : when in_interrupt() or with ep->lock held
 *
 * Tries to transfer all pending request data into the endpoint and/or
 * transfer all pending data in the endpoint into usb requests.
 * Handles the states of the ep0 automaton.
 *
 * PXA27x hardware handles several standard usb control requests without
 * driver notification.  The requests fully handled by hardware are :
 * SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
 * GET_STATUS
 * The requests handled by hardware, but with irq notification are :
 * SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
 * The remaining standard requests really handled by handle_ep0 are :
 * GET_DESCRIPTOR, SET_DESCRIPTOR, and class/vendor specific requests.
 * Requests standardized outside of USB 2.0 chapter 9 are handled more
 * uniformly, by gadget drivers.
 *
 * The control endpoint state machine is _not_ USB spec compliant; it is not
 * even fully compliant with the Intel PXA270 Developer's Guide.
 * The key observations this state machine is built on are :
 * - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
 *   software.
 * - on every OUT packet received, UDCCSR0_OPC is raised and held until
 *   cleared by software.
 * - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
 *   before reading ep0.
 *   This is true only for PXA27x. It is not true anymore for the PXA3xx
 *   family (check Back-to-Back setup packets in the developer's guide).
 * - the irq can be called on a "packet complete" event (opc_irq=1), while
 *   UDCCSR0_OPC is not yet raised (the delta can be as big as 100ms
 *   from experimentation).
 * - as UDCCSR0_SA can be activated while in irq handling, and clearing
 *   UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
 *   => we never actually read the "status stage" packet of an IN data stage
 *   => this is not documented in Intel documentation
 * - the hardware has no notion of a STATUS STAGE; it only handles the SETUP
 *   STAGE and the DATA STAGE. The driver adds a STATUS STAGE of its own to
 *   send the final zero-length packet in OUT_STATUS_STAGE.
 * - special attention was needed for IN_STATUS_STAGE. If a packet complete
 *   event is detected, we terminate the status stage without acknowledging
 *   the packet (so as not to risk losing a potential SETUP packet).
 */
static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
{
	u32 udccsr0;
	struct pxa_ep *ep = &udc->pxa_ep[0];
	struct pxa27x_request *req = NULL;
	int completed = 0;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next, struct pxa27x_request, queue);

	udccsr0 = udc_ep_readl(ep, UDCCSR);
	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
		(fifo_irq << 1 | opc_irq));

	if (udccsr0 & UDCCSR0_SST) {
		ep_dbg(ep, "clearing stall status\n");
		nuke(ep, -EPIPE);
		ep_write_UDCCSR(ep, UDCCSR0_SST);
		ep0_idle(udc);
	}

	if (udccsr0 & UDCCSR0_SA) {
		nuke(ep, 0);
		set_ep0state(udc, SETUP_STAGE);
	}

	switch (udc->ep0state) {
	case WAIT_FOR_SETUP:
		/*
		 * Hardware bug : beware, we cannot clear OPC, since we would
		 * miss a potential OPC irq for a setup packet.
		 * So, we only do ... nothing, and hope for a next irq with
		 * UDCCSR0_SA set.
		 */
		break;
	case SETUP_STAGE:
		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
			handle_ep0_ctrl_req(udc, req);
		break;
	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
		if (epout_has_pkt(ep))
			ep_write_UDCCSR(ep, UDCCSR0_OPC);
		if (req && !ep_is_full(ep))
			completed = write_ep0_fifo(ep, req);
		if (completed)
			ep0_end_in_req(ep, req, NULL);
		break;
	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
		if (epout_has_pkt(ep) && req)
			completed = read_ep0_fifo(ep, req);
		if (completed)
			ep0_end_out_req(ep, req, NULL);
		break;
	case STALL:
		ep_write_UDCCSR(ep, UDCCSR0_FST);
		break;
	case IN_STATUS_STAGE:
		/*
		 * Hardware bug : beware, we cannot clear OPC, since we would
		 * miss a potential OPC irq for a setup packet.
		 * So, we only put ep0 into the WAIT_FOR_SETUP state.
		 */
		if (opc_irq)
			ep0_idle(udc);
		break;
	case OUT_STATUS_STAGE:
	case WAIT_ACK_SET_CONF_INTERF:
		ep_warn(ep, "should never get in %s state here!!!\n",
				EP0_STNAME(ep->dev));
		ep0_idle(udc);
		break;
	}
}
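/*
 * Informational summary (reconstructed from the code above, not quoted from
 * the PXA270 manual) of the ep0 transitions driven by handle_ep0() and
 * handle_ep0_ctrl_req():
 *
 *   WAIT_FOR_SETUP  --(UDCCSR0_SA seen)------------>  SETUP_STAGE
 *   SETUP_STAGE     --(IN bRequestType)------------>  IN_DATA_STAGE
 *   SETUP_STAGE     --(OUT bRequestType)----------->  OUT_DATA_STAGE
 *   SETUP_STAGE     --(bad packet / setup() < 0)--->  STALL
 *   IN_STATUS_STAGE --(opc_irq)-------------------->  WAIT_FOR_SETUP
 *
 * The exits from the data stages go through ep0_end_in_req() and
 * ep0_end_out_req() (earlier in this file); WAIT_ACK_SET_CONF_INTERF is
 * entered from the configuration/interface change handlers below.
 */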
/**
 * handle_ep - Handle endpoint data transfers
 * @ep: pxa physical endpoint
 *
 * Tries to transfer all pending request data into the endpoint and/or
 * transfer all pending data in the endpoint into usb requests.
 *
 * Is always called when in_interrupt() and with ep->lock released.
 */
static void handle_ep(struct pxa_ep *ep)
{
	struct pxa27x_request *req;
	int completed;
	u32 udccsr;
	int is_in = ep->dir_in;
	int loop = 0;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->in_handle_ep)
		goto recursion_detected;
	ep->in_handle_ep = 1;

	do {
		completed = 0;
		udccsr = udc_ep_readl(ep, UDCCSR);

		if (likely(!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa27x_request, queue);
		else
			req = NULL;

		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
				req, udccsr, loop++);

		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
			udc_ep_writel(ep, UDCCSR,
					udccsr & (UDCCSR_SST | UDCCSR_TRN));
		if (!req)
			break;

		if (unlikely(is_in)) {
			if (likely(!ep_is_full(ep)))
				completed = write_fifo(ep, req);
		} else {
			if (likely(epout_has_pkt(ep)))
				completed = read_fifo(ep, req);
		}

		if (completed) {
			if (is_in)
				ep_end_in_req(ep, req, &flags);
			else
				ep_end_out_req(ep, req, &flags);
		}
	} while (completed);

	ep->in_handle_ep = 0;
recursion_detected:
	spin_unlock_irqrestore(&ep->lock, flags);
}
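/*
 * Illustration only: the in_handle_ep guard in handle_ep() exists because
 * completing a request releases ep->lock and invokes the gadget's completion
 * callback, which typically queues the next request straight away, as in the
 * hypothetical gadget-side pattern below.  That usb_ep_queue() call can land
 * back in this driver while handle_ep() is still on the stack.
 */
#if 0
static void my_bulk_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* recycle the request immediately on success */
	if (req->status == 0)
		usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif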
/**
 * pxa27x_change_configuration - Handle SET_CONF usb request notification
 * @udc: udc device
 * @config: usb configuration
 *
 * Posts the request to the upper level.
 * Doesn't use any PXA-specific hardware configuration capabilities.
 */
static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
{
	struct usb_ctrlrequest req;

	dev_dbg(udc->dev, "config=%d\n", config);

	udc->config = config;
	udc->last_interface = 0;
	udc->last_alternate = 0;

	req.bRequestType = 0;
	req.bRequest = USB_REQ_SET_CONFIGURATION;
	req.wValue = config;
	req.wIndex = 0;
	req.wLength = 0;

	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
	udc->driver->setup(&udc->gadget, &req);
	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
}

/**
 * pxa27x_change_interface - Handle SET_INTERF usb request notification
 * @udc: udc device
 * @iface: interface number
 * @alt: alternate setting number
 *
 * Posts the request to the upper level.
 * Doesn't use any PXA-specific hardware configuration capabilities.
 */
static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
{
	struct usb_ctrlrequest req;

	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);

	udc->last_interface = iface;
	udc->last_alternate = alt;

	req.bRequestType = USB_RECIP_INTERFACE;
	req.bRequest = USB_REQ_SET_INTERFACE;
	req.wValue = alt;
	req.wIndex = iface;
	req.wLength = 0;

	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
	udc->driver->setup(&udc->gadget, &req);
	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
}

/*
 * irq_handle_data - Handle data transfer
 * @irq: IRQ number
 * @udc: pxa_udc device structure
 *
 * Called from the irq handler, transfers data between the endpoint FIFOs and
 * the queued requests.
 */
static void irq_handle_data(int irq, struct pxa_udc *udc)
{
	int i;
	struct pxa_ep *ep;
	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;

	if (udcisr0 & UDCISR_INT_MASK) {
		udc->pxa_ep[0].stats.irqs++;
		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
				!!(udcisr0 & UDCICR_PKTCOMPL));
	}

	udcisr0 >>= 2;
	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
		if (!(udcisr0 & UDCISR_INT_MASK))
			continue;

		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));

		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
		if (i < ARRAY_SIZE(udc->pxa_ep)) {
			ep = &udc->pxa_ep[i];
			ep->stats.irqs++;
			handle_ep(ep);
		}
	}

	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
		if (!(udcisr1 & UDCISR_INT_MASK))
			continue;

		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
		if (i < ARRAY_SIZE(udc->pxa_ep)) {
			ep = &udc->pxa_ep[i];
			ep->stats.irqs++;
			handle_ep(ep);
		}
	}
}
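/*
 * Layout assumed by irq_handle_data() above (informational, derived from the
 * loop structure rather than quoted from the manual): every endpoint owns a
 * pair of adjacent status bits, "packet complete" and "fifo error".
 * Endpoints 0..15 are reported in UDCISR0 (ep0 in bits 1:0, ep1 in bits 3:2,
 * and so on), endpoints 16..23 in the low bits of UDCISR1, while the top
 * bits of UDCISR1 carry the device-level reset/suspend/resume/reconfig
 * events handled by pxa_udc_irq() below.
 */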
/**
 * irq_udc_suspend - Handle IRQ "UDC Suspend"
 * @udc: udc device
 */
static void irq_udc_suspend(struct pxa_udc *udc)
{
	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
	udc->stats.irqs_suspend++;

	if (udc->gadget.speed != USB_SPEED_UNKNOWN
			&& udc->driver && udc->driver->suspend)
		udc->driver->suspend(&udc->gadget);
	ep0_idle(udc);
}

/**
 * irq_udc_resume - Handle IRQ "UDC Resume"
 * @udc: udc device
 */
static void irq_udc_resume(struct pxa_udc *udc)
{
	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
	udc->stats.irqs_resume++;

	if (udc->gadget.speed != USB_SPEED_UNKNOWN
			&& udc->driver && udc->driver->resume)
		udc->driver->resume(&udc->gadget);
}

/**
 * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
 * @udc: udc device
 */
static void irq_udc_reconfig(struct pxa_udc *udc)
{
	unsigned config, interface, alternate, config_change;
	u32 udccr = udc_readl(udc, UDCCR);

	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
	udc->stats.irqs_reconfig++;

	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
	config_change = (config != udc->config);
	pxa27x_change_configuration(udc, config);

	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
	pxa27x_change_interface(udc, interface, alternate);

	if (config_change)
		update_pxa_ep_matches(udc);
	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
}

/**
 * irq_udc_reset - Handle IRQ "UDC Reset"
 * @udc: udc device
 */
static void irq_udc_reset(struct pxa_udc *udc)
{
	u32 udccr = udc_readl(udc, UDCCR);
	struct pxa_ep *ep = &udc->pxa_ep[0];

	dev_info(udc->dev, "USB reset\n");
	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
	udc->stats.irqs_reset++;

	if ((udccr & UDCCR_UDA) == 0) {
		dev_dbg(udc->dev, "USB reset start\n");
		stop_activity(udc);
	}
	udc->gadget.speed = USB_SPEED_FULL;
	memset(&udc->stats, 0, sizeof(udc->stats));

	nuke(ep, -EPROTO);
	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
	ep0_idle(udc);
}

/**
 * pxa_udc_irq - Main irq handler
 * @irq: irq number
 * @_dev: udc device
 *
 * Handles all udc interrupts
 */
static irqreturn_t pxa_udc_irq(int irq, void *_dev)
{
	struct pxa_udc *udc = _dev;
	u32 udcisr0 = udc_readl(udc, UDCISR0);
	u32 udcisr1 = udc_readl(udc, UDCISR1);
	u32 udccr = udc_readl(udc, UDCCR);
	u32 udcisr1_spec;

	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);

	udcisr1_spec = udcisr1 & 0xf8000000;
	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
		irq_udc_suspend(udc);
	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
		irq_udc_resume(udc);
	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
		irq_udc_reconfig(udc);
	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
		irq_udc_reset(udc);

	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
		irq_handle_data(irq, udc);

	return IRQ_HANDLED;
}
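/*
 * Note (informational): the static tables below describe both the logical
 * endpoints exposed to gadget drivers (udc_usb_ep) and the physical UDC
 * endpoints (pxa_ep); each pxa_ep entry is tied to one configuration,
 * interface and alternate setting, which is why several entries exist for
 * the same physical direction.  When irq_udc_reconfig() above detects a
 * configuration change, update_pxa_ep_matches() re-associates the two sets.
 */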
static struct pxa_udc memory = {
	.gadget = {
		.ops		= &pxa_udc_ops,
		.ep0		= &memory.udc_usb_ep[0].usb_ep,
		.name		= driver_name,
		.dev = {
			.init_name	= "gadget",
		},
	},

	.udc_usb_ep = {
		USB_EP_CTRL,
		USB_EP_OUT_BULK(1),
		USB_EP_IN_BULK(2),
		USB_EP_IN_ISO(3),
		USB_EP_OUT_ISO(4),
		USB_EP_IN_INT(5),
	},

	.pxa_ep = {
		PXA_EP_CTRL,
		/* Endpoints for gadget zero */
		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
		PXA_EP_IN_BULK(2, 2, 3, 0, 0),
		/* Endpoints for ether gadget, file storage gadget */
		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
		PXA_EP_IN_BULK(4, 2, 1, 0, 0),
		PXA_EP_IN_ISO(5, 3, 1, 0, 0),
		PXA_EP_OUT_ISO(6, 4, 1, 0, 0),
		PXA_EP_IN_INT(7, 5, 1, 0, 0),
		/* Endpoints for RNDIS, serial */
		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
		PXA_EP_IN_BULK(9, 2, 2, 0, 0),
		PXA_EP_IN_INT(10, 5, 2, 0, 0),
		/*
		 * All the following endpoints are only here for completeness.
		 * They will never work, as multiple interfaces are really
		 * broken on the pxa.
		 */
		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
		PXA_EP_IN_BULK(12, 2, 2, 1, 0),
		/* Endpoint for CDC Ether */
		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
		PXA_EP_IN_BULK(14, 2, 1, 1, 1),
	}
};

#if defined(CONFIG_OF)
static const struct of_device_id udc_pxa_dt_ids[] = {
	{ .compatible = "marvell,pxa270-udc" },
	{}
};
MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
#endif

/**
 * pxa_udc_probe - probes the udc device
 * @pdev: platform device
 *
 * Performs basic init : gets the udc clock, I/O memory, irq and transceiver,
 * registers the usb gadget, and enables the udc if appropriate.
 */
static int pxa_udc_probe(struct platform_device *pdev)
{
	struct resource *regs;
	struct pxa_udc *udc = &memory;
	int retval = 0, gpio;
	struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
	unsigned long gpio_flags;

	if (mach) {
		gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
		gpio = mach->gpio_pullup;
		if (gpio_is_valid(gpio)) {
			retval = devm_gpio_request_one(&pdev->dev, gpio,
						       gpio_flags,
						       "USB D+ pullup");
			if (retval)
				return retval;
			udc->gpiod = gpio_to_desc(mach->gpio_pullup);
		}
		udc->udc_command = mach->udc_command;
	} else {
		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(udc->regs))
		return PTR_ERR(udc->regs);
	udc->irq = platform_get_irq(pdev, 0);
	if (udc->irq < 0)
		return udc->irq;

	udc->dev = &pdev->dev;
	if (of_have_populated_dt()) {
		udc->transceiver =
			devm_usb_get_phy_by_phandle(udc->dev, "phys", 0);
		if (IS_ERR(udc->transceiver))
			return PTR_ERR(udc->transceiver);
	} else {
		udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
	}

	if (IS_ERR(udc->gpiod)) {
		dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
			PTR_ERR(udc->gpiod));
		return PTR_ERR(udc->gpiod);
	}
	if (udc->gpiod)
		gpiod_direction_output(udc->gpiod, 0);

	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk))
		return PTR_ERR(udc->clk);

	retval = clk_prepare(udc->clk);
	if (retval)
		return retval;

	udc->vbus_sensed = 0;

	the_controller = udc;
	platform_set_drvdata(pdev, udc);
	udc_init_data(udc);

	/* irq setup after old hardware state is cleaned up */
	retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
				  IRQF_SHARED, driver_name, udc);
	if (retval != 0) {
		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
			driver_name, udc->irq, retval);
		goto err;
	}

	if (!IS_ERR_OR_NULL(udc->transceiver))
		usb_register_notifier(udc->transceiver, &pxa27x_udc_phy);
	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (retval)
		goto err_add_gadget;

	pxa_init_debugfs(udc);
	if (should_enable_udc(udc))
		udc_enable(udc);
	return 0;

err_add_gadget:
	if (!IS_ERR_OR_NULL(udc->transceiver))
		usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
err:
	clk_unprepare(udc->clk);
	return retval;
}
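/*
 * Illustration only: a device-tree node matching udc_pxa_dt_ids and the
 * "phys" phandle looked up in pxa_udc_probe() could look roughly like the
 * sketch below.  The unit address, register size, interrupt number and the
 * udc_phy label are placeholders, not values taken from a real board file.
 *
 *	udc: usb-device@40600000 {
 *		compatible = "marvell,pxa270-udc";
 *		reg = <0x40600000 0x10000>;
 *		interrupts = <11>;
 *		phys = <&udc_phy>;
 *	};
 */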
/**
 * pxa_udc_remove - removes the udc device driver
 * @_dev: platform device
 */
static int pxa_udc_remove(struct platform_device *_dev)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);

	usb_del_gadget_udc(&udc->gadget);
	pxa_cleanup_debugfs(udc);

	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
		usb_put_phy(udc->transceiver);
	}

	udc->transceiver = NULL;
	the_controller = NULL;
	clk_unprepare(udc->clk);

	return 0;
}

static void pxa_udc_shutdown(struct platform_device *_dev)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);

	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
		udc_disable(udc);
}

#ifdef CONFIG_PXA27x
extern void pxa27x_clear_otgph(void);
#else
#define pxa27x_clear_otgph()	do {} while (0)
#endif

#ifdef CONFIG_PM
/**
 * pxa_udc_suspend - Suspend udc device
 * @_dev: platform device
 * @state: suspend state
 *
 * Suspends udc : saves configuration registers (UDCCR*), then disables the
 * udc device.
 */
static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);
	struct pxa_ep *ep;

	ep = &udc->pxa_ep[0];
	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);

	udc_disable(udc);
	udc->pullup_resume = udc->pullup_on;
	dplus_pullup(udc, 0);

	if (udc->driver)
		udc->driver->disconnect(&udc->gadget);

	return 0;
}

/**
 * pxa_udc_resume - Resume udc device
 * @_dev: platform device
 *
 * Resumes udc : restores configuration registers (UDCCR*), then enables the
 * udc device.
 */
static int pxa_udc_resume(struct platform_device *_dev)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);
	struct pxa_ep *ep;

	ep = &udc->pxa_ep[0];
	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));

	dplus_pullup(udc, udc->pullup_resume);
	if (should_enable_udc(udc))
		udc_enable(udc);
	/*
	 * We do not handle OTG yet.
	 *
	 * The OTGPH bit is set when sleep mode is entered.
	 * It indicates that the OTG pad is retaining its state.
	 * Upon exit from sleep mode and before clearing OTGPH,
	 * software must configure the USB OTG pad, UDC, and UHC
	 * to the state they were in before entering sleep mode.
	 */
	pxa27x_clear_otgph();

	return 0;
}
#endif

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:pxa27x-udc");

static struct platform_driver udc_driver = {
	.driver = {
		.name	= "pxa27x-udc",
		.of_match_table = of_match_ptr(udc_pxa_dt_ids),
	},
	.probe		= pxa_udc_probe,
	.remove		= pxa_udc_remove,
	.shutdown	= pxa_udc_shutdown,
#ifdef CONFIG_PM
	.suspend	= pxa_udc_suspend,
	.resume		= pxa_udc_resume
#endif
};

module_platform_driver(udc_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Robert Jarzmik");
MODULE_LICENSE("GPL");
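/*
 * Typical use (illustrative): the driver binds to the "pxa27x-udc" platform
 * device (or to a "marvell,pxa270-udc" DT node), after which a gadget
 * function driver is loaded on top of it, for example one of the legacy
 * g_* modules:
 *
 *	modprobe pxa27x_udc
 *	modprobe g_zero
 */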