// SPDX-License-Identifier: GPL-2.0+
/*
 * Handles the Intel 27x USB Device Controller (UDC)
 *
 * Inspired by original driver by Frank Becker, David Brownell, and others.
 * Copyright (C) 2008 Robert Jarzmik
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>

#include "pxa27x_udc.h"

/*
 * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
 * series processors.
 *
 * Such controller drivers work with a gadget driver. The gadget driver
 * returns descriptors, implements configuration and data protocols used
 * by the host to interact with this device, and allocates endpoints to
 * the different protocol interfaces. The controller driver virtualizes
 * usb hardware so that the gadget drivers will be more portable.
 *
 * This UDC hardware wants to implement a bit too much USB protocol. The
 * biggest issues are: that the endpoints have to be set up before the
 * controller can be enabled (minor, and not uncommon); and each endpoint
 * can only have one configuration, interface and alternative interface
 * number (major, and very unusual). Once set up, these cannot be changed
 * without a controller reset.
 *
 * The workaround is to set up all the combinations necessary for the gadgets
 * which will work with this driver. This is done statically, in the pxa_udc
 * structure.
 * See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep.
 * (You could modify this if needed. Some drivers have a "fifo_mode" module
 * parameter to facilitate such changes.)
 *
 * The combinations have been tested with these gadgets:
 *   - zero gadget
 *   - file storage gadget
 *   - ether gadget
 *
 * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
 * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
 *
 * All the requests are handled the same way:
 *   - the driver first tries to serve the request directly through
 *     programmed IO
 *   - if the IO fifo is not big enough, the remainder is sent/received
 *     from the interrupt handler.
 */

#define DRIVER_VERSION	"2008-04-18"
#define DRIVER_DESC	"PXA 27x USB Device Controller driver"

static const char driver_name[] = "pxa27x_udc";
static struct pxa_udc *the_controller;

static void handle_ep(struct pxa_ep *ep);

/*
 * Debug filesystem
 */
#ifdef CONFIG_USB_GADGET_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int state_dbg_show(struct seq_file *s, void *p)
{
	struct pxa_udc *udc = s->private;
	u32 tmp;

	if (!udc->driver)
		return -ENODEV;

	/* basic device status */
	seq_printf(s, DRIVER_DESC "\n"
		   "%s version: %s\n"
		   "Gadget driver: %s\n",
		   driver_name, DRIVER_VERSION,
		   udc->driver ?
udc->driver->driver.name : "(none)"); 104 105 tmp = udc_readl(udc, UDCCR); 106 seq_printf(s, 107 "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), con=%d,inter=%d,altinter=%d\n", 108 tmp, 109 (tmp & UDCCR_OEN) ? " oen":"", 110 (tmp & UDCCR_AALTHNP) ? " aalthnp":"", 111 (tmp & UDCCR_AHNP) ? " rem" : "", 112 (tmp & UDCCR_BHNP) ? " rstir" : "", 113 (tmp & UDCCR_DWRE) ? " dwre" : "", 114 (tmp & UDCCR_SMAC) ? " smac" : "", 115 (tmp & UDCCR_EMCE) ? " emce" : "", 116 (tmp & UDCCR_UDR) ? " udr" : "", 117 (tmp & UDCCR_UDA) ? " uda" : "", 118 (tmp & UDCCR_UDE) ? " ude" : "", 119 (tmp & UDCCR_ACN) >> UDCCR_ACN_S, 120 (tmp & UDCCR_AIN) >> UDCCR_AIN_S, 121 (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S); 122 /* registers for device and ep0 */ 123 seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n", 124 udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1)); 125 seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n", 126 udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1)); 127 seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR)); 128 seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, reconfig=%lu\n", 129 udc->stats.irqs_reset, udc->stats.irqs_suspend, 130 udc->stats.irqs_resume, udc->stats.irqs_reconfig); 131 132 return 0; 133 } 134 DEFINE_SHOW_ATTRIBUTE(state_dbg); 135 136 static int queues_dbg_show(struct seq_file *s, void *p) 137 { 138 struct pxa_udc *udc = s->private; 139 struct pxa_ep *ep; 140 struct pxa27x_request *req; 141 int i, maxpkt; 142 143 if (!udc->driver) 144 return -ENODEV; 145 146 /* dump endpoint queues */ 147 for (i = 0; i < NR_PXA_ENDPOINTS; i++) { 148 ep = &udc->pxa_ep[i]; 149 maxpkt = ep->fifo_size; 150 seq_printf(s, "%-12s max_pkt=%d %s\n", 151 EPNAME(ep), maxpkt, "pio"); 152 153 if (list_empty(&ep->queue)) { 154 seq_puts(s, "\t(nothing queued)\n"); 155 continue; 156 } 157 158 list_for_each_entry(req, &ep->queue, queue) { 159 seq_printf(s, "\treq %p len %d/%d buf %p\n", 160 &req->req, req->req.actual, 161 req->req.length, req->req.buf); 162 } 163 } 164 165 return 0; 166 } 167 DEFINE_SHOW_ATTRIBUTE(queues_dbg); 168 169 static int eps_dbg_show(struct seq_file *s, void *p) 170 { 171 struct pxa_udc *udc = s->private; 172 struct pxa_ep *ep; 173 int i; 174 u32 tmp; 175 176 if (!udc->driver) 177 return -ENODEV; 178 179 ep = &udc->pxa_ep[0]; 180 tmp = udc_ep_readl(ep, UDCCSR); 181 seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n", 182 tmp, 183 (tmp & UDCCSR0_SA) ? " sa" : "", 184 (tmp & UDCCSR0_RNE) ? " rne" : "", 185 (tmp & UDCCSR0_FST) ? " fst" : "", 186 (tmp & UDCCSR0_SST) ? " sst" : "", 187 (tmp & UDCCSR0_DME) ? " dme" : "", 188 (tmp & UDCCSR0_IPR) ? " ipr" : "", 189 (tmp & UDCCSR0_OPC) ? " opc" : ""); 190 for (i = 0; i < NR_PXA_ENDPOINTS; i++) { 191 ep = &udc->pxa_ep[i]; 192 tmp = i? 
			udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
		seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n",
			   EPNAME(ep),
			   ep->stats.in_bytes, ep->stats.in_ops,
			   ep->stats.out_bytes, ep->stats.out_ops,
			   ep->stats.irqs,
			   tmp, udc_ep_readl(ep, UDCCSR),
			   udc_ep_readl(ep, UDCBCR));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(eps_dbg);

static void pxa_init_debugfs(struct pxa_udc *udc)
{
	struct dentry *root;

	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	udc->debugfs_root = root;

	debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
	debugfs_create_file("queues", 0400, root, udc, &queues_dbg_fops);
	debugfs_create_file("epstate", 0400, root, udc, &eps_dbg_fops);
}

static void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
	debugfs_remove_recursive(udc->debugfs_root);
}

#else
static inline void pxa_init_debugfs(struct pxa_udc *udc)
{
}

static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
}
#endif

/**
 * is_match_usb_pxa - check if usb_ep and pxa_ep match
 * @udc_usb_ep: usb endpoint
 * @ep: pxa endpoint
 * @config: configuration required in pxa_ep
 * @interface: interface required in pxa_ep
 * @altsetting: altsetting required in pxa_ep
 *
 * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
 */
static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
			    int config, int interface, int altsetting)
{
	if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
		return 0;
	if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
		return 0;
	if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
		return 0;
	if ((ep->config != config) || (ep->interface != interface)
	    || (ep->alternate != altsetting))
		return 0;
	return 1;
}

/**
 * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
 * @udc: pxa udc
 * @udc_usb_ep: udc_usb_ep structure
 *
 * Match udc_usb_ep against all available pxa_ep, to see if one matches.
 * This is necessary because of the strong pxa hardware restriction requiring
 * that once pxa endpoints are initialized, their configuration is frozen, and
 * no change can be made to their address, direction, or in which
 * configuration, interface or altsetting they are active ... which differs
 * from more usual models where endpoints are roughly just addressable fifos,
 * and configuration events are left up to gadget drivers (like all control
 * messages).
 *
 * Note that there is still a blurred point here:
 * - we rely on the UDCCR register's "active interface" and "active
 *   altsetting". This makes no sense with regard to the USB spec, where
 *   multiple interfaces are active at the same time.
 * - if we knew for sure that the pxa can handle multiple interfaces at the
 *   same time, assuming Intel's Developer Guide is wrong, this function
 *   should be reviewed, and a cache of (iface, altsetting) pairs should
 *   be kept in the pxa_udc structure. In this case this function would match
 *   against the cache of pairs instead of the "last altsetting" set up.
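 *
 * As an illustration (assuming the static endpoint table described at the
 * top of this file): if the same logical bulk-in endpoint must serve both
 * (config 1, interface 0, alt 0) and (config 2, interface 0, alt 1), two
 * distinct pxa_ep entries carry those tuples, and this function returns
 * whichever entry matches what the host last selected through
 * SET_CONFIGURATION/SET_INTERFACE.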
280 * 281 * Returns the matched pxa_ep structure or NULL if none found 282 */ 283 static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc, 284 struct udc_usb_ep *udc_usb_ep) 285 { 286 int i; 287 struct pxa_ep *ep; 288 int cfg = udc->config; 289 int iface = udc->last_interface; 290 int alt = udc->last_alternate; 291 292 if (udc_usb_ep == &udc->udc_usb_ep[0]) 293 return &udc->pxa_ep[0]; 294 295 for (i = 1; i < NR_PXA_ENDPOINTS; i++) { 296 ep = &udc->pxa_ep[i]; 297 if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt)) 298 return ep; 299 } 300 return NULL; 301 } 302 303 /** 304 * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep 305 * @udc: pxa udc 306 * 307 * Context: interrupt handler 308 * 309 * Updates all pxa_ep fields in udc_usb_ep structures, if this field was 310 * previously set up (and is not NULL). The update is necessary is a 311 * configuration change or altsetting change was issued by the USB host. 312 */ 313 static void update_pxa_ep_matches(struct pxa_udc *udc) 314 { 315 int i; 316 struct udc_usb_ep *udc_usb_ep; 317 318 for (i = 1; i < NR_USB_ENDPOINTS; i++) { 319 udc_usb_ep = &udc->udc_usb_ep[i]; 320 if (udc_usb_ep->pxa_ep) 321 udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep); 322 } 323 } 324 325 /** 326 * pio_irq_enable - Enables irq generation for one endpoint 327 * @ep: udc endpoint 328 */ 329 static void pio_irq_enable(struct pxa_ep *ep) 330 { 331 struct pxa_udc *udc = ep->dev; 332 int index = EPIDX(ep); 333 u32 udcicr0 = udc_readl(udc, UDCICR0); 334 u32 udcicr1 = udc_readl(udc, UDCICR1); 335 336 if (index < 16) 337 udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2))); 338 else 339 udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2))); 340 } 341 342 /** 343 * pio_irq_disable - Disables irq generation for one endpoint 344 * @ep: udc endpoint 345 */ 346 static void pio_irq_disable(struct pxa_ep *ep) 347 { 348 struct pxa_udc *udc = ep->dev; 349 int index = EPIDX(ep); 350 u32 udcicr0 = udc_readl(udc, UDCICR0); 351 u32 udcicr1 = udc_readl(udc, UDCICR1); 352 353 if (index < 16) 354 udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2))); 355 else 356 udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2))); 357 } 358 359 /** 360 * udc_set_mask_UDCCR - set bits in UDCCR 361 * @udc: udc device 362 * @mask: bits to set in UDCCR 363 * 364 * Sets bits in UDCCR, leaving DME and FST bits as they were. 365 */ 366 static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask) 367 { 368 u32 udccr = udc_readl(udc, UDCCR); 369 udc_writel(udc, UDCCR, 370 (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS)); 371 } 372 373 /** 374 * udc_clear_mask_UDCCR - clears bits in UDCCR 375 * @udc: udc device 376 * @mask: bit to clear in UDCCR 377 * 378 * Clears bits in UDCCR, leaving DME and FST bits as they were. 379 */ 380 static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask) 381 { 382 u32 udccr = udc_readl(udc, UDCCR); 383 udc_writel(udc, UDCCR, 384 (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS)); 385 } 386 387 /** 388 * ep_write_UDCCSR - set bits in UDCCSR 389 * @ep: udc endpoint 390 * @mask: bits to set in UDCCR 391 * 392 * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*). 393 * 394 * A specific case is applied to ep0 : the ACM bit is always set to 1, for 395 * SET_INTERFACE and SET_CONFIGURATION. 
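 *
 * Minimal usage sketch: ep_write_UDCCSR(ep, UDCCSR_PC) acknowledges a
 * completed packet on a data endpoint; the same call on ep0 also keeps
 * UDCCSR0_ACM set, as described above.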
396 */ 397 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask) 398 { 399 if (is_ep0(ep)) 400 mask |= UDCCSR0_ACM; 401 udc_ep_writel(ep, UDCCSR, mask); 402 } 403 404 /** 405 * ep_count_bytes_remain - get how many bytes in udc endpoint 406 * @ep: udc endpoint 407 * 408 * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP) 409 */ 410 static int ep_count_bytes_remain(struct pxa_ep *ep) 411 { 412 if (ep->dir_in) 413 return -EOPNOTSUPP; 414 return udc_ep_readl(ep, UDCBCR) & 0x3ff; 415 } 416 417 /** 418 * ep_is_empty - checks if ep has byte ready for reading 419 * @ep: udc endpoint 420 * 421 * If endpoint is the control endpoint, checks if there are bytes in the 422 * control endpoint fifo. If endpoint is a data endpoint, checks if bytes 423 * are ready for reading on OUT endpoint. 424 * 425 * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint 426 */ 427 static int ep_is_empty(struct pxa_ep *ep) 428 { 429 int ret; 430 431 if (!is_ep0(ep) && ep->dir_in) 432 return -EOPNOTSUPP; 433 if (is_ep0(ep)) 434 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE); 435 else 436 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE); 437 return ret; 438 } 439 440 /** 441 * ep_is_full - checks if ep has place to write bytes 442 * @ep: udc endpoint 443 * 444 * If endpoint is not the control endpoint and is an IN endpoint, checks if 445 * there is place to write bytes into the endpoint. 446 * 447 * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint 448 */ 449 static int ep_is_full(struct pxa_ep *ep) 450 { 451 if (is_ep0(ep)) 452 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR); 453 if (!ep->dir_in) 454 return -EOPNOTSUPP; 455 return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF)); 456 } 457 458 /** 459 * epout_has_pkt - checks if OUT endpoint fifo has a packet available 460 * @ep: pxa endpoint 461 * 462 * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep. 
463 */ 464 static int epout_has_pkt(struct pxa_ep *ep) 465 { 466 if (!is_ep0(ep) && ep->dir_in) 467 return -EOPNOTSUPP; 468 if (is_ep0(ep)) 469 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC); 470 return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC); 471 } 472 473 /** 474 * set_ep0state - Set ep0 automata state 475 * @udc: udc device 476 * @state: state 477 */ 478 static void set_ep0state(struct pxa_udc *udc, int state) 479 { 480 struct pxa_ep *ep = &udc->pxa_ep[0]; 481 char *old_stname = EP0_STNAME(udc); 482 483 udc->ep0state = state; 484 ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname, 485 EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR), 486 udc_ep_readl(ep, UDCBCR)); 487 } 488 489 /** 490 * ep0_idle - Put control endpoint into idle state 491 * @dev: udc device 492 */ 493 static void ep0_idle(struct pxa_udc *dev) 494 { 495 set_ep0state(dev, WAIT_FOR_SETUP); 496 } 497 498 /** 499 * inc_ep_stats_reqs - Update ep stats counts 500 * @ep: physical endpoint 501 * @is_in: ep direction (USB_DIR_IN or 0) 502 * 503 */ 504 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in) 505 { 506 if (is_in) 507 ep->stats.in_ops++; 508 else 509 ep->stats.out_ops++; 510 } 511 512 /** 513 * inc_ep_stats_bytes - Update ep stats counts 514 * @ep: physical endpoint 515 * @count: bytes transferred on endpoint 516 * @is_in: ep direction (USB_DIR_IN or 0) 517 */ 518 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in) 519 { 520 if (is_in) 521 ep->stats.in_bytes += count; 522 else 523 ep->stats.out_bytes += count; 524 } 525 526 /** 527 * pxa_ep_setup - Sets up an usb physical endpoint 528 * @ep: pxa27x physical endpoint 529 * 530 * Find the physical pxa27x ep, and setup its UDCCR 531 */ 532 static void pxa_ep_setup(struct pxa_ep *ep) 533 { 534 u32 new_udccr; 535 536 new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN) 537 | ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN) 538 | ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN) 539 | ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN) 540 | ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET) 541 | ((ep->dir_in) ? UDCCONR_ED : 0) 542 | ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS) 543 | UDCCONR_EE; 544 545 udc_ep_writel(ep, UDCCR, new_udccr); 546 } 547 548 /** 549 * pxa_eps_setup - Sets up all usb physical endpoints 550 * @dev: udc device 551 * 552 * Setup all pxa physical endpoints, except ep0 553 */ 554 static void pxa_eps_setup(struct pxa_udc *dev) 555 { 556 unsigned int i; 557 558 dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev); 559 560 for (i = 1; i < NR_PXA_ENDPOINTS; i++) 561 pxa_ep_setup(&dev->pxa_ep[i]); 562 } 563 564 /** 565 * pxa_ep_alloc_request - Allocate usb request 566 * @_ep: usb endpoint 567 * @gfp_flags: 568 * 569 * For the pxa27x, these can just wrap kmalloc/kfree. gadget drivers 570 * must still pass correctly initialized endpoints, since other controller 571 * drivers may care about how it's currently set up (dma issues etc). 
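 *
 * Rough sketch of how a gadget driver reaches this through the generic
 * gadget API (buffer and completion names below are purely illustrative):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf      = my_buf;
 *	req->length   = my_len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);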
572 */ 573 static struct usb_request * 574 pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 575 { 576 struct pxa27x_request *req; 577 578 req = kzalloc(sizeof *req, gfp_flags); 579 if (!req) 580 return NULL; 581 582 INIT_LIST_HEAD(&req->queue); 583 req->in_use = 0; 584 req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 585 586 return &req->req; 587 } 588 589 /** 590 * pxa_ep_free_request - Free usb request 591 * @_ep: usb endpoint 592 * @_req: usb request 593 * 594 * Wrapper around kfree to free _req 595 */ 596 static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) 597 { 598 struct pxa27x_request *req; 599 600 req = container_of(_req, struct pxa27x_request, req); 601 WARN_ON(!list_empty(&req->queue)); 602 kfree(req); 603 } 604 605 /** 606 * ep_add_request - add a request to the endpoint's queue 607 * @ep: usb endpoint 608 * @req: usb request 609 * 610 * Context: ep->lock held 611 * 612 * Queues the request in the endpoint's queue, and enables the interrupts 613 * on the endpoint. 614 */ 615 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req) 616 { 617 if (unlikely(!req)) 618 return; 619 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, 620 req->req.length, udc_ep_readl(ep, UDCCSR)); 621 622 req->in_use = 1; 623 list_add_tail(&req->queue, &ep->queue); 624 pio_irq_enable(ep); 625 } 626 627 /** 628 * ep_del_request - removes a request from the endpoint's queue 629 * @ep: usb endpoint 630 * @req: usb request 631 * 632 * Context: ep->lock held 633 * 634 * Unqueue the request from the endpoint's queue. If there are no more requests 635 * on the endpoint, and if it's not the control endpoint, interrupts are 636 * disabled on the endpoint. 637 */ 638 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req) 639 { 640 if (unlikely(!req)) 641 return; 642 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, 643 req->req.length, udc_ep_readl(ep, UDCCSR)); 644 645 list_del_init(&req->queue); 646 req->in_use = 0; 647 if (!is_ep0(ep) && list_empty(&ep->queue)) 648 pio_irq_disable(ep); 649 } 650 651 /** 652 * req_done - Complete an usb request 653 * @ep: pxa physical endpoint 654 * @req: pxa request 655 * @status: usb request status sent to gadget API 656 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 657 * 658 * Context: ep->lock held if flags not NULL, else ep->lock released 659 * 660 * Retire a pxa27x usb request. Endpoint must be locked. 661 */ 662 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status, 663 unsigned long *pflags) 664 { 665 unsigned long flags; 666 667 ep_del_request(ep, req); 668 if (likely(req->req.status == -EINPROGRESS)) 669 req->req.status = status; 670 else 671 status = req->req.status; 672 673 if (status && status != -ESHUTDOWN) 674 ep_dbg(ep, "complete req %p stat %d len %u/%u\n", 675 &req->req, status, 676 req->req.actual, req->req.length); 677 678 if (pflags) 679 spin_unlock_irqrestore(&ep->lock, *pflags); 680 local_irq_save(flags); 681 usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req); 682 local_irq_restore(flags); 683 if (pflags) 684 spin_lock_irqsave(&ep->lock, *pflags); 685 } 686 687 /** 688 * ep_end_out_req - Ends endpoint OUT request 689 * @ep: physical endpoint 690 * @req: pxa request 691 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 692 * 693 * Context: ep->lock held or released (see req_done()) 694 * 695 * Ends endpoint OUT request (completes usb request). 
696 */ 697 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, 698 unsigned long *pflags) 699 { 700 inc_ep_stats_reqs(ep, !USB_DIR_IN); 701 req_done(ep, req, 0, pflags); 702 } 703 704 /** 705 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) 706 * @ep: physical endpoint 707 * @req: pxa request 708 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 709 * 710 * Context: ep->lock held or released (see req_done()) 711 * 712 * Ends control endpoint OUT request (completes usb request), and puts 713 * control endpoint into idle state 714 */ 715 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, 716 unsigned long *pflags) 717 { 718 set_ep0state(ep->dev, OUT_STATUS_STAGE); 719 ep_end_out_req(ep, req, pflags); 720 ep0_idle(ep->dev); 721 } 722 723 /** 724 * ep_end_in_req - Ends endpoint IN request 725 * @ep: physical endpoint 726 * @req: pxa request 727 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 728 * 729 * Context: ep->lock held or released (see req_done()) 730 * 731 * Ends endpoint IN request (completes usb request). 732 */ 733 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, 734 unsigned long *pflags) 735 { 736 inc_ep_stats_reqs(ep, USB_DIR_IN); 737 req_done(ep, req, 0, pflags); 738 } 739 740 /** 741 * ep0_end_in_req - Ends control endpoint IN request (ends data stage) 742 * @ep: physical endpoint 743 * @req: pxa request 744 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held 745 * 746 * Context: ep->lock held or released (see req_done()) 747 * 748 * Ends control endpoint IN request (completes usb request), and puts 749 * control endpoint into status state 750 */ 751 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, 752 unsigned long *pflags) 753 { 754 set_ep0state(ep->dev, IN_STATUS_STAGE); 755 ep_end_in_req(ep, req, pflags); 756 } 757 758 /** 759 * nuke - Dequeue all requests 760 * @ep: pxa endpoint 761 * @status: usb request status 762 * 763 * Context: ep->lock released 764 * 765 * Dequeues all requests on an endpoint. As a side effect, interrupts will be 766 * disabled on that endpoint (because no more requests). 767 */ 768 static void nuke(struct pxa_ep *ep, int status) 769 { 770 struct pxa27x_request *req; 771 unsigned long flags; 772 773 spin_lock_irqsave(&ep->lock, flags); 774 while (!list_empty(&ep->queue)) { 775 req = list_entry(ep->queue.next, struct pxa27x_request, queue); 776 req_done(ep, req, status, &flags); 777 } 778 spin_unlock_irqrestore(&ep->lock, flags); 779 } 780 781 /** 782 * read_packet - transfer 1 packet from an OUT endpoint into request 783 * @ep: pxa physical endpoint 784 * @req: usb request 785 * 786 * Takes bytes from OUT endpoint and transfers them info the usb request. 787 * If there is less space in request than bytes received in OUT endpoint, 788 * bytes are left in the OUT endpoint. 
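 * Note that the fifo is drained in 32-bit words, so the copy loop may store
 * up to three bytes beyond the returned count into the request buffer.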
789 * 790 * Returns how many bytes were actually transferred 791 */ 792 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req) 793 { 794 u32 *buf; 795 int bytes_ep, bufferspace, count, i; 796 797 bytes_ep = ep_count_bytes_remain(ep); 798 bufferspace = req->req.length - req->req.actual; 799 800 buf = (u32 *)(req->req.buf + req->req.actual); 801 prefetchw(buf); 802 803 if (likely(!ep_is_empty(ep))) 804 count = min(bytes_ep, bufferspace); 805 else /* zlp */ 806 count = 0; 807 808 for (i = count; i > 0; i -= 4) 809 *buf++ = udc_ep_readl(ep, UDCDR); 810 req->req.actual += count; 811 812 ep_write_UDCCSR(ep, UDCCSR_PC); 813 814 return count; 815 } 816 817 /** 818 * write_packet - transfer 1 packet from request into an IN endpoint 819 * @ep: pxa physical endpoint 820 * @req: usb request 821 * @max: max bytes that fit into endpoint 822 * 823 * Takes bytes from usb request, and transfers them into the physical 824 * endpoint. If there are no bytes to transfer, doesn't write anything 825 * to physical endpoint. 826 * 827 * Returns how many bytes were actually transferred. 828 */ 829 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req, 830 unsigned int max) 831 { 832 int length, count, remain, i; 833 u32 *buf; 834 u8 *buf_8; 835 836 buf = (u32 *)(req->req.buf + req->req.actual); 837 prefetch(buf); 838 839 length = min(req->req.length - req->req.actual, max); 840 req->req.actual += length; 841 842 remain = length & 0x3; 843 count = length & ~(0x3); 844 for (i = count; i > 0 ; i -= 4) 845 udc_ep_writel(ep, UDCDR, *buf++); 846 847 buf_8 = (u8 *)buf; 848 for (i = remain; i > 0; i--) 849 udc_ep_writeb(ep, UDCDR, *buf_8++); 850 851 ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain, 852 udc_ep_readl(ep, UDCCSR)); 853 854 return length; 855 } 856 857 /** 858 * read_fifo - Transfer packets from OUT endpoint into usb request 859 * @ep: pxa physical endpoint 860 * @req: usb request 861 * 862 * Context: interrupt handler 863 * 864 * Unload as many packets as possible from the fifo we use for usb OUT 865 * transfers and put them into the request. Caller should have made sure 866 * there's at least one packet ready. 867 * Doesn't complete the request, that's the caller's job 868 * 869 * Returns 1 if the request completed, 0 otherwise 870 */ 871 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 872 { 873 int count, is_short, completed = 0; 874 875 while (epout_has_pkt(ep)) { 876 count = read_packet(ep, req); 877 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); 878 879 is_short = (count < ep->fifo_size); 880 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", 881 udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "", 882 &req->req, req->req.actual, req->req.length); 883 884 /* completion */ 885 if (is_short || req->req.actual == req->req.length) { 886 completed = 1; 887 break; 888 } 889 /* finished that packet. the next one may be waiting... */ 890 } 891 return completed; 892 } 893 894 /** 895 * write_fifo - transfer packets from usb request into an IN endpoint 896 * @ep: pxa physical endpoint 897 * @req: pxa usb request 898 * 899 * Write to an IN endpoint fifo, as many packets as possible. 900 * irqs will use this to write the rest later. 901 * caller guarantees at least one packet buffer is ready (or a zlp). 
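 * A short or zero length packet is pushed out by setting UDCCSR_SP, which is
 * also how req->zero is honoured at the end of a transfer.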
902 * Doesn't complete the request, that's the caller's job 903 * 904 * Returns 1 if request fully transferred, 0 if partial transfer 905 */ 906 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 907 { 908 unsigned max; 909 int count, is_short, is_last = 0, completed = 0, totcount = 0; 910 u32 udccsr; 911 912 max = ep->fifo_size; 913 do { 914 udccsr = udc_ep_readl(ep, UDCCSR); 915 if (udccsr & UDCCSR_PC) { 916 ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n", 917 udccsr); 918 ep_write_UDCCSR(ep, UDCCSR_PC); 919 } 920 if (udccsr & UDCCSR_TRN) { 921 ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n", 922 udccsr); 923 ep_write_UDCCSR(ep, UDCCSR_TRN); 924 } 925 926 count = write_packet(ep, req, max); 927 inc_ep_stats_bytes(ep, count, USB_DIR_IN); 928 totcount += count; 929 930 /* last packet is usually short (or a zlp) */ 931 if (unlikely(count < max)) { 932 is_last = 1; 933 is_short = 1; 934 } else { 935 if (likely(req->req.length > req->req.actual) 936 || req->req.zero) 937 is_last = 0; 938 else 939 is_last = 1; 940 /* interrupt/iso maxpacket may not fill the fifo */ 941 is_short = unlikely(max < ep->fifo_size); 942 } 943 944 if (is_short) 945 ep_write_UDCCSR(ep, UDCCSR_SP); 946 947 /* requests complete when all IN data is in the FIFO */ 948 if (is_last) { 949 completed = 1; 950 break; 951 } 952 } while (!ep_is_full(ep)); 953 954 ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n", 955 totcount, is_last ? "/L" : "", is_short ? "/S" : "", 956 req->req.length - req->req.actual, &req->req); 957 958 return completed; 959 } 960 961 /** 962 * read_ep0_fifo - Transfer packets from control endpoint into usb request 963 * @ep: control endpoint 964 * @req: pxa usb request 965 * 966 * Special ep0 version of the above read_fifo. Reads as many bytes from control 967 * endpoint as can be read, and stores them into usb request (limited by request 968 * maximum length). 969 * 970 * Returns 0 if usb request only partially filled, 1 if fully filled 971 */ 972 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 973 { 974 int count, is_short, completed = 0; 975 976 while (epout_has_pkt(ep)) { 977 count = read_packet(ep, req); 978 ep_write_UDCCSR(ep, UDCCSR0_OPC); 979 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); 980 981 is_short = (count < ep->fifo_size); 982 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", 983 udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "", 984 &req->req, req->req.actual, req->req.length); 985 986 if (is_short || req->req.actual >= req->req.length) { 987 completed = 1; 988 break; 989 } 990 } 991 992 return completed; 993 } 994 995 /** 996 * write_ep0_fifo - Send a request to control endpoint (ep0 in) 997 * @ep: control endpoint 998 * @req: request 999 * 1000 * Context: interrupt handler 1001 * 1002 * Sends a request (or a part of the request) to the control endpoint (ep0 in). 1003 * If the request doesn't fit, the remaining part will be sent from irq. 
1004 * The request is considered fully written only if either : 1005 * - last write transferred all remaining bytes, but fifo was not fully filled 1006 * - last write was a 0 length write 1007 * 1008 * Returns 1 if request fully written, 0 if request only partially sent 1009 */ 1010 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) 1011 { 1012 unsigned count; 1013 int is_last, is_short; 1014 1015 count = write_packet(ep, req, EP0_FIFO_SIZE); 1016 inc_ep_stats_bytes(ep, count, USB_DIR_IN); 1017 1018 is_short = (count < EP0_FIFO_SIZE); 1019 is_last = ((count == 0) || (count < EP0_FIFO_SIZE)); 1020 1021 /* Sends either a short packet or a 0 length packet */ 1022 if (unlikely(is_short)) 1023 ep_write_UDCCSR(ep, UDCCSR0_IPR); 1024 1025 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n", 1026 count, is_short ? "/S" : "", is_last ? "/L" : "", 1027 req->req.length - req->req.actual, 1028 &req->req, udc_ep_readl(ep, UDCCSR)); 1029 1030 return is_last; 1031 } 1032 1033 /** 1034 * pxa_ep_queue - Queue a request into an IN endpoint 1035 * @_ep: usb endpoint 1036 * @_req: usb request 1037 * @gfp_flags: flags 1038 * 1039 * Context: thread context or from the interrupt handler in the 1040 * special case of ep0 setup : 1041 * (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue) 1042 * 1043 * Returns 0 if succedeed, error otherwise 1044 */ 1045 static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, 1046 gfp_t gfp_flags) 1047 { 1048 struct udc_usb_ep *udc_usb_ep; 1049 struct pxa_ep *ep; 1050 struct pxa27x_request *req; 1051 struct pxa_udc *dev; 1052 unsigned long flags; 1053 int rc = 0; 1054 int is_first_req; 1055 unsigned length; 1056 int recursion_detected; 1057 1058 req = container_of(_req, struct pxa27x_request, req); 1059 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1060 1061 if (unlikely(!_req || !_req->complete || !_req->buf)) 1062 return -EINVAL; 1063 1064 if (unlikely(!_ep)) 1065 return -EINVAL; 1066 1067 ep = udc_usb_ep->pxa_ep; 1068 if (unlikely(!ep)) 1069 return -EINVAL; 1070 1071 dev = ep->dev; 1072 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) { 1073 ep_dbg(ep, "bogus device state\n"); 1074 return -ESHUTDOWN; 1075 } 1076 1077 /* iso is always one packet per request, that's the only way 1078 * we can report per-packet status. that also helps with dma. 1079 */ 1080 if (unlikely(EPXFERTYPE_is_ISO(ep) 1081 && req->req.length > ep->fifo_size)) 1082 return -EMSGSIZE; 1083 1084 spin_lock_irqsave(&ep->lock, flags); 1085 recursion_detected = ep->in_handle_ep; 1086 1087 is_first_req = list_empty(&ep->queue); 1088 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", 1089 _req, is_first_req ? 
"yes" : "no", 1090 _req->length, _req->buf); 1091 1092 if (!ep->enabled) { 1093 _req->status = -ESHUTDOWN; 1094 rc = -ESHUTDOWN; 1095 goto out_locked; 1096 } 1097 1098 if (req->in_use) { 1099 ep_err(ep, "refusing to queue req %p (already queued)\n", req); 1100 goto out_locked; 1101 } 1102 1103 length = _req->length; 1104 _req->status = -EINPROGRESS; 1105 _req->actual = 0; 1106 1107 ep_add_request(ep, req); 1108 spin_unlock_irqrestore(&ep->lock, flags); 1109 1110 if (is_ep0(ep)) { 1111 switch (dev->ep0state) { 1112 case WAIT_ACK_SET_CONF_INTERF: 1113 if (length == 0) { 1114 ep_end_in_req(ep, req, NULL); 1115 } else { 1116 ep_err(ep, "got a request of %d bytes while" 1117 "in state WAIT_ACK_SET_CONF_INTERF\n", 1118 length); 1119 ep_del_request(ep, req); 1120 rc = -EL2HLT; 1121 } 1122 ep0_idle(ep->dev); 1123 break; 1124 case IN_DATA_STAGE: 1125 if (!ep_is_full(ep)) 1126 if (write_ep0_fifo(ep, req)) 1127 ep0_end_in_req(ep, req, NULL); 1128 break; 1129 case OUT_DATA_STAGE: 1130 if ((length == 0) || !epout_has_pkt(ep)) 1131 if (read_ep0_fifo(ep, req)) 1132 ep0_end_out_req(ep, req, NULL); 1133 break; 1134 default: 1135 ep_err(ep, "odd state %s to send me a request\n", 1136 EP0_STNAME(ep->dev)); 1137 ep_del_request(ep, req); 1138 rc = -EL2HLT; 1139 break; 1140 } 1141 } else { 1142 if (!recursion_detected) 1143 handle_ep(ep); 1144 } 1145 1146 out: 1147 return rc; 1148 out_locked: 1149 spin_unlock_irqrestore(&ep->lock, flags); 1150 goto out; 1151 } 1152 1153 /** 1154 * pxa_ep_dequeue - Dequeue one request 1155 * @_ep: usb endpoint 1156 * @_req: usb request 1157 * 1158 * Return 0 if no error, -EINVAL or -ECONNRESET otherwise 1159 */ 1160 static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1161 { 1162 struct pxa_ep *ep; 1163 struct udc_usb_ep *udc_usb_ep; 1164 struct pxa27x_request *req; 1165 unsigned long flags; 1166 int rc = -EINVAL; 1167 1168 if (!_ep) 1169 return rc; 1170 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1171 ep = udc_usb_ep->pxa_ep; 1172 if (!ep || is_ep0(ep)) 1173 return rc; 1174 1175 spin_lock_irqsave(&ep->lock, flags); 1176 1177 /* make sure it's actually queued on this endpoint */ 1178 list_for_each_entry(req, &ep->queue, queue) { 1179 if (&req->req == _req) { 1180 rc = 0; 1181 break; 1182 } 1183 } 1184 1185 spin_unlock_irqrestore(&ep->lock, flags); 1186 if (!rc) 1187 req_done(ep, req, -ECONNRESET, NULL); 1188 return rc; 1189 } 1190 1191 /** 1192 * pxa_ep_set_halt - Halts operations on one endpoint 1193 * @_ep: usb endpoint 1194 * @value: 1195 * 1196 * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise 1197 */ 1198 static int pxa_ep_set_halt(struct usb_ep *_ep, int value) 1199 { 1200 struct pxa_ep *ep; 1201 struct udc_usb_ep *udc_usb_ep; 1202 unsigned long flags; 1203 int rc; 1204 1205 1206 if (!_ep) 1207 return -EINVAL; 1208 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1209 ep = udc_usb_ep->pxa_ep; 1210 if (!ep || is_ep0(ep)) 1211 return -EINVAL; 1212 1213 if (value == 0) { 1214 /* 1215 * This path (reset toggle+halt) is needed to implement 1216 * SET_INTERFACE on normal hardware. but it can't be 1217 * done from software on the PXA UDC, and the hardware 1218 * forgets to do it as part of SET_INTERFACE automagic. 
1219 */ 1220 ep_dbg(ep, "only host can clear halt\n"); 1221 return -EROFS; 1222 } 1223 1224 spin_lock_irqsave(&ep->lock, flags); 1225 1226 rc = -EAGAIN; 1227 if (ep->dir_in && (ep_is_full(ep) || !list_empty(&ep->queue))) 1228 goto out; 1229 1230 /* FST, FEF bits are the same for control and non control endpoints */ 1231 rc = 0; 1232 ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF); 1233 if (is_ep0(ep)) 1234 set_ep0state(ep->dev, STALL); 1235 1236 out: 1237 spin_unlock_irqrestore(&ep->lock, flags); 1238 return rc; 1239 } 1240 1241 /** 1242 * pxa_ep_fifo_status - Get how many bytes in physical endpoint 1243 * @_ep: usb endpoint 1244 * 1245 * Returns number of bytes in OUT fifos. Broken for IN fifos. 1246 */ 1247 static int pxa_ep_fifo_status(struct usb_ep *_ep) 1248 { 1249 struct pxa_ep *ep; 1250 struct udc_usb_ep *udc_usb_ep; 1251 1252 if (!_ep) 1253 return -ENODEV; 1254 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1255 ep = udc_usb_ep->pxa_ep; 1256 if (!ep || is_ep0(ep)) 1257 return -ENODEV; 1258 1259 if (ep->dir_in) 1260 return -EOPNOTSUPP; 1261 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep)) 1262 return 0; 1263 else 1264 return ep_count_bytes_remain(ep) + 1; 1265 } 1266 1267 /** 1268 * pxa_ep_fifo_flush - Flushes one endpoint 1269 * @_ep: usb endpoint 1270 * 1271 * Discards all data in one endpoint(IN or OUT), except control endpoint. 1272 */ 1273 static void pxa_ep_fifo_flush(struct usb_ep *_ep) 1274 { 1275 struct pxa_ep *ep; 1276 struct udc_usb_ep *udc_usb_ep; 1277 unsigned long flags; 1278 1279 if (!_ep) 1280 return; 1281 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1282 ep = udc_usb_ep->pxa_ep; 1283 if (!ep || is_ep0(ep)) 1284 return; 1285 1286 spin_lock_irqsave(&ep->lock, flags); 1287 1288 if (unlikely(!list_empty(&ep->queue))) 1289 ep_dbg(ep, "called while queue list not empty\n"); 1290 ep_dbg(ep, "called\n"); 1291 1292 /* for OUT, just read and discard the FIFO contents. */ 1293 if (!ep->dir_in) { 1294 while (!ep_is_empty(ep)) 1295 udc_ep_readl(ep, UDCDR); 1296 } else { 1297 /* most IN status is the same, but ISO can't stall */ 1298 ep_write_UDCCSR(ep, 1299 UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN 1300 | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST)); 1301 } 1302 1303 spin_unlock_irqrestore(&ep->lock, flags); 1304 } 1305 1306 /** 1307 * pxa_ep_enable - Enables usb endpoint 1308 * @_ep: usb endpoint 1309 * @desc: usb endpoint descriptor 1310 * 1311 * Nothing much to do here, as ep configuration is done once and for all 1312 * before udc is enabled. After udc enable, no physical endpoint configuration 1313 * can be changed. 1314 * Function makes sanity checks and flushes the endpoint. 
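 * From the gadget driver's point of view this is the usual usb_ep_enable()
 * path; the only work left here is matching the descriptor against the
 * statically configured pxa_ep, sanity checking it and flushing the fifo.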
1315 */ 1316 static int pxa_ep_enable(struct usb_ep *_ep, 1317 const struct usb_endpoint_descriptor *desc) 1318 { 1319 struct pxa_ep *ep; 1320 struct udc_usb_ep *udc_usb_ep; 1321 struct pxa_udc *udc; 1322 1323 if (!_ep || !desc) 1324 return -EINVAL; 1325 1326 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1327 if (udc_usb_ep->pxa_ep) { 1328 ep = udc_usb_ep->pxa_ep; 1329 ep_warn(ep, "usb_ep %s already enabled, doing nothing\n", 1330 _ep->name); 1331 } else { 1332 ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep); 1333 } 1334 1335 if (!ep || is_ep0(ep)) { 1336 dev_err(udc_usb_ep->dev->dev, 1337 "unable to match pxa_ep for ep %s\n", 1338 _ep->name); 1339 return -EINVAL; 1340 } 1341 1342 if ((desc->bDescriptorType != USB_DT_ENDPOINT) 1343 || (ep->type != usb_endpoint_type(desc))) { 1344 ep_err(ep, "type mismatch\n"); 1345 return -EINVAL; 1346 } 1347 1348 if (ep->fifo_size < usb_endpoint_maxp(desc)) { 1349 ep_err(ep, "bad maxpacket\n"); 1350 return -ERANGE; 1351 } 1352 1353 udc_usb_ep->pxa_ep = ep; 1354 udc = ep->dev; 1355 1356 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) { 1357 ep_err(ep, "bogus device state\n"); 1358 return -ESHUTDOWN; 1359 } 1360 1361 ep->enabled = 1; 1362 1363 /* flush fifo (mostly for OUT buffers) */ 1364 pxa_ep_fifo_flush(_ep); 1365 1366 ep_dbg(ep, "enabled\n"); 1367 return 0; 1368 } 1369 1370 /** 1371 * pxa_ep_disable - Disable usb endpoint 1372 * @_ep: usb endpoint 1373 * 1374 * Same as for pxa_ep_enable, no physical endpoint configuration can be 1375 * changed. 1376 * Function flushes the endpoint and related requests. 1377 */ 1378 static int pxa_ep_disable(struct usb_ep *_ep) 1379 { 1380 struct pxa_ep *ep; 1381 struct udc_usb_ep *udc_usb_ep; 1382 1383 if (!_ep) 1384 return -EINVAL; 1385 1386 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1387 ep = udc_usb_ep->pxa_ep; 1388 if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) 1389 return -EINVAL; 1390 1391 ep->enabled = 0; 1392 nuke(ep, -ESHUTDOWN); 1393 1394 pxa_ep_fifo_flush(_ep); 1395 udc_usb_ep->pxa_ep = NULL; 1396 1397 ep_dbg(ep, "disabled\n"); 1398 return 0; 1399 } 1400 1401 static const struct usb_ep_ops pxa_ep_ops = { 1402 .enable = pxa_ep_enable, 1403 .disable = pxa_ep_disable, 1404 1405 .alloc_request = pxa_ep_alloc_request, 1406 .free_request = pxa_ep_free_request, 1407 1408 .queue = pxa_ep_queue, 1409 .dequeue = pxa_ep_dequeue, 1410 1411 .set_halt = pxa_ep_set_halt, 1412 .fifo_status = pxa_ep_fifo_status, 1413 .fifo_flush = pxa_ep_fifo_flush, 1414 }; 1415 1416 /** 1417 * dplus_pullup - Connect or disconnect pullup resistor to D+ pin 1418 * @udc: udc device 1419 * @on: 0 if disconnect pullup resistor, 1 otherwise 1420 * Context: any 1421 * 1422 * Handle D+ pullup resistor, make the device visible to the usb bus, and 1423 * declare it as a full speed usb device 1424 */ 1425 static void dplus_pullup(struct pxa_udc *udc, int on) 1426 { 1427 if (udc->gpiod) { 1428 gpiod_set_value(udc->gpiod, on); 1429 } else if (udc->udc_command) { 1430 if (on) 1431 udc->udc_command(PXA2XX_UDC_CMD_CONNECT); 1432 else 1433 udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 1434 } 1435 udc->pullup_on = on; 1436 } 1437 1438 /** 1439 * pxa_udc_get_frame - Returns usb frame number 1440 * @_gadget: usb gadget 1441 */ 1442 static int pxa_udc_get_frame(struct usb_gadget *_gadget) 1443 { 1444 struct pxa_udc *udc = to_gadget_udc(_gadget); 1445 1446 return (udc_readl(udc, UDCFNR) & 0x7ff); 1447 } 1448 1449 /** 1450 * pxa_udc_wakeup - Force udc device out of suspend 1451 * @_gadget: usb gadget 1452 * 1453 * Returns 
0 if successful, error code otherwise 1454 */ 1455 static int pxa_udc_wakeup(struct usb_gadget *_gadget) 1456 { 1457 struct pxa_udc *udc = to_gadget_udc(_gadget); 1458 1459 /* host may not have enabled remote wakeup */ 1460 if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0) 1461 return -EHOSTUNREACH; 1462 udc_set_mask_UDCCR(udc, UDCCR_UDR); 1463 return 0; 1464 } 1465 1466 static void udc_enable(struct pxa_udc *udc); 1467 static void udc_disable(struct pxa_udc *udc); 1468 1469 /** 1470 * should_enable_udc - Tells if UDC should be enabled 1471 * @udc: udc device 1472 * Context: any 1473 * 1474 * The UDC should be enabled if : 1475 * - the pullup resistor is connected 1476 * - and a gadget driver is bound 1477 * - and vbus is sensed (or no vbus sense is available) 1478 * 1479 * Returns 1 if UDC should be enabled, 0 otherwise 1480 */ 1481 static int should_enable_udc(struct pxa_udc *udc) 1482 { 1483 int put_on; 1484 1485 put_on = ((udc->pullup_on) && (udc->driver)); 1486 put_on &= ((udc->vbus_sensed) || (IS_ERR_OR_NULL(udc->transceiver))); 1487 return put_on; 1488 } 1489 1490 /** 1491 * should_disable_udc - Tells if UDC should be disabled 1492 * @udc: udc device 1493 * Context: any 1494 * 1495 * The UDC should be disabled if : 1496 * - the pullup resistor is not connected 1497 * - or no gadget driver is bound 1498 * - or no vbus is sensed (when vbus sesing is available) 1499 * 1500 * Returns 1 if UDC should be disabled 1501 */ 1502 static int should_disable_udc(struct pxa_udc *udc) 1503 { 1504 int put_off; 1505 1506 put_off = ((!udc->pullup_on) || (!udc->driver)); 1507 put_off |= ((!udc->vbus_sensed) && (!IS_ERR_OR_NULL(udc->transceiver))); 1508 return put_off; 1509 } 1510 1511 /** 1512 * pxa_udc_pullup - Offer manual D+ pullup control 1513 * @_gadget: usb gadget using the control 1514 * @is_active: 0 if disconnect, else connect D+ pullup resistor 1515 * 1516 * Context: task context, might sleep 1517 * 1518 * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup 1519 */ 1520 static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active) 1521 { 1522 struct pxa_udc *udc = to_gadget_udc(_gadget); 1523 1524 if (!udc->gpiod && !udc->udc_command) 1525 return -EOPNOTSUPP; 1526 1527 dplus_pullup(udc, is_active); 1528 1529 if (should_enable_udc(udc)) 1530 udc_enable(udc); 1531 if (should_disable_udc(udc)) 1532 udc_disable(udc); 1533 return 0; 1534 } 1535 1536 /** 1537 * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc 1538 * @_gadget: usb gadget 1539 * @is_active: 0 if should disable the udc, 1 if should enable 1540 * 1541 * Enables the udc, and optionnaly activates D+ pullup resistor. Or disables the 1542 * udc, and deactivates D+ pullup resistor. 1543 * 1544 * Returns 0 1545 */ 1546 static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active) 1547 { 1548 struct pxa_udc *udc = to_gadget_udc(_gadget); 1549 1550 udc->vbus_sensed = is_active; 1551 if (should_enable_udc(udc)) 1552 udc_enable(udc); 1553 if (should_disable_udc(udc)) 1554 udc_disable(udc); 1555 1556 return 0; 1557 } 1558 1559 /** 1560 * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed 1561 * @_gadget: usb gadget 1562 * @mA: current drawn 1563 * 1564 * Context: task context, might sleep 1565 * 1566 * Called after a configuration was chosen by a USB host, to inform how much 1567 * current can be drawn by the device from VBus line. 
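 * (Gadget drivers normally reach this through usb_gadget_vbus_draw(); the
 * call is simply forwarded to the transceiver when one is present.)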
1568 * 1569 * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc 1570 */ 1571 static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA) 1572 { 1573 struct pxa_udc *udc; 1574 1575 udc = to_gadget_udc(_gadget); 1576 if (!IS_ERR_OR_NULL(udc->transceiver)) 1577 return usb_phy_set_power(udc->transceiver, mA); 1578 return -EOPNOTSUPP; 1579 } 1580 1581 /** 1582 * pxa_udc_phy_event - Called by phy upon VBus event 1583 * @nb: notifier block 1584 * @action: phy action, is vbus connect or disconnect 1585 * @data: the usb_gadget structure in pxa_udc 1586 * 1587 * Called by the USB Phy when a cable connect or disconnect is sensed. 1588 * 1589 * Returns 0 1590 */ 1591 static int pxa_udc_phy_event(struct notifier_block *nb, unsigned long action, 1592 void *data) 1593 { 1594 struct usb_gadget *gadget = data; 1595 1596 switch (action) { 1597 case USB_EVENT_VBUS: 1598 usb_gadget_vbus_connect(gadget); 1599 return NOTIFY_OK; 1600 case USB_EVENT_NONE: 1601 usb_gadget_vbus_disconnect(gadget); 1602 return NOTIFY_OK; 1603 default: 1604 return NOTIFY_DONE; 1605 } 1606 } 1607 1608 static struct notifier_block pxa27x_udc_phy = { 1609 .notifier_call = pxa_udc_phy_event, 1610 }; 1611 1612 static int pxa27x_udc_start(struct usb_gadget *g, 1613 struct usb_gadget_driver *driver); 1614 static int pxa27x_udc_stop(struct usb_gadget *g); 1615 1616 static const struct usb_gadget_ops pxa_udc_ops = { 1617 .get_frame = pxa_udc_get_frame, 1618 .wakeup = pxa_udc_wakeup, 1619 .pullup = pxa_udc_pullup, 1620 .vbus_session = pxa_udc_vbus_session, 1621 .vbus_draw = pxa_udc_vbus_draw, 1622 .udc_start = pxa27x_udc_start, 1623 .udc_stop = pxa27x_udc_stop, 1624 }; 1625 1626 /** 1627 * udc_disable - disable udc device controller 1628 * @udc: udc device 1629 * Context: any 1630 * 1631 * Disables the udc device : disables clocks, udc interrupts, control endpoint 1632 * interrupts. 1633 */ 1634 static void udc_disable(struct pxa_udc *udc) 1635 { 1636 if (!udc->enabled) 1637 return; 1638 1639 udc_writel(udc, UDCICR0, 0); 1640 udc_writel(udc, UDCICR1, 0); 1641 1642 udc_clear_mask_UDCCR(udc, UDCCR_UDE); 1643 1644 ep0_idle(udc); 1645 udc->gadget.speed = USB_SPEED_UNKNOWN; 1646 clk_disable(udc->clk); 1647 1648 udc->enabled = 0; 1649 } 1650 1651 /** 1652 * udc_init_data - Initialize udc device data structures 1653 * @dev: udc device 1654 * 1655 * Initializes gadget endpoint list, endpoints locks. No action is taken 1656 * on the hardware. 1657 */ 1658 static void udc_init_data(struct pxa_udc *dev) 1659 { 1660 int i; 1661 struct pxa_ep *ep; 1662 1663 /* device/ep0 records init */ 1664 INIT_LIST_HEAD(&dev->gadget.ep_list); 1665 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 1666 dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0]; 1667 dev->gadget.quirk_altset_not_supp = 1; 1668 ep0_idle(dev); 1669 1670 /* PXA endpoints init */ 1671 for (i = 0; i < NR_PXA_ENDPOINTS; i++) { 1672 ep = &dev->pxa_ep[i]; 1673 1674 ep->enabled = is_ep0(ep); 1675 INIT_LIST_HEAD(&ep->queue); 1676 spin_lock_init(&ep->lock); 1677 } 1678 1679 /* USB endpoints init */ 1680 for (i = 1; i < NR_USB_ENDPOINTS; i++) { 1681 list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list, 1682 &dev->gadget.ep_list); 1683 usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep, 1684 dev->udc_usb_ep[i].usb_ep.maxpacket); 1685 } 1686 } 1687 1688 /** 1689 * udc_enable - Enables the udc device 1690 * @udc: udc device 1691 * 1692 * Enables the udc device : enables clocks, udc interrupts, control endpoint 1693 * interrupts, sets usb as UDC client and setups endpoints. 
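 *
 * If the endpoint configuration programmed by pxa_eps_setup() is
 * inconsistent, the controller reports it through UDCCR_EMCE right after
 * UDCCR_UDE is set, and an error is logged.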
1694 */ 1695 static void udc_enable(struct pxa_udc *udc) 1696 { 1697 if (udc->enabled) 1698 return; 1699 1700 clk_enable(udc->clk); 1701 udc_writel(udc, UDCICR0, 0); 1702 udc_writel(udc, UDCICR1, 0); 1703 udc_clear_mask_UDCCR(udc, UDCCR_UDE); 1704 1705 ep0_idle(udc); 1706 udc->gadget.speed = USB_SPEED_FULL; 1707 memset(&udc->stats, 0, sizeof(udc->stats)); 1708 1709 pxa_eps_setup(udc); 1710 udc_set_mask_UDCCR(udc, UDCCR_UDE); 1711 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM); 1712 udelay(2); 1713 if (udc_readl(udc, UDCCR) & UDCCR_EMCE) 1714 dev_err(udc->dev, "Configuration errors, udc disabled\n"); 1715 1716 /* 1717 * Caller must be able to sleep in order to cope with startup transients 1718 */ 1719 msleep(100); 1720 1721 /* enable suspend/resume and reset irqs */ 1722 udc_writel(udc, UDCICR1, 1723 UDCICR1_IECC | UDCICR1_IERU 1724 | UDCICR1_IESU | UDCICR1_IERS); 1725 1726 /* enable ep0 irqs */ 1727 pio_irq_enable(&udc->pxa_ep[0]); 1728 1729 udc->enabled = 1; 1730 } 1731 1732 /** 1733 * pxa27x_start - Register gadget driver 1734 * @g: gadget 1735 * @driver: gadget driver 1736 * 1737 * When a driver is successfully registered, it will receive control requests 1738 * including set_configuration(), which enables non-control requests. Then 1739 * usb traffic follows until a disconnect is reported. Then a host may connect 1740 * again, or the driver might get unbound. 1741 * 1742 * Note that the udc is not automatically enabled. Check function 1743 * should_enable_udc(). 1744 * 1745 * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise 1746 */ 1747 static int pxa27x_udc_start(struct usb_gadget *g, 1748 struct usb_gadget_driver *driver) 1749 { 1750 struct pxa_udc *udc = to_pxa(g); 1751 int retval; 1752 1753 /* first hook up the driver ... */ 1754 udc->driver = driver; 1755 1756 if (!IS_ERR_OR_NULL(udc->transceiver)) { 1757 retval = otg_set_peripheral(udc->transceiver->otg, 1758 &udc->gadget); 1759 if (retval) { 1760 dev_err(udc->dev, "can't bind to transceiver\n"); 1761 goto fail; 1762 } 1763 } 1764 1765 if (should_enable_udc(udc)) 1766 udc_enable(udc); 1767 return 0; 1768 1769 fail: 1770 udc->driver = NULL; 1771 return retval; 1772 } 1773 1774 /** 1775 * stop_activity - Stops udc endpoints 1776 * @udc: udc device 1777 * 1778 * Disables all udc endpoints (even control endpoint), report disconnect to 1779 * the gadget user. 
1780 */ 1781 static void stop_activity(struct pxa_udc *udc) 1782 { 1783 int i; 1784 1785 udc->gadget.speed = USB_SPEED_UNKNOWN; 1786 1787 for (i = 0; i < NR_USB_ENDPOINTS; i++) 1788 pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep); 1789 } 1790 1791 /** 1792 * pxa27x_udc_stop - Unregister the gadget driver 1793 * @g: gadget 1794 * 1795 * Returns 0 if no error, -ENODEV, -EINVAL otherwise 1796 */ 1797 static int pxa27x_udc_stop(struct usb_gadget *g) 1798 { 1799 struct pxa_udc *udc = to_pxa(g); 1800 1801 stop_activity(udc); 1802 udc_disable(udc); 1803 1804 udc->driver = NULL; 1805 1806 if (!IS_ERR_OR_NULL(udc->transceiver)) 1807 return otg_set_peripheral(udc->transceiver->otg, NULL); 1808 return 0; 1809 } 1810 1811 /** 1812 * handle_ep0_ctrl_req - handle control endpoint control request 1813 * @udc: udc device 1814 * @req: control request 1815 */ 1816 static void handle_ep0_ctrl_req(struct pxa_udc *udc, 1817 struct pxa27x_request *req) 1818 { 1819 struct pxa_ep *ep = &udc->pxa_ep[0]; 1820 union { 1821 struct usb_ctrlrequest r; 1822 u32 word[2]; 1823 } u; 1824 int i; 1825 int have_extrabytes = 0; 1826 unsigned long flags; 1827 1828 nuke(ep, -EPROTO); 1829 spin_lock_irqsave(&ep->lock, flags); 1830 1831 /* 1832 * In the PXA320 manual, in the section about Back-to-Back setup 1833 * packets, it describes this situation. The solution is to set OPC to 1834 * get rid of the status packet, and then continue with the setup 1835 * packet. Generalize to pxa27x CPUs. 1836 */ 1837 if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0)) 1838 ep_write_UDCCSR(ep, UDCCSR0_OPC); 1839 1840 /* read SETUP packet */ 1841 for (i = 0; i < 2; i++) { 1842 if (unlikely(ep_is_empty(ep))) 1843 goto stall; 1844 u.word[i] = udc_ep_readl(ep, UDCDR); 1845 } 1846 1847 have_extrabytes = !ep_is_empty(ep); 1848 while (!ep_is_empty(ep)) { 1849 i = udc_ep_readl(ep, UDCDR); 1850 ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i); 1851 } 1852 1853 ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1854 u.r.bRequestType, u.r.bRequest, 1855 le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex), 1856 le16_to_cpu(u.r.wLength)); 1857 if (unlikely(have_extrabytes)) 1858 goto stall; 1859 1860 if (u.r.bRequestType & USB_DIR_IN) 1861 set_ep0state(udc, IN_DATA_STAGE); 1862 else 1863 set_ep0state(udc, OUT_DATA_STAGE); 1864 1865 /* Tell UDC to enter Data Stage */ 1866 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); 1867 1868 spin_unlock_irqrestore(&ep->lock, flags); 1869 i = udc->driver->setup(&udc->gadget, &u.r); 1870 spin_lock_irqsave(&ep->lock, flags); 1871 if (i < 0) 1872 goto stall; 1873 out: 1874 spin_unlock_irqrestore(&ep->lock, flags); 1875 return; 1876 stall: 1877 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", 1878 udc_ep_readl(ep, UDCCSR), i); 1879 ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF); 1880 set_ep0state(udc, STALL); 1881 goto out; 1882 } 1883 1884 /** 1885 * handle_ep0 - Handle control endpoint data transfers 1886 * @udc: udc device 1887 * @fifo_irq: 1 if triggered by fifo service type irq 1888 * @opc_irq: 1 if triggered by output packet complete type irq 1889 * 1890 * Context : interrupt handler 1891 * 1892 * Tries to transfer all pending request data into the endpoint and/or 1893 * transfer all pending data in the endpoint into usb requests. 1894 * Handles states of ep0 automata. 1895 * 1896 * PXA27x hardware handles several standard usb control requests without 1897 * driver notification. 
The requests fully handled by hardware are:
 * SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
 * GET_STATUS
 * The requests handled by hardware, but with irq notification, are:
 * SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
 * The remaining standard requests really handled by handle_ep0 are:
 * GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
 * Requests standardized outside of USB 2.0 chapter 9 are handled more
 * uniformly, by gadget drivers.
 *
 * The control endpoint state machine is _not_ USB spec compliant, it's even
 * barely compliant with the Intel PXA270 developers guide.
 * The key points which shaped this state machine are:
 *   - on every setup token, bit UDCCSR0_SA is raised and held until cleared
 *     by software.
 *   - on every OUT packet received, UDCCSR0_OPC is raised and held until
 *     cleared by software.
 *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
 *     before reading ep0.
 *     This is true only for PXA27x. This is not true anymore for the PXA3xx
 *     family (check Back-to-Back setup packet in the developers guide).
 *   - the irq can be called on a "packet complete" event (opc_irq=1), while
 *     UDCCSR0_OPC is not yet raised (the delta can be as big as 100ms
 *     from experimentation).
 *   - as UDCCSR0_SA can be activated while in irq handling, and clearing
 *     UDCCSR0_OPC would flush the setup data, we almost never clear
 *     UDCCSR0_OPC
 *     => we never actually read the "status stage" packet of an IN data stage
 *     => this is not documented in Intel documentation
 *   - the hardware has no notion of a STATUS STAGE, it only handles the SETUP
 *     STAGE and DATA STAGE. The driver adds a STATUS STAGE to send the last
 *     zero length packet in OUT_STATUS_STAGE.
 *   - special attention was needed for IN_STATUS_STAGE. If a packet complete
 *     event is detected, we terminate the status stage without acknowledging
 *     the packet (so as not to risk losing a potential SETUP packet)
 */
static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
{
	u32 udccsr0;
	struct pxa_ep *ep = &udc->pxa_ep[0];
	struct pxa27x_request *req = NULL;
	int completed = 0;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next, struct pxa27x_request, queue);

	udccsr0 = udc_ep_readl(ep, UDCCSR);
	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
		(fifo_irq << 1 | opc_irq));

	if (udccsr0 & UDCCSR0_SST) {
		ep_dbg(ep, "clearing stall status\n");
		nuke(ep, -EPIPE);
		ep_write_UDCCSR(ep, UDCCSR0_SST);
		ep0_idle(udc);
	}

	if (udccsr0 & UDCCSR0_SA) {
		nuke(ep, 0);
		set_ep0state(udc, SETUP_STAGE);
	}

	switch (udc->ep0state) {
	case WAIT_FOR_SETUP:
		/*
		 * Hardware bug: beware, we cannot clear OPC, since we would
		 * miss a potential OPC irq for a setup packet.
		 * So, we only do ... nothing, and hope for a next irq with
		 * UDCCSR0_SA set.
1966 */ 1967 break; 1968 case SETUP_STAGE: 1969 udccsr0 &= UDCCSR0_CTRL_REQ_MASK; 1970 if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK)) 1971 handle_ep0_ctrl_req(udc, req); 1972 break; 1973 case IN_DATA_STAGE: /* GET_DESCRIPTOR */ 1974 if (epout_has_pkt(ep)) 1975 ep_write_UDCCSR(ep, UDCCSR0_OPC); 1976 if (req && !ep_is_full(ep)) 1977 completed = write_ep0_fifo(ep, req); 1978 if (completed) 1979 ep0_end_in_req(ep, req, NULL); 1980 break; 1981 case OUT_DATA_STAGE: /* SET_DESCRIPTOR */ 1982 if (epout_has_pkt(ep) && req) 1983 completed = read_ep0_fifo(ep, req); 1984 if (completed) 1985 ep0_end_out_req(ep, req, NULL); 1986 break; 1987 case STALL: 1988 ep_write_UDCCSR(ep, UDCCSR0_FST); 1989 break; 1990 case IN_STATUS_STAGE: 1991 /* 1992 * Hardware bug : beware, we cannot clear OPC, since we would 1993 * miss a potential packet complete irq for a setup packet. 1994 * So, we only put the ep0 into WAIT_FOR_SETUP state. 1995 */ 1996 if (opc_irq) 1997 ep0_idle(udc); 1998 break; 1999 case OUT_STATUS_STAGE: 2000 case WAIT_ACK_SET_CONF_INTERF: 2001 ep_warn(ep, "should never get in %s state here!!!\n", 2002 EP0_STNAME(ep->dev)); 2003 ep0_idle(udc); 2004 break; 2005 } 2006 } 2007 2008 /** 2009 * handle_ep - Handle endpoint data transfers 2010 * @ep: pxa physical endpoint 2011 * 2012 * Tries to transfer all pending request data into the endpoint and/or 2013 * transfer all pending data in the endpoint into usb requests. 2014 * 2015 * Is always called from the interrupt handler. ep->lock must not be held. 2016 */ 2017 static void handle_ep(struct pxa_ep *ep) 2018 { 2019 struct pxa27x_request *req; 2020 int completed; 2021 u32 udccsr; 2022 int is_in = ep->dir_in; 2023 int loop = 0; 2024 unsigned long flags; 2025 2026 spin_lock_irqsave(&ep->lock, flags); 2027 if (ep->in_handle_ep) 2028 goto recursion_detected; 2029 ep->in_handle_ep = 1; 2030 2031 do { 2032 completed = 0; 2033 udccsr = udc_ep_readl(ep, UDCCSR); 2034 2035 if (likely(!list_empty(&ep->queue))) 2036 req = list_entry(ep->queue.next, 2037 struct pxa27x_request, queue); 2038 else 2039 req = NULL; 2040 2041 ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n", 2042 req, udccsr, loop++); 2043 2044 if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN))) 2045 udc_ep_writel(ep, UDCCSR, 2046 udccsr & (UDCCSR_SST | UDCCSR_TRN)); 2047 if (!req) 2048 break; 2049 2050 if (unlikely(is_in)) { 2051 if (likely(!ep_is_full(ep))) 2052 completed = write_fifo(ep, req); 2053 } else { 2054 if (likely(epout_has_pkt(ep))) 2055 completed = read_fifo(ep, req); 2056 } 2057 2058 if (completed) { 2059 if (is_in) 2060 ep_end_in_req(ep, req, &flags); 2061 else 2062 ep_end_out_req(ep, req, &flags); 2063 } 2064 } while (completed); 2065 2066 ep->in_handle_ep = 0; 2067 recursion_detected: 2068 spin_unlock_irqrestore(&ep->lock, flags); 2069 } 2070 2071 /** 2072 * pxa27x_change_configuration - Handle SET_CONF usb request notification 2073 * @udc: udc device 2074 * @config: usb configuration 2075 * 2076 * Post the request to upper level.
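 * The hardware has already applied the new configuration when this runs;
 * the driver only rebuilds the matching SET_CONFIGURATION control request,
 * posts it to the gadget driver's setup() callback, and then sets
 * UDCCSR0_AREN so the controller can acknowledge the hardware-handled request
 * (see WAIT_ACK_SET_CONF_INTERF).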
2077 * Don't use any pxa specific hardware configuration capabilities 2078 */ 2079 static void pxa27x_change_configuration(struct pxa_udc *udc, int config) 2080 { 2081 struct usb_ctrlrequest req; 2082 2083 dev_dbg(udc->dev, "config=%d\n", config); 2084 2085 udc->config = config; 2086 udc->last_interface = 0; 2087 udc->last_alternate = 0; 2088 2089 req.bRequestType = 0; 2090 req.bRequest = USB_REQ_SET_CONFIGURATION; 2091 req.wValue = config; 2092 req.wIndex = 0; 2093 req.wLength = 0; 2094 2095 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF); 2096 udc->driver->setup(&udc->gadget, &req); 2097 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN); 2098 } 2099 2100 /** 2101 * pxa27x_change_interface - Handle SET_INTERF usb request notification 2102 * @udc: udc device 2103 * @iface: interface number 2104 * @alt: alternate setting number 2105 * 2106 * Post the request to upper level. 2107 * Don't use any pxa specific hardware configuration capabilities 2108 */ 2109 static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt) 2110 { 2111 struct usb_ctrlrequest req; 2112 2113 dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt); 2114 2115 udc->last_interface = iface; 2116 udc->last_alternate = alt; 2117 2118 req.bRequestType = USB_RECIP_INTERFACE; 2119 req.bRequest = USB_REQ_SET_INTERFACE; 2120 req.wValue = alt; 2121 req.wIndex = iface; 2122 req.wLength = 0; 2123 2124 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF); 2125 udc->driver->setup(&udc->gadget, &req); 2126 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN); 2127 } 2128 2129 /* 2130 * irq_handle_data - Handle data transfer 2131 * @irq: IRQ number 2132 * @udc: pxa_udc device structure 2133 * 2134 * Called from the irq handler, transfers data to or from the endpoint's request queue 2135 */ 2136 static void irq_handle_data(int irq, struct pxa_udc *udc) 2137 { 2138 int i; 2139 struct pxa_ep *ep; 2140 u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK; 2141 u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK; 2142 2143 if (udcisr0 & UDCISR_INT_MASK) { 2144 udc->pxa_ep[0].stats.irqs++; 2145 udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK)); 2146 handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR), 2147 !!(udcisr0 & UDCICR_PKTCOMPL)); 2148 } 2149 2150 udcisr0 >>= 2; 2151 for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) { 2152 if (!(udcisr0 & UDCISR_INT_MASK)) 2153 continue; 2154 2155 udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK)); 2156 2157 WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep)); 2158 if (i < ARRAY_SIZE(udc->pxa_ep)) { 2159 ep = &udc->pxa_ep[i]; 2160 ep->stats.irqs++; 2161 handle_ep(ep); 2162 } 2163 } 2164 2165 for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) { 2166 udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK)); 2167 if (!(udcisr1 & UDCISR_INT_MASK)) 2168 continue; 2169 2170 WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep)); 2171 if (i < ARRAY_SIZE(udc->pxa_ep)) { 2172 ep = &udc->pxa_ep[i]; 2173 ep->stats.irqs++; 2174 handle_ep(ep); 2175 } 2176 } 2177 2178 } 2179 2180 /** 2181 * irq_udc_suspend - Handle IRQ "UDC Suspend" 2182 * @udc: udc device 2183 */ 2184 static void irq_udc_suspend(struct pxa_udc *udc) 2185 { 2186 udc_writel(udc, UDCISR1, UDCISR1_IRSU); 2187 udc->stats.irqs_suspend++; 2188 2189 if (udc->gadget.speed != USB_SPEED_UNKNOWN 2190 && udc->driver && udc->driver->suspend) 2191 udc->driver->suspend(&udc->gadget); 2192 ep0_idle(udc); 2193 } 2194 2195 /** 2196 * irq_udc_resume - Handle IRQ "UDC Resume" 2197 * @udc: udc device 2198 */ 2199 static void irq_udc_resume(struct pxa_udc *udc) 2200 {
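	/* Acknowledge the resume interrupt, then call the gadget driver's resume() callback if one is active */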
2201 udc_writel(udc, UDCISR1, UDCISR1_IRRU); 2202 udc->stats.irqs_resume++; 2203 2204 if (udc->gadget.speed != USB_SPEED_UNKNOWN 2205 && udc->driver && udc->driver->resume) 2206 udc->driver->resume(&udc->gadget); 2207 } 2208 2209 /** 2210 * irq_udc_reconfig - Handle IRQ "UDC Change Configuration" 2211 * @udc: udc device 2212 */ 2213 static void irq_udc_reconfig(struct pxa_udc *udc) 2214 { 2215 unsigned config, interface, alternate, config_change; 2216 u32 udccr = udc_readl(udc, UDCCR); 2217 2218 udc_writel(udc, UDCISR1, UDCISR1_IRCC); 2219 udc->stats.irqs_reconfig++; 2220 2221 config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S; 2222 config_change = (config != udc->config); 2223 pxa27x_change_configuration(udc, config); 2224 2225 interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S; 2226 alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S; 2227 pxa27x_change_interface(udc, interface, alternate); 2228 2229 if (config_change) 2230 update_pxa_ep_matches(udc); 2231 udc_set_mask_UDCCR(udc, UDCCR_SMAC); 2232 } 2233 2234 /** 2235 * irq_udc_reset - Handle IRQ "UDC Reset" 2236 * @udc: udc device 2237 */ 2238 static void irq_udc_reset(struct pxa_udc *udc) 2239 { 2240 u32 udccr = udc_readl(udc, UDCCR); 2241 struct pxa_ep *ep = &udc->pxa_ep[0]; 2242 2243 dev_info(udc->dev, "USB reset\n"); 2244 udc_writel(udc, UDCISR1, UDCISR1_IRRS); 2245 udc->stats.irqs_reset++; 2246 2247 if ((udccr & UDCCR_UDA) == 0) { 2248 dev_dbg(udc->dev, "USB reset start\n"); 2249 stop_activity(udc); 2250 } 2251 udc->gadget.speed = USB_SPEED_FULL; 2252 memset(&udc->stats, 0, sizeof udc->stats); 2253 2254 nuke(ep, -EPROTO); 2255 ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC); 2256 ep0_idle(udc); 2257 } 2258 2259 /** 2260 * pxa_udc_irq - Main irq handler 2261 * @irq: irq number 2262 * @_dev: udc device 2263 * 2264 * Handles all udc interrupts 2265 */ 2266 static irqreturn_t pxa_udc_irq(int irq, void *_dev) 2267 { 2268 struct pxa_udc *udc = _dev; 2269 u32 udcisr0 = udc_readl(udc, UDCISR0); 2270 u32 udcisr1 = udc_readl(udc, UDCISR1); 2271 u32 udccr = udc_readl(udc, UDCCR); 2272 u32 udcisr1_spec; 2273 2274 dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, " 2275 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr); 2276 2277 udcisr1_spec = udcisr1 & 0xf8000000; 2278 if (unlikely(udcisr1_spec & UDCISR1_IRSU)) 2279 irq_udc_suspend(udc); 2280 if (unlikely(udcisr1_spec & UDCISR1_IRRU)) 2281 irq_udc_resume(udc); 2282 if (unlikely(udcisr1_spec & UDCISR1_IRCC)) 2283 irq_udc_reconfig(udc); 2284 if (unlikely(udcisr1_spec & UDCISR1_IRRS)) 2285 irq_udc_reset(udc); 2286 2287 if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK)) 2288 irq_handle_data(irq, udc); 2289 2290 return IRQ_HANDLED; 2291 } 2292 2293 static struct pxa_udc memory = { 2294 .gadget = { 2295 .ops = &pxa_udc_ops, 2296 .ep0 = &memory.udc_usb_ep[0].usb_ep, 2297 .name = driver_name, 2298 .dev = { 2299 .init_name = "gadget", 2300 }, 2301 }, 2302 2303 .udc_usb_ep = { 2304 USB_EP_CTRL, 2305 USB_EP_OUT_BULK(1), 2306 USB_EP_IN_BULK(2), 2307 USB_EP_IN_ISO(3), 2308 USB_EP_OUT_ISO(4), 2309 USB_EP_IN_INT(5), 2310 }, 2311 2312 .pxa_ep = { 2313 PXA_EP_CTRL, 2314 /* Endpoints for gadget zero */ 2315 PXA_EP_OUT_BULK(1, 1, 3, 0, 0), 2316 PXA_EP_IN_BULK(2, 2, 3, 0, 0), 2317 /* Endpoints for ether gadget, file storage gadget */ 2318 PXA_EP_OUT_BULK(3, 1, 1, 0, 0), 2319 PXA_EP_IN_BULK(4, 2, 1, 0, 0), 2320 PXA_EP_IN_ISO(5, 3, 1, 0, 0), 2321 PXA_EP_OUT_ISO(6, 4, 1, 0, 0), 2322 PXA_EP_IN_INT(7, 5, 1, 0, 0), 2323 /* Endpoints for RNDIS, serial */ 2324 PXA_EP_OUT_BULK(8, 1, 2, 0, 0), 2325 
PXA_EP_IN_BULK(9, 2, 2, 0, 0), 2326 PXA_EP_IN_INT(10, 5, 2, 0, 0), 2327 /* 2328 * All the following endpoints are only for completeness. They 2329 * will never work, as multiple interfaces are really broken on 2330 * the pxa. 2331 */ 2332 PXA_EP_OUT_BULK(11, 1, 2, 1, 0), 2333 PXA_EP_IN_BULK(12, 2, 2, 1, 0), 2334 /* Endpoint for CDC Ether */ 2335 PXA_EP_OUT_BULK(13, 1, 1, 1, 1), 2336 PXA_EP_IN_BULK(14, 2, 1, 1, 1), 2337 } 2338 }; 2339 2340 #if defined(CONFIG_OF) 2341 static const struct of_device_id udc_pxa_dt_ids[] = { 2342 { .compatible = "marvell,pxa270-udc" }, 2343 {} 2344 }; 2345 MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids); 2346 #endif 2347 2348 /** 2349 * pxa_udc_probe - probes the udc device 2350 * @pdev: platform device 2351 * 2352 * Perform basic init : allocates udc clock, creates debugfs entries, requests 2353 * irq. 2354 */ 2355 static int pxa_udc_probe(struct platform_device *pdev) 2356 { 2357 struct pxa_udc *udc = &memory; 2358 int retval = 0, gpio; 2359 struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev); 2360 unsigned long gpio_flags; 2361 2362 if (mach) { 2363 gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0; 2364 gpio = mach->gpio_pullup; 2365 if (gpio_is_valid(gpio)) { 2366 retval = devm_gpio_request_one(&pdev->dev, gpio, 2367 gpio_flags, 2368 "USB D+ pullup"); 2369 if (retval) 2370 return retval; 2371 udc->gpiod = gpio_to_desc(mach->gpio_pullup); 2372 } 2373 udc->udc_command = mach->udc_command; 2374 } else { 2375 udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS); 2376 } 2377 2378 udc->regs = devm_platform_ioremap_resource(pdev, 0); 2379 if (IS_ERR(udc->regs)) 2380 return PTR_ERR(udc->regs); 2381 udc->irq = platform_get_irq(pdev, 0); 2382 if (udc->irq < 0) 2383 return udc->irq; 2384 2385 udc->dev = &pdev->dev; 2386 if (of_have_populated_dt()) { 2387 udc->transceiver = 2388 devm_usb_get_phy_by_phandle(udc->dev, "phys", 0); 2389 if (IS_ERR(udc->transceiver)) 2390 return PTR_ERR(udc->transceiver); 2391 } else { 2392 udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); 2393 } 2394 2395 if (IS_ERR(udc->gpiod)) { 2396 dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n", 2397 PTR_ERR(udc->gpiod)); 2398 return PTR_ERR(udc->gpiod); 2399 } 2400 if (udc->gpiod) 2401 gpiod_direction_output(udc->gpiod, 0); 2402 2403 udc->clk = devm_clk_get(&pdev->dev, NULL); 2404 if (IS_ERR(udc->clk)) 2405 return PTR_ERR(udc->clk); 2406 2407 retval = clk_prepare(udc->clk); 2408 if (retval) 2409 return retval; 2410 2411 udc->vbus_sensed = 0; 2412 2413 the_controller = udc; 2414 platform_set_drvdata(pdev, udc); 2415 udc_init_data(udc); 2416 2417 /* irq setup after old hardware state is cleaned up */ 2418 retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq, 2419 IRQF_SHARED, driver_name, udc); 2420 if (retval != 0) { 2421 dev_err(udc->dev, "%s: can't get irq %i, err %d\n", 2422 driver_name, udc->irq, retval); 2423 goto err; 2424 } 2425 2426 if (!IS_ERR_OR_NULL(udc->transceiver)) 2427 usb_register_notifier(udc->transceiver, &pxa27x_udc_phy); 2428 retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget); 2429 if (retval) 2430 goto err_add_gadget; 2431 2432 pxa_init_debugfs(udc); 2433 if (should_enable_udc(udc)) 2434 udc_enable(udc); 2435 return 0; 2436 2437 err_add_gadget: 2438 if (!IS_ERR_OR_NULL(udc->transceiver)) 2439 usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); 2440 err: 2441 clk_unprepare(udc->clk); 2442 return retval; 2443 } 2444 2445 /** 2446 * pxa_udc_remove - removes the udc device driver 2447 * @_dev: platform device 2448 */ 2449 static int
pxa_udc_remove(struct platform_device *_dev) 2450 { 2451 struct pxa_udc *udc = platform_get_drvdata(_dev); 2452 2453 usb_del_gadget_udc(&udc->gadget); 2454 pxa_cleanup_debugfs(udc); 2455 2456 if (!IS_ERR_OR_NULL(udc->transceiver)) { 2457 usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); 2458 usb_put_phy(udc->transceiver); 2459 } 2460 2461 udc->transceiver = NULL; 2462 the_controller = NULL; 2463 clk_unprepare(udc->clk); 2464 2465 return 0; 2466 } 2467 2468 static void pxa_udc_shutdown(struct platform_device *_dev) 2469 { 2470 struct pxa_udc *udc = platform_get_drvdata(_dev); 2471 2472 if (udc_readl(udc, UDCCR) & UDCCR_UDE) 2473 udc_disable(udc); 2474 } 2475 2476 #ifdef CONFIG_PXA27x 2477 extern void pxa27x_clear_otgph(void); 2478 #else 2479 #define pxa27x_clear_otgph() do {} while (0) 2480 #endif 2481 2482 #ifdef CONFIG_PM 2483 /** 2484 * pxa_udc_suspend - Suspend udc device 2485 * @_dev: platform device 2486 * @state: suspend state 2487 * 2488 * Suspends udc : saves the ep0 control register (UDCCSR0), then disables the udc 2489 * device. 2490 */ 2491 static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state) 2492 { 2493 struct pxa_udc *udc = platform_get_drvdata(_dev); 2494 struct pxa_ep *ep; 2495 2496 ep = &udc->pxa_ep[0]; 2497 udc->udccsr0 = udc_ep_readl(ep, UDCCSR); 2498 2499 udc_disable(udc); 2500 udc->pullup_resume = udc->pullup_on; 2501 dplus_pullup(udc, 0); 2502 2503 if (udc->driver) 2504 udc->driver->disconnect(&udc->gadget); 2505 2506 return 0; 2507 } 2508 2509 /** 2510 * pxa_udc_resume - Resume udc device 2511 * @_dev: platform device 2512 * 2513 * Resumes udc : restores the ep0 control register (UDCCSR0), then enables the udc 2514 * device. 2515 */ 2516 static int pxa_udc_resume(struct platform_device *_dev) 2517 { 2518 struct pxa_udc *udc = platform_get_drvdata(_dev); 2519 struct pxa_ep *ep; 2520 2521 ep = &udc->pxa_ep[0]; 2522 udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME)); 2523 2524 dplus_pullup(udc, udc->pullup_resume); 2525 if (should_enable_udc(udc)) 2526 udc_enable(udc); 2527 /* 2528 * We do not handle OTG yet. 2529 * 2530 * OTGPH bit is set when sleep mode is entered. 2531 * It indicates that the OTG pad is retaining its state. 2532 * Upon exit from sleep mode and before clearing OTGPH, 2533 * software must configure the USB OTG pad, UDC, and UHC 2534 * to the state they were in before entering sleep mode. 2535 */ 2536 pxa27x_clear_otgph(); 2537 2538 return 0; 2539 } 2540 #endif 2541 2542 /* work with hotplug and coldplug */ 2543 MODULE_ALIAS("platform:pxa27x-udc"); 2544 2545 static struct platform_driver udc_driver = { 2546 .driver = { 2547 .name = "pxa27x-udc", 2548 .of_match_table = of_match_ptr(udc_pxa_dt_ids), 2549 }, 2550 .probe = pxa_udc_probe, 2551 .remove = pxa_udc_remove, 2552 .shutdown = pxa_udc_shutdown, 2553 #ifdef CONFIG_PM 2554 .suspend = pxa_udc_suspend, 2555 .resume = pxa_udc_resume 2556 #endif 2557 }; 2558 2559 module_platform_driver(udc_driver); 2560 2561 MODULE_DESCRIPTION(DRIVER_DESC); 2562 MODULE_AUTHOR("Robert Jarzmik"); 2563 MODULE_LICENSE("GPL"); 2564
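/*
 * Illustrative sketch only (not part of this driver): on a non-DT PXA27x
 * board, the D+ pullup wiring consumed by pxa_udc_probe() is usually
 * described through struct pxa2xx_udc_mach_info. The board name and GPIO
 * number below are made up for the example, and pxa_set_udc_info() is the
 * mach-pxa helper assumed to register this platform data:
 *
 *	static struct pxa2xx_udc_mach_info myboard_udc_info = {
 *		.gpio_pullup		= 36,	// hypothetical D+ pullup GPIO
 *		.gpio_pullup_inverted	= false,
 *	};
 *
 *	static void __init myboard_init(void)
 *	{
 *		pxa_set_udc_info(&myboard_udc_info);
 *	}
 */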