1 /* 2 * USB Gadget driver for LPC32xx 3 * 4 * Authors: 5 * Kevin Wells <kevin.wells@nxp.com> 6 * Mike James 7 * Roland Stigge <stigge@antcom.de> 8 * 9 * Copyright (C) 2006 Philips Semiconductors 10 * Copyright (C) 2009 NXP Semiconductors 11 * Copyright (C) 2012 Roland Stigge 12 * 13 * Note: This driver is based on original work done by Mike James for 14 * the LPC3180. 15 * 16 * This program is free software; you can redistribute it and/or modify 17 * it under the terms of the GNU General Public License as published by 18 * the Free Software Foundation; either version 2 of the License, or 19 * (at your option) any later version. 20 * 21 * This program is distributed in the hope that it will be useful, 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 24 * GNU General Public License for more details. 25 * 26 * You should have received a copy of the GNU General Public License 27 * along with this program; if not, write to the Free Software 28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 29 */ 30 31 #include <linux/kernel.h> 32 #include <linux/module.h> 33 #include <linux/platform_device.h> 34 #include <linux/delay.h> 35 #include <linux/ioport.h> 36 #include <linux/slab.h> 37 #include <linux/errno.h> 38 #include <linux/init.h> 39 #include <linux/list.h> 40 #include <linux/interrupt.h> 41 #include <linux/proc_fs.h> 42 #include <linux/clk.h> 43 #include <linux/usb/ch9.h> 44 #include <linux/usb/gadget.h> 45 #include <linux/i2c.h> 46 #include <linux/kthread.h> 47 #include <linux/freezer.h> 48 #include <linux/dma-mapping.h> 49 #include <linux/dmapool.h> 50 #include <linux/workqueue.h> 51 #include <linux/of.h> 52 #include <linux/usb/isp1301.h> 53 54 #include <asm/byteorder.h> 55 #include <mach/hardware.h> 56 #include <linux/io.h> 57 #include <asm/irq.h> 58 59 #include <mach/platform.h> 60 #include <mach/irqs.h> 61 #include <mach/board.h> 62 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 63 #include <linux/debugfs.h> 64 #include <linux/seq_file.h> 65 #endif 66 67 /* 68 * USB device configuration structure 69 */ 70 typedef void (*usc_chg_event)(int); 71 struct lpc32xx_usbd_cfg { 72 int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */ 73 usc_chg_event conn_chgb; /* Connection change event (optional) */ 74 usc_chg_event susp_chgb; /* Suspend/resume event (optional) */ 75 usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */ 76 }; 77 78 /* 79 * controller driver data structures 80 */ 81 82 /* 16 endpoints (not to be confused with 32 hardware endpoints) */ 83 #define NUM_ENDPOINTS 16 84 85 /* 86 * IRQ indices make reading the code a little easier 87 */ 88 #define IRQ_USB_LP 0 89 #define IRQ_USB_HP 1 90 #define IRQ_USB_DEVDMA 2 91 #define IRQ_USB_ATX 3 92 93 #define EP_OUT 0 /* RX (from host) */ 94 #define EP_IN 1 /* TX (to host) */ 95 96 /* Returns the interrupt mask for the selected hardware endpoint */ 97 #define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir)) 98 99 #define EP_INT_TYPE 0 100 #define EP_ISO_TYPE 1 101 #define EP_BLK_TYPE 2 102 #define EP_CTL_TYPE 3 103 104 /* EP0 states */ 105 #define WAIT_FOR_SETUP 0 /* Wait for setup packet */ 106 #define DATA_IN 1 /* Expect dev->host transfer */ 107 #define DATA_OUT 2 /* Expect host->dev transfer */ 108 109 /* DD (DMA Descriptor) structure, requires word alignment, this is already 110 * defined in the LPC32XX USB device header file, but this version is slightly 111 * modified to tag some work data with each DMA descriptor. 
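 *
 * The extra fields tag bookkeeping data: this_dma holds the bus address of
 * the descriptor itself (as returned by the DMA pool) so it can be written
 * into the endpoint's UDCA slot, dd_next_v is the CPU-side counterpart of
 * dd_next_phy, and iso_status[] provides the ISO packet status memory that
 * dd_iso_ps_mem_addr points at.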
*/ 112 struct lpc32xx_usbd_dd_gad { 113 u32 dd_next_phy; 114 u32 dd_setup; 115 u32 dd_buffer_addr; 116 u32 dd_status; 117 u32 dd_iso_ps_mem_addr; 118 u32 this_dma; 119 u32 iso_status[6]; /* 5 spare */ 120 u32 dd_next_v; 121 }; 122 123 /* 124 * Logical endpoint structure 125 */ 126 struct lpc32xx_ep { 127 struct usb_ep ep; 128 struct list_head queue; 129 struct lpc32xx_udc *udc; 130 131 u32 hwep_num_base; /* Physical hardware EP */ 132 u32 hwep_num; /* Maps to hardware endpoint */ 133 u32 maxpacket; 134 u32 lep; 135 136 bool is_in; 137 bool req_pending; 138 u32 eptype; 139 140 u32 totalints; 141 142 bool wedge; 143 }; 144 145 /* 146 * Common UDC structure 147 */ 148 struct lpc32xx_udc { 149 struct usb_gadget gadget; 150 struct usb_gadget_driver *driver; 151 struct platform_device *pdev; 152 struct device *dev; 153 struct dentry *pde; 154 spinlock_t lock; 155 struct i2c_client *isp1301_i2c_client; 156 157 /* Board and device specific */ 158 struct lpc32xx_usbd_cfg *board; 159 u32 io_p_start; 160 u32 io_p_size; 161 void __iomem *udp_baseaddr; 162 int udp_irq[4]; 163 struct clk *usb_pll_clk; 164 struct clk *usb_slv_clk; 165 struct clk *usb_otg_clk; 166 167 /* DMA support */ 168 u32 *udca_v_base; 169 u32 udca_p_base; 170 struct dma_pool *dd_cache; 171 172 /* Common EP and control data */ 173 u32 enabled_devints; 174 u32 enabled_hwepints; 175 u32 dev_status; 176 u32 realized_eps; 177 178 /* VBUS detection, pullup, and power flags */ 179 u8 vbus; 180 u8 last_vbus; 181 int pullup; 182 int poweron; 183 184 /* Work queues related to I2C support */ 185 struct work_struct pullup_job; 186 struct work_struct vbus_job; 187 struct work_struct power_job; 188 189 /* USB device peripheral - various */ 190 struct lpc32xx_ep ep[NUM_ENDPOINTS]; 191 bool enabled; 192 bool clocked; 193 bool suspended; 194 int ep0state; 195 atomic_t enabled_ep_cnt; 196 wait_queue_head_t ep_disable_wait_queue; 197 }; 198 199 /* 200 * Endpoint request 201 */ 202 struct lpc32xx_request { 203 struct usb_request req; 204 struct list_head queue; 205 struct lpc32xx_usbd_dd_gad *dd_desc_ptr; 206 bool mapped; 207 bool send_zlp; 208 }; 209 210 static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g) 211 { 212 return container_of(g, struct lpc32xx_udc, gadget); 213 } 214 215 #define ep_dbg(epp, fmt, arg...) \ 216 dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg) 217 #define ep_err(epp, fmt, arg...) \ 218 dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg) 219 #define ep_info(epp, fmt, arg...) \ 220 dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg) 221 #define ep_warn(epp, fmt, arg...) 
\
	dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)

#define UDCA_BUFF_SIZE (128)

/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
 * be replaced with an ioremap()ed pointer
 */
#define USB_CTRL	IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)

/* USB_CTRL bit defines */
#define USB_SLAVE_HCLK_EN	(1 << 24)
#define USB_HOST_NEED_CLK_EN	(1 << 21)
#define USB_DEV_NEED_CLK_EN	(1 << 22)

/**********************************************************************
 * USB device controller register offsets
 **********************************************************************/

#define USBD_DEVINTST(x)	((x) + 0x200)
#define USBD_DEVINTEN(x)	((x) + 0x204)
#define USBD_DEVINTCLR(x)	((x) + 0x208)
#define USBD_DEVINTSET(x)	((x) + 0x20C)
#define USBD_CMDCODE(x)		((x) + 0x210)
#define USBD_CMDDATA(x)		((x) + 0x214)
#define USBD_RXDATA(x)		((x) + 0x218)
#define USBD_TXDATA(x)		((x) + 0x21C)
#define USBD_RXPLEN(x)		((x) + 0x220)
#define USBD_TXPLEN(x)		((x) + 0x224)
#define USBD_CTRL(x)		((x) + 0x228)
#define USBD_DEVINTPRI(x)	((x) + 0x22C)
#define USBD_EPINTST(x)		((x) + 0x230)
#define USBD_EPINTEN(x)		((x) + 0x234)
#define USBD_EPINTCLR(x)	((x) + 0x238)
#define USBD_EPINTSET(x)	((x) + 0x23C)
#define USBD_EPINTPRI(x)	((x) + 0x240)
#define USBD_REEP(x)		((x) + 0x244)
#define USBD_EPIND(x)		((x) + 0x248)
#define USBD_EPMAXPSIZE(x)	((x) + 0x24C)
/* DMA support registers only below */
/* Set, clear, or get enabled state of the DMA request status. If
 * enabled, an IN or OUT token will start a DMA transfer for the EP */
#define USBD_DMARST(x)		((x) + 0x250)
#define USBD_DMARCLR(x)		((x) + 0x254)
#define USBD_DMARSET(x)		((x) + 0x258)
/* DMA UDCA head pointer */
#define USBD_UDCAH(x)		((x) + 0x280)
/* EP DMA status, enable, and disable. This is used to specifically
 * enable or disable DMA for a specific EP */
#define USBD_EPDMAST(x)		((x) + 0x284)
#define USBD_EPDMAEN(x)		((x) + 0x288)
#define USBD_EPDMADIS(x)	((x) + 0x28C)
/* DMA master interrupts enable and pending interrupts */
#define USBD_DMAINTST(x)	((x) + 0x290)
#define USBD_DMAINTEN(x)	((x) + 0x294)
/* DMA end of transfer interrupt enable, disable, status */
#define USBD_EOTINTST(x)	((x) + 0x2A0)
#define USBD_EOTINTCLR(x)	((x) + 0x2A4)
#define USBD_EOTINTSET(x)	((x) + 0x2A8)
/* New DD request interrupt enable, disable, status */
#define USBD_NDDRTINTST(x)	((x) + 0x2AC)
#define USBD_NDDRTINTCLR(x)	((x) + 0x2B0)
#define USBD_NDDRTINTSET(x)	((x) + 0x2B4)
/* DMA error interrupt enable, disable, status */
#define USBD_SYSERRTINTST(x)	((x) + 0x2B8)
#define USBD_SYSERRTINTCLR(x)	((x) + 0x2BC)
#define USBD_SYSERRTINTSET(x)	((x) + 0x2C0)

/**********************************************************************
 * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
 * USBD_DEVINTPRI register definitions
 **********************************************************************/
#define USBD_ERR_INT		(1 << 9)
#define USBD_EP_RLZED		(1 << 8)
#define USBD_TXENDPKT		(1 << 7)
#define USBD_RXENDPKT		(1 << 6)
#define USBD_CDFULL		(1 << 5)
#define USBD_CCEMPTY		(1 << 4)
#define USBD_DEV_STAT		(1 << 3)
#define USBD_EP_SLOW		(1 << 2)
#define USBD_EP_FAST		(1 << 1)
#define USBD_FRAME		(1 << 0)

/**********************************************************************
 * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
 * USBD_EPINTPRI register definitions
 **********************************************************************/
/* End point selection macro (RX) */
#define USBD_RX_EP_SEL(e)	(1 << ((e) << 1))

/* End point selection macro (TX) */
#define USBD_TX_EP_SEL(e)	(1 << (((e) << 1) + 1))

/**********************************************************************
 * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
 * USBD_EPDMAEN/USBD_EPDMADIS/
 * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
 * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
 * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
 * register definitions
 **********************************************************************/
/* Endpoint selection macro */
#define USBD_EP_SEL(e)		(1 << (e))

/**********************************************************************
 * USBD_DMAINTST/USBD_DMAINTEN
 **********************************************************************/
#define USBD_SYS_ERR_INT	(1 << 2)
#define USBD_NEW_DD_INT		(1 << 1)
#define USBD_EOT_INT		(1 << 0)

/**********************************************************************
 * USBD_RXPLEN register definitions
 **********************************************************************/
#define USBD_PKT_RDY		(1 << 11)
#define USBD_DV			(1 << 10)
#define USBD_PK_LEN_MASK	0x3FF

/**********************************************************************
 * USBD_CTRL register definitions
 **********************************************************************/
#define USBD_LOG_ENDPOINT(e)	((e) << 2)
#define USBD_WR_EN		(1 << 1)
#define USBD_RD_EN		(1 << 0)

/**********************************************************************
 * USBD_CMDCODE register definitions
**********************************************************************/ 349 #define USBD_CMD_CODE(c) ((c) << 16) 350 #define USBD_CMD_PHASE(p) ((p) << 8) 351 352 /********************************************************************** 353 * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions 354 **********************************************************************/ 355 #define USBD_DMAEP(e) (1 << (e)) 356 357 /* DD (DMA Descriptor) structure, requires word alignment */ 358 struct lpc32xx_usbd_dd { 359 u32 *dd_next; 360 u32 dd_setup; 361 u32 dd_buffer_addr; 362 u32 dd_status; 363 u32 dd_iso_ps_mem_addr; 364 }; 365 366 /* dd_setup bit defines */ 367 #define DD_SETUP_ATLE_DMA_MODE 0x01 368 #define DD_SETUP_NEXT_DD_VALID 0x04 369 #define DD_SETUP_ISO_EP 0x10 370 #define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5) 371 #define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16) 372 373 /* dd_status bit defines */ 374 #define DD_STATUS_DD_RETIRED 0x01 375 #define DD_STATUS_STS_MASK 0x1E 376 #define DD_STATUS_STS_NS 0x00 /* Not serviced */ 377 #define DD_STATUS_STS_BS 0x02 /* Being serviced */ 378 #define DD_STATUS_STS_NC 0x04 /* Normal completion */ 379 #define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */ 380 #define DD_STATUS_STS_DOR 0x08 /* Data overrun */ 381 #define DD_STATUS_STS_SE 0x12 /* System error */ 382 #define DD_STATUS_PKT_VAL 0x20 /* Packet valid */ 383 #define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */ 384 #define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */ 385 #define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F) 386 #define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF) 387 388 /* 389 * 390 * Protocol engine bits below 391 * 392 */ 393 /* Device Interrupt Bit Definitions */ 394 #define FRAME_INT 0x00000001 395 #define EP_FAST_INT 0x00000002 396 #define EP_SLOW_INT 0x00000004 397 #define DEV_STAT_INT 0x00000008 398 #define CCEMTY_INT 0x00000010 399 #define CDFULL_INT 0x00000020 400 #define RxENDPKT_INT 0x00000040 401 #define TxENDPKT_INT 0x00000080 402 #define EP_RLZED_INT 0x00000100 403 #define ERR_INT 0x00000200 404 405 /* Rx & Tx Packet Length Definitions */ 406 #define PKT_LNGTH_MASK 0x000003FF 407 #define PKT_DV 0x00000400 408 #define PKT_RDY 0x00000800 409 410 /* USB Control Definitions */ 411 #define CTRL_RD_EN 0x00000001 412 #define CTRL_WR_EN 0x00000002 413 414 /* Command Codes */ 415 #define CMD_SET_ADDR 0x00D00500 416 #define CMD_CFG_DEV 0x00D80500 417 #define CMD_SET_MODE 0x00F30500 418 #define CMD_RD_FRAME 0x00F50500 419 #define DAT_RD_FRAME 0x00F50200 420 #define CMD_RD_TEST 0x00FD0500 421 #define DAT_RD_TEST 0x00FD0200 422 #define CMD_SET_DEV_STAT 0x00FE0500 423 #define CMD_GET_DEV_STAT 0x00FE0500 424 #define DAT_GET_DEV_STAT 0x00FE0200 425 #define CMD_GET_ERR_CODE 0x00FF0500 426 #define DAT_GET_ERR_CODE 0x00FF0200 427 #define CMD_RD_ERR_STAT 0x00FB0500 428 #define DAT_RD_ERR_STAT 0x00FB0200 429 #define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16)) 430 #define CMD_SEL_EP(x) (0x00000500 | ((x) << 16)) 431 #define DAT_SEL_EP(x) (0x00000200 | ((x) << 16)) 432 #define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16)) 433 #define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16)) 434 #define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16)) 435 #define CMD_CLR_BUF 0x00F20500 436 #define DAT_CLR_BUF 0x00F20200 437 #define CMD_VALID_BUF 0x00FA0500 438 439 /* Device Address Register Definitions */ 440 #define DEV_ADDR_MASK 0x7F 441 #define DEV_EN 0x80 442 443 /* Device Configure Register Definitions */ 444 #define CONF_DVICE 0x01 445 446 /* Device Mode Register 
Definitions */ 447 #define AP_CLK 0x01 448 #define INAK_CI 0x02 449 #define INAK_CO 0x04 450 #define INAK_II 0x08 451 #define INAK_IO 0x10 452 #define INAK_BI 0x20 453 #define INAK_BO 0x40 454 455 /* Device Status Register Definitions */ 456 #define DEV_CON 0x01 457 #define DEV_CON_CH 0x02 458 #define DEV_SUS 0x04 459 #define DEV_SUS_CH 0x08 460 #define DEV_RST 0x10 461 462 /* Error Code Register Definitions */ 463 #define ERR_EC_MASK 0x0F 464 #define ERR_EA 0x10 465 466 /* Error Status Register Definitions */ 467 #define ERR_PID 0x01 468 #define ERR_UEPKT 0x02 469 #define ERR_DCRC 0x04 470 #define ERR_TIMOUT 0x08 471 #define ERR_EOP 0x10 472 #define ERR_B_OVRN 0x20 473 #define ERR_BTSTF 0x40 474 #define ERR_TGL 0x80 475 476 /* Endpoint Select Register Definitions */ 477 #define EP_SEL_F 0x01 478 #define EP_SEL_ST 0x02 479 #define EP_SEL_STP 0x04 480 #define EP_SEL_PO 0x08 481 #define EP_SEL_EPN 0x10 482 #define EP_SEL_B_1_FULL 0x20 483 #define EP_SEL_B_2_FULL 0x40 484 485 /* Endpoint Status Register Definitions */ 486 #define EP_STAT_ST 0x01 487 #define EP_STAT_DA 0x20 488 #define EP_STAT_RF_MO 0x40 489 #define EP_STAT_CND_ST 0x80 490 491 /* Clear Buffer Register Definitions */ 492 #define CLR_BUF_PO 0x01 493 494 /* DMA Interrupt Bit Definitions */ 495 #define EOT_INT 0x01 496 #define NDD_REQ_INT 0x02 497 #define SYS_ERR_INT 0x04 498 499 #define DRIVER_VERSION "1.03" 500 static const char driver_name[] = "lpc32xx_udc"; 501 502 /* 503 * 504 * proc interface support 505 * 506 */ 507 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 508 static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"}; 509 static const char debug_filename[] = "driver/udc"; 510 511 static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep) 512 { 513 struct lpc32xx_request *req; 514 515 seq_printf(s, "\n"); 516 seq_printf(s, "%12s, maxpacket %4d %3s", 517 ep->ep.name, ep->ep.maxpacket, 518 ep->is_in ? "in" : "out"); 519 seq_printf(s, " type %4s", epnames[ep->eptype]); 520 seq_printf(s, " ints: %12d", ep->totalints); 521 522 if (list_empty(&ep->queue)) 523 seq_printf(s, "\t(queue empty)\n"); 524 else { 525 list_for_each_entry(req, &ep->queue, queue) { 526 u32 length = req->req.actual; 527 528 seq_printf(s, "\treq %p len %d/%d buf %p\n", 529 &req->req, length, 530 req->req.length, req->req.buf); 531 } 532 } 533 } 534 535 static int proc_udc_show(struct seq_file *s, void *unused) 536 { 537 struct lpc32xx_udc *udc = s->private; 538 struct lpc32xx_ep *ep; 539 unsigned long flags; 540 541 seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION); 542 543 spin_lock_irqsave(&udc->lock, flags); 544 545 seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n", 546 udc->vbus ? "present" : "off", 547 udc->enabled ? (udc->vbus ? "active" : "enabled") : 548 "disabled", 549 udc->gadget.is_selfpowered ? "self" : "VBUS", 550 udc->suspended ? ", suspended" : "", 551 udc->driver ? 
		   udc->driver->driver.name : "(none)");

	if (udc->enabled && udc->vbus) {
		proc_ep_show(s, &udc->ep[0]);
		list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
			proc_ep_show(s, ep);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int proc_udc_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_udc_show, PDE_DATA(inode));
}

static const struct file_operations proc_ops = {
	.owner = THIS_MODULE,
	.open = proc_udc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void create_debug_file(struct lpc32xx_udc *udc)
{
	udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
}

static void remove_debug_file(struct lpc32xx_udc *udc)
{
	debugfs_remove(udc->pde);
}

#else
static inline void create_debug_file(struct lpc32xx_udc *udc) {}
static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
#endif

/* Primary initialization sequence for the ISP1301 transceiver */
static void isp1301_udc_configure(struct lpc32xx_udc *udc)
{
	/* LPC32XX only supports DAT_SE0 USB mode */
	/* This sequence is important */

	/* Disable transparent UART mode first */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		MC1_UART_EN);

	/* Set full speed and SE0 mode */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));

	/*
	 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
	 */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));

	/* Drive VBUS_DRV high or low depending on board setup */
	if (udc->board->vbus_drv_pol != 0)
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
	else
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DRV);

	/* Bi-directional mode with suspend control
	 * Enable both pulldowns for now - the pullup will be enabled when
	 * VBUS is detected */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1,
		(0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));

	/* Discharge VBUS (just in case) */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
	msleep(1);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		OTG1_VBUS_DISCHRG);

	/* Clear and enable VBUS high edge interrupt */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
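
	/* The rising edge of VBUS_VLD is latched below as well, so the
	 * transceiver (ATX) interrupt fires on both cable insertion (VBUS
	 * valid) and removal. */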
i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 651 ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0); 652 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 653 ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD); 654 655 /* Enable usb_need_clk clock after transceiver is initialized */ 656 writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL); 657 658 dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n", 659 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00)); 660 dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", 661 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02)); 662 dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n", 663 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14)); 664 } 665 666 /* Enables or disables the USB device pullup via the ISP1301 transceiver */ 667 static void isp1301_pullup_set(struct lpc32xx_udc *udc) 668 { 669 if (udc->pullup) 670 /* Enable pullup for bus signalling */ 671 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 672 ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP); 673 else 674 /* Enable pullup for bus signalling */ 675 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 676 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR, 677 OTG1_DP_PULLUP); 678 } 679 680 static void pullup_work(struct work_struct *work) 681 { 682 struct lpc32xx_udc *udc = 683 container_of(work, struct lpc32xx_udc, pullup_job); 684 685 isp1301_pullup_set(udc); 686 } 687 688 static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup, 689 int block) 690 { 691 if (en_pullup == udc->pullup) 692 return; 693 694 udc->pullup = en_pullup; 695 if (block) 696 isp1301_pullup_set(udc); 697 else 698 /* defer slow i2c pull up setting */ 699 schedule_work(&udc->pullup_job); 700 } 701 702 #ifdef CONFIG_PM 703 /* Powers up or down the ISP1301 transceiver */ 704 static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable) 705 { 706 if (enable != 0) 707 /* Power up ISP1301 - this ISP1301 will automatically wakeup 708 when VBUS is detected */ 709 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 710 ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR, 711 MC2_GLOBAL_PWR_DN); 712 else 713 /* Power down ISP1301 */ 714 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 715 ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN); 716 } 717 718 static void power_work(struct work_struct *work) 719 { 720 struct lpc32xx_udc *udc = 721 container_of(work, struct lpc32xx_udc, power_job); 722 723 isp1301_set_powerstate(udc, udc->poweron); 724 } 725 #endif 726 727 /* 728 * 729 * USB protocol engine command/data read/write helper functions 730 * 731 */ 732 /* Issues a single command to the USB device state machine */ 733 static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd) 734 { 735 u32 pass = 0; 736 int to; 737 738 /* EP may lock on CLRI if this read isn't done */ 739 u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr)); 740 (void) tmp; 741 742 while (pass == 0) { 743 writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr)); 744 745 /* Write command code */ 746 writel(cmd, USBD_CMDCODE(udc->udp_baseaddr)); 747 to = 10000; 748 while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) & 749 USBD_CCEMPTY) == 0) && (to > 0)) { 750 to--; 751 } 752 753 if (to > 0) 754 pass = 1; 755 756 cpu_relax(); 757 } 758 } 759 760 /* Issues 2 commands (or command and data) to the USB device state machine */ 761 static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd, 762 u32 data) 763 { 764 udc_protocol_cmd_w(udc, cmd); 765 udc_protocol_cmd_w(udc, data); 766 } 767 768 
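
/*
 * Usage sketch for the protocol engine helpers (illustrative only): every
 * access is a command phase optionally followed by a data phase. Setting
 * the device address, for example, is a command write plus one data byte:
 *
 *	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR, DAT_WR_BYTE(DEV_EN | addr));
 *
 * Reads pair a CMD_* write with the matching DAT_* code passed to
 * udc_protocol_cmd_r() below; udc_get_current_frame() uses CMD_RD_FRAME
 * followed by two DAT_RD_FRAME reads to fetch the 16-bit frame number.
 */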
/* Issues a single command to the USB device state machine and reads 769 * response data */ 770 static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd) 771 { 772 u32 tmp; 773 int to = 1000; 774 775 /* Write a command and read data from the protocol engine */ 776 writel((USBD_CDFULL | USBD_CCEMPTY), 777 USBD_DEVINTCLR(udc->udp_baseaddr)); 778 779 /* Write command code */ 780 udc_protocol_cmd_w(udc, cmd); 781 782 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr)); 783 while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL)) 784 && (to > 0)) 785 to--; 786 if (!to) 787 dev_dbg(udc->dev, 788 "Protocol engine didn't receive response (CDFULL)\n"); 789 790 return readl(USBD_CMDDATA(udc->udp_baseaddr)); 791 } 792 793 /* 794 * 795 * USB device interrupt mask support functions 796 * 797 */ 798 /* Enable one or more USB device interrupts */ 799 static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask) 800 { 801 udc->enabled_devints |= devmask; 802 writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr)); 803 } 804 805 /* Disable one or more USB device interrupts */ 806 static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask) 807 { 808 udc->enabled_devints &= ~mask; 809 writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr)); 810 } 811 812 /* Clear one or more USB device interrupts */ 813 static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask) 814 { 815 writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr)); 816 } 817 818 /* 819 * 820 * Endpoint interrupt disable/enable functions 821 * 822 */ 823 /* Enable one or more USB endpoint interrupts */ 824 static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep) 825 { 826 udc->enabled_hwepints |= (1 << hwep); 827 writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr)); 828 } 829 830 /* Disable one or more USB endpoint interrupts */ 831 static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep) 832 { 833 udc->enabled_hwepints &= ~(1 << hwep); 834 writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr)); 835 } 836 837 /* Clear one or more USB endpoint interrupts */ 838 static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep) 839 { 840 writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr)); 841 } 842 843 /* Enable DMA for the HW channel */ 844 static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep) 845 { 846 writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr)); 847 } 848 849 /* Disable DMA for the HW channel */ 850 static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep) 851 { 852 writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr)); 853 } 854 855 /* 856 * 857 * Endpoint realize/unrealize functions 858 * 859 */ 860 /* Before an endpoint can be used, it needs to be realized 861 * in the USB protocol engine - this realizes the endpoint. 
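 * The EP index is written to USBD_EPIND, the corresponding bit is set in
 * USBD_REEP and the max packet size is programmed through USBD_EPMAXPSIZE.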
862 * The interrupt (FIFO or DMA) is not enabled with this function */ 863 static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep, 864 u32 maxpacket) 865 { 866 int to = 1000; 867 868 writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr)); 869 writel(hwep, USBD_EPIND(udc->udp_baseaddr)); 870 udc->realized_eps |= (1 << hwep); 871 writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr)); 872 writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr)); 873 874 /* Wait until endpoint is realized in hardware */ 875 while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & 876 USBD_EP_RLZED)) && (to > 0)) 877 to--; 878 if (!to) 879 dev_dbg(udc->dev, "EP not correctly realized in hardware\n"); 880 881 writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr)); 882 } 883 884 /* Unrealize an EP */ 885 static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep) 886 { 887 udc->realized_eps &= ~(1 << hwep); 888 writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr)); 889 } 890 891 /* 892 * 893 * Endpoint support functions 894 * 895 */ 896 /* Select and clear endpoint interrupt */ 897 static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep) 898 { 899 udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep)); 900 return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep)); 901 } 902 903 /* Disables the endpoint in the USB protocol engine */ 904 static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep) 905 { 906 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep), 907 DAT_WR_BYTE(EP_STAT_DA)); 908 } 909 910 /* Stalls the endpoint - endpoint will return STALL */ 911 static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep) 912 { 913 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep), 914 DAT_WR_BYTE(EP_STAT_ST)); 915 } 916 917 /* Clear stall or reset endpoint */ 918 static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep) 919 { 920 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep), 921 DAT_WR_BYTE(0)); 922 } 923 924 /* Select an endpoint for endpoint status, clear, validate */ 925 static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep) 926 { 927 udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep)); 928 } 929 930 /* 931 * 932 * Endpoint buffer management functions 933 * 934 */ 935 /* Clear the current endpoint's buffer */ 936 static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep) 937 { 938 udc_select_hwep(udc, hwep); 939 udc_protocol_cmd_w(udc, CMD_CLR_BUF); 940 } 941 942 /* Validate the current endpoint's buffer */ 943 static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep) 944 { 945 udc_select_hwep(udc, hwep); 946 udc_protocol_cmd_w(udc, CMD_VALID_BUF); 947 } 948 949 static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep) 950 { 951 /* Clear EP interrupt */ 952 uda_clear_hwepint(udc, hwep); 953 return udc_selep_clrint(udc, hwep); 954 } 955 956 /* 957 * 958 * USB EP DMA support 959 * 960 */ 961 /* Allocate a DMA Descriptor */ 962 static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc) 963 { 964 dma_addr_t dma; 965 struct lpc32xx_usbd_dd_gad *dd; 966 967 dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc( 968 udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma); 969 if (dd) 970 dd->this_dma = dma; 971 972 return dd; 973 } 974 975 /* Free a DMA Descriptor */ 976 static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd) 977 { 978 dma_pool_free(udc->dd_cache, dd, dd->this_dma); 979 } 980 981 /* 982 * 983 * USB setup and shutdown functions 984 * 985 */ 986 /* Enables or disables most of the USB system clocks when low power mode is 987 * 
needed. Clocks are typically started on a connection event, and disabled
 * when a cable is disconnected */
static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0) {
		if (udc->clocked)
			return;

		udc->clocked = 1;

		/* 48MHz PLL up */
		clk_enable(udc->usb_pll_clk);

		/* Enable the USB device clock */
		writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_enable(udc->usb_otg_clk);
	} else {
		if (!udc->clocked)
			return;

		udc->clocked = 0;

		/* Never disable the USB_HCLK during normal operation */

		/* 48MHz PLL down */
		clk_disable(udc->usb_pll_clk);

		/* Disable the USB device clock */
		writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_disable(udc->usb_otg_clk);
	}
}

/* Set/reset USB device address */
static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
{
	/* Address will be latched at the end of the status phase, or
	   latched immediately if function is called twice */
	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
				DAT_WR_BYTE(DEV_EN | addr));
}

/* Set up an IN request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);

	/* Clear ZLP if last packet is not of MAXP size */
	if (req->req.length % ep->ep.maxpacket)
		req->send_zlp = 0;

	return 0;
}

/* Set up an OUT request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);
	return 0;
}

static void udc_disable(struct lpc32xx_udc *udc)
{
	u32 i;

	/* Disable device */
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));

	/* Disable all device interrupts (including EP0) */
	uda_disable_devint(udc, 0x3FF);

	/* Disable and reset all endpoint interrupts */
	for (i = 0; i < 32; i++) {
		uda_disable_hwepint(udc, i);
		uda_clear_hwepint(udc, i);
		udc_disable_hwep(udc, i);
		udc_unrealize_hwep(udc, i);
		udc->udca_v_base[i] = 0;

		/* Disable and clear all interrupts and DMA */
udc_ep_dma_disable(udc, i); 1101 writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr)); 1102 writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr)); 1103 writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr)); 1104 writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr)); 1105 } 1106 1107 /* Disable DMA interrupts */ 1108 writel(0, USBD_DMAINTEN(udc->udp_baseaddr)); 1109 1110 writel(0, USBD_UDCAH(udc->udp_baseaddr)); 1111 } 1112 1113 static void udc_enable(struct lpc32xx_udc *udc) 1114 { 1115 u32 i; 1116 struct lpc32xx_ep *ep = &udc->ep[0]; 1117 1118 /* Start with known state */ 1119 udc_disable(udc); 1120 1121 /* Enable device */ 1122 udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON)); 1123 1124 /* EP interrupts on high priority, FRAME interrupt on low priority */ 1125 writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr)); 1126 writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr)); 1127 1128 /* Clear any pending device interrupts */ 1129 writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr)); 1130 1131 /* Setup UDCA - not yet used (DMA) */ 1132 writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr)); 1133 1134 /* Only enable EP0 in and out for now, EP0 only works in FIFO mode */ 1135 for (i = 0; i <= 1; i++) { 1136 udc_realize_hwep(udc, i, ep->ep.maxpacket); 1137 uda_enable_hwepint(udc, i); 1138 udc_select_hwep(udc, i); 1139 udc_clrstall_hwep(udc, i); 1140 udc_clr_buffer_hwep(udc, i); 1141 } 1142 1143 /* Device interrupt setup */ 1144 uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW | 1145 USBD_EP_FAST)); 1146 uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW | 1147 USBD_EP_FAST)); 1148 1149 /* Set device address to 0 - called twice to force a latch in the USB 1150 engine without the need of a setup packet status closure */ 1151 udc_set_address(udc, 0); 1152 udc_set_address(udc, 0); 1153 1154 /* Enable master DMA interrupts */ 1155 writel((USBD_SYS_ERR_INT | USBD_EOT_INT), 1156 USBD_DMAINTEN(udc->udp_baseaddr)); 1157 1158 udc->dev_status = 0; 1159 } 1160 1161 /* 1162 * 1163 * USB device board specific events handled via callbacks 1164 * 1165 */ 1166 /* Connection change event - notify board function of change */ 1167 static void uda_power_event(struct lpc32xx_udc *udc, u32 conn) 1168 { 1169 /* Just notify of a connection change event (optional) */ 1170 if (udc->board->conn_chgb != NULL) 1171 udc->board->conn_chgb(conn); 1172 } 1173 1174 /* Suspend/resume event - notify board function of change */ 1175 static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn) 1176 { 1177 /* Just notify of a Suspend/resume change event (optional) */ 1178 if (udc->board->susp_chgb != NULL) 1179 udc->board->susp_chgb(conn); 1180 1181 if (conn) 1182 udc->suspended = 0; 1183 else 1184 udc->suspended = 1; 1185 } 1186 1187 /* Remote wakeup enable/disable - notify board function of change */ 1188 static void uda_remwkp_cgh(struct lpc32xx_udc *udc) 1189 { 1190 if (udc->board->rmwk_chgb != NULL) 1191 udc->board->rmwk_chgb(udc->dev_status & 1192 (1 << USB_DEVICE_REMOTE_WAKEUP)); 1193 } 1194 1195 /* Reads data from FIFO, adjusts for alignment and data size */ 1196 static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes) 1197 { 1198 int n, i, bl; 1199 u16 *p16; 1200 u32 *p32, tmp, cbytes; 1201 1202 /* Use optimal data transfer method based on source address and size */ 1203 switch (((u32) data) & 0x3) { 1204 case 0: /* 32-bit aligned */ 1205 p32 = (u32 *) data; 1206 cbytes = (bytes & ~0x3); 1207 1208 /* Copy 32-bit aligned data first */ 1209 for (n = 0; n < 
cbytes; n += 4)
			*p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);

		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));

			bl = bytes - n;
			if (bl > 4)
				bl = 4;

			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
		}
		break;

	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit sized objects first with 16-bit alignment */
		for (n = 0; n < cbytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			*p16++ = (u16)(tmp & 0xFFFF);
			*p16++ = (u16)((tmp >> 16) & 0xFFFF);
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	}
}

/* Read data from the FIFO for an endpoint. This function is for endpoints (such
 * as EP0) that don't use DMA. This function should only be called if a packet
 * is known to be ready to read for the endpoint. Note that the endpoint must
 * be selected in the protocol engine prior to this call. */
static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			 u32 bytes)
{
	u32 tmpv;
	int to = 1000;
	u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;

	/* Setup read of endpoint */
	writel(hwrep, USBD_CTRL(udc->udp_baseaddr));

	/* Wait until packet is ready */
	while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
		 PKT_RDY) == 0) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");

	/* Mask out count */
	tmp = tmpv & PKT_LNGTH_MASK;
	if (bytes < tmp)
		tmp = bytes;

	if ((tmp > 0) && (data != NULL))
		udc_pop_fifo(udc, (u8 *) data, tmp);

	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));

	/* Clear the buffer */
	udc_clr_buffer_hwep(udc, hwep);

	return tmp;
}

/* Stuffs data into the FIFO, adjusts for alignment and data size */
static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((u32) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			bl = bytes - n;
			if (bl > 4)
				bl = 4;

			tmp = 0;
			for (i = 0; i < bl; i++)
				tmp |= data[n + i] << (i * 8);

			writel(tmp,
USBD_TXDATA(udc->udp_baseaddr)); 1337 } 1338 break; 1339 1340 case 2: /* 16-bit aligned */ 1341 p16 = (u16 *) data; 1342 cbytes = (bytes & ~0x3); 1343 1344 /* Copy 32-bit aligned data first */ 1345 for (n = 0; n < cbytes; n += 4) { 1346 tmp = *p16++ & 0xFFFF; 1347 tmp |= (*p16++ & 0xFFFF) << 16; 1348 writel(tmp, USBD_TXDATA(udc->udp_baseaddr)); 1349 } 1350 1351 /* Handle any remaining bytes */ 1352 bl = bytes - cbytes; 1353 if (bl) { 1354 tmp = 0; 1355 for (n = 0; n < bl; n++) 1356 tmp |= data[cbytes + n] << (n * 8); 1357 1358 writel(tmp, USBD_TXDATA(udc->udp_baseaddr)); 1359 } 1360 break; 1361 } 1362 } 1363 1364 /* Write data to the FIFO for an endpoint. This function is for endpoints (such 1365 * as EP0) that don't use DMA. Note that the endpoint must be selected in the 1366 * protocol engine prior to this call. */ 1367 static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data, 1368 u32 bytes) 1369 { 1370 u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN; 1371 1372 if ((bytes > 0) && (data == NULL)) 1373 return; 1374 1375 /* Setup write of endpoint */ 1376 writel(hwwep, USBD_CTRL(udc->udp_baseaddr)); 1377 1378 writel(bytes, USBD_TXPLEN(udc->udp_baseaddr)); 1379 1380 /* Need at least 1 byte to trigger TX */ 1381 if (bytes == 0) 1382 writel(0, USBD_TXDATA(udc->udp_baseaddr)); 1383 else 1384 udc_stuff_fifo(udc, (u8 *) data, bytes); 1385 1386 writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr)); 1387 1388 udc_val_buffer_hwep(udc, hwep); 1389 } 1390 1391 /* USB device reset - resets USB to a default state with just EP0 1392 enabled */ 1393 static void uda_usb_reset(struct lpc32xx_udc *udc) 1394 { 1395 u32 i = 0; 1396 /* Re-init device controller and EP0 */ 1397 udc_enable(udc); 1398 udc->gadget.speed = USB_SPEED_FULL; 1399 1400 for (i = 1; i < NUM_ENDPOINTS; i++) { 1401 struct lpc32xx_ep *ep = &udc->ep[i]; 1402 ep->req_pending = 0; 1403 } 1404 } 1405 1406 /* Send a ZLP on EP0 */ 1407 static void udc_ep0_send_zlp(struct lpc32xx_udc *udc) 1408 { 1409 udc_write_hwep(udc, EP_IN, NULL, 0); 1410 } 1411 1412 /* Get current frame number */ 1413 static u16 udc_get_current_frame(struct lpc32xx_udc *udc) 1414 { 1415 u16 flo, fhi; 1416 1417 udc_protocol_cmd_w(udc, CMD_RD_FRAME); 1418 flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME); 1419 fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME); 1420 1421 return (fhi << 8) | flo; 1422 } 1423 1424 /* Set the device as configured - enables all endpoints */ 1425 static inline void udc_set_device_configured(struct lpc32xx_udc *udc) 1426 { 1427 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE)); 1428 } 1429 1430 /* Set the device as unconfigured - disables all endpoints */ 1431 static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc) 1432 { 1433 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0)); 1434 } 1435 1436 /* reinit == restore initial software state */ 1437 static void udc_reinit(struct lpc32xx_udc *udc) 1438 { 1439 u32 i; 1440 1441 INIT_LIST_HEAD(&udc->gadget.ep_list); 1442 INIT_LIST_HEAD(&udc->gadget.ep0->ep_list); 1443 1444 for (i = 0; i < NUM_ENDPOINTS; i++) { 1445 struct lpc32xx_ep *ep = &udc->ep[i]; 1446 1447 if (i != 0) 1448 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); 1449 usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket); 1450 INIT_LIST_HEAD(&ep->queue); 1451 ep->req_pending = 0; 1452 } 1453 1454 udc->ep0state = WAIT_FOR_SETUP; 1455 } 1456 1457 /* Must be called with lock */ 1458 static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status) 1459 { 1460 struct 
lpc32xx_udc *udc = ep->udc; 1461 1462 list_del_init(&req->queue); 1463 if (req->req.status == -EINPROGRESS) 1464 req->req.status = status; 1465 else 1466 status = req->req.status; 1467 1468 if (ep->lep) { 1469 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in); 1470 1471 /* Free DDs */ 1472 udc_dd_free(udc, req->dd_desc_ptr); 1473 } 1474 1475 if (status && status != -ESHUTDOWN) 1476 ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status); 1477 1478 ep->req_pending = 0; 1479 spin_unlock(&udc->lock); 1480 usb_gadget_giveback_request(&ep->ep, &req->req); 1481 spin_lock(&udc->lock); 1482 } 1483 1484 /* Must be called with lock */ 1485 static void nuke(struct lpc32xx_ep *ep, int status) 1486 { 1487 struct lpc32xx_request *req; 1488 1489 while (!list_empty(&ep->queue)) { 1490 req = list_entry(ep->queue.next, struct lpc32xx_request, queue); 1491 done(ep, req, status); 1492 } 1493 1494 if (status == -ESHUTDOWN) { 1495 uda_disable_hwepint(ep->udc, ep->hwep_num); 1496 udc_disable_hwep(ep->udc, ep->hwep_num); 1497 } 1498 } 1499 1500 /* IN endpoint 0 transfer */ 1501 static int udc_ep0_in_req(struct lpc32xx_udc *udc) 1502 { 1503 struct lpc32xx_request *req; 1504 struct lpc32xx_ep *ep0 = &udc->ep[0]; 1505 u32 tsend, ts = 0; 1506 1507 if (list_empty(&ep0->queue)) 1508 /* Nothing to send */ 1509 return 0; 1510 else 1511 req = list_entry(ep0->queue.next, struct lpc32xx_request, 1512 queue); 1513 1514 tsend = ts = req->req.length - req->req.actual; 1515 if (ts == 0) { 1516 /* Send a ZLP */ 1517 udc_ep0_send_zlp(udc); 1518 done(ep0, req, 0); 1519 return 1; 1520 } else if (ts > ep0->ep.maxpacket) 1521 ts = ep0->ep.maxpacket; /* Just send what we can */ 1522 1523 /* Write data to the EP0 FIFO and start transfer */ 1524 udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts); 1525 1526 /* Increment data pointer */ 1527 req->req.actual += ts; 1528 1529 if (tsend >= ep0->ep.maxpacket) 1530 return 0; /* Stay in data transfer state */ 1531 1532 /* Transfer request is complete */ 1533 udc->ep0state = WAIT_FOR_SETUP; 1534 done(ep0, req, 0); 1535 return 1; 1536 } 1537 1538 /* OUT endpoint 0 transfer */ 1539 static int udc_ep0_out_req(struct lpc32xx_udc *udc) 1540 { 1541 struct lpc32xx_request *req; 1542 struct lpc32xx_ep *ep0 = &udc->ep[0]; 1543 u32 tr, bufferspace; 1544 1545 if (list_empty(&ep0->queue)) 1546 return 0; 1547 else 1548 req = list_entry(ep0->queue.next, struct lpc32xx_request, 1549 queue); 1550 1551 if (req) { 1552 if (req->req.length == 0) { 1553 /* Just dequeue request */ 1554 done(ep0, req, 0); 1555 udc->ep0state = WAIT_FOR_SETUP; 1556 return 1; 1557 } 1558 1559 /* Get data from FIFO */ 1560 bufferspace = req->req.length - req->req.actual; 1561 if (bufferspace > ep0->ep.maxpacket) 1562 bufferspace = ep0->ep.maxpacket; 1563 1564 /* Copy data to buffer */ 1565 prefetchw(req->req.buf + req->req.actual); 1566 tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual, 1567 bufferspace); 1568 req->req.actual += bufferspace; 1569 1570 if (tr < ep0->ep.maxpacket) { 1571 /* This is the last packet */ 1572 done(ep0, req, 0); 1573 udc->ep0state = WAIT_FOR_SETUP; 1574 return 1; 1575 } 1576 } 1577 1578 return 0; 1579 } 1580 1581 /* Must be called with lock */ 1582 static void stop_activity(struct lpc32xx_udc *udc) 1583 { 1584 struct usb_gadget_driver *driver = udc->driver; 1585 int i; 1586 1587 if (udc->gadget.speed == USB_SPEED_UNKNOWN) 1588 driver = NULL; 1589 1590 udc->gadget.speed = USB_SPEED_UNKNOWN; 1591 udc->suspended = 0; 1592 1593 for (i = 0; i < NUM_ENDPOINTS; i++) { 1594 
struct lpc32xx_ep *ep = &udc->ep[i]; 1595 nuke(ep, -ESHUTDOWN); 1596 } 1597 if (driver) { 1598 spin_unlock(&udc->lock); 1599 driver->disconnect(&udc->gadget); 1600 spin_lock(&udc->lock); 1601 } 1602 1603 isp1301_pullup_enable(udc, 0, 0); 1604 udc_disable(udc); 1605 udc_reinit(udc); 1606 } 1607 1608 /* 1609 * Activate or kill host pullup 1610 * Can be called with or without lock 1611 */ 1612 static void pullup(struct lpc32xx_udc *udc, int is_on) 1613 { 1614 if (!udc->clocked) 1615 return; 1616 1617 if (!udc->enabled || !udc->vbus) 1618 is_on = 0; 1619 1620 if (is_on != udc->pullup) 1621 isp1301_pullup_enable(udc, is_on, 0); 1622 } 1623 1624 /* Must be called without lock */ 1625 static int lpc32xx_ep_disable(struct usb_ep *_ep) 1626 { 1627 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); 1628 struct lpc32xx_udc *udc = ep->udc; 1629 unsigned long flags; 1630 1631 if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0)) 1632 return -EINVAL; 1633 spin_lock_irqsave(&udc->lock, flags); 1634 1635 nuke(ep, -ESHUTDOWN); 1636 1637 /* Clear all DMA statuses for this EP */ 1638 udc_ep_dma_disable(udc, ep->hwep_num); 1639 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr)); 1640 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr)); 1641 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr)); 1642 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr)); 1643 1644 /* Remove the DD pointer in the UDCA */ 1645 udc->udca_v_base[ep->hwep_num] = 0; 1646 1647 /* Disable and reset endpoint and interrupt */ 1648 uda_clear_hwepint(udc, ep->hwep_num); 1649 udc_unrealize_hwep(udc, ep->hwep_num); 1650 1651 ep->hwep_num = 0; 1652 1653 spin_unlock_irqrestore(&udc->lock, flags); 1654 1655 atomic_dec(&udc->enabled_ep_cnt); 1656 wake_up(&udc->ep_disable_wait_queue); 1657 1658 return 0; 1659 } 1660 1661 /* Must be called without lock */ 1662 static int lpc32xx_ep_enable(struct usb_ep *_ep, 1663 const struct usb_endpoint_descriptor *desc) 1664 { 1665 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); 1666 struct lpc32xx_udc *udc = ep->udc; 1667 u16 maxpacket; 1668 u32 tmp; 1669 unsigned long flags; 1670 1671 /* Verify EP data */ 1672 if ((!_ep) || (!ep) || (!desc) || 1673 (desc->bDescriptorType != USB_DT_ENDPOINT)) { 1674 dev_dbg(udc->dev, "bad ep or descriptor\n"); 1675 return -EINVAL; 1676 } 1677 maxpacket = usb_endpoint_maxp(desc); 1678 if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { 1679 dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); 1680 return -EINVAL; 1681 } 1682 1683 /* Don't touch EP0 */ 1684 if (ep->hwep_num_base == 0) { 1685 dev_dbg(udc->dev, "Can't re-enable EP0!!!\n"); 1686 return -EINVAL; 1687 } 1688 1689 /* Is driver ready? 
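	 * A gadget driver must be bound and the bus speed known (i.e. a USB
	 * reset has been seen) before a non-control endpoint can be enabled.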
*/ 1690 if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) { 1691 dev_dbg(udc->dev, "bogus device state\n"); 1692 return -ESHUTDOWN; 1693 } 1694 1695 tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 1696 switch (tmp) { 1697 case USB_ENDPOINT_XFER_CONTROL: 1698 return -EINVAL; 1699 1700 case USB_ENDPOINT_XFER_INT: 1701 if (maxpacket > ep->maxpacket) { 1702 dev_dbg(udc->dev, 1703 "Bad INT endpoint maxpacket %d\n", maxpacket); 1704 return -EINVAL; 1705 } 1706 break; 1707 1708 case USB_ENDPOINT_XFER_BULK: 1709 switch (maxpacket) { 1710 case 8: 1711 case 16: 1712 case 32: 1713 case 64: 1714 break; 1715 1716 default: 1717 dev_dbg(udc->dev, 1718 "Bad BULK endpoint maxpacket %d\n", maxpacket); 1719 return -EINVAL; 1720 } 1721 break; 1722 1723 case USB_ENDPOINT_XFER_ISOC: 1724 break; 1725 } 1726 spin_lock_irqsave(&udc->lock, flags); 1727 1728 /* Initialize endpoint to match the selected descriptor */ 1729 ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0; 1730 ep->ep.maxpacket = maxpacket; 1731 1732 /* Map hardware endpoint from base and direction */ 1733 if (ep->is_in) 1734 /* IN endpoints are offset 1 from the OUT endpoint */ 1735 ep->hwep_num = ep->hwep_num_base + EP_IN; 1736 else 1737 ep->hwep_num = ep->hwep_num_base; 1738 1739 ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name, 1740 ep->hwep_num, maxpacket, (ep->is_in == 1)); 1741 1742 /* Realize the endpoint, interrupt is enabled later when 1743 * buffers are queued, IN EPs will NAK until buffers are ready */ 1744 udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket); 1745 udc_clr_buffer_hwep(udc, ep->hwep_num); 1746 uda_disable_hwepint(udc, ep->hwep_num); 1747 udc_clrstall_hwep(udc, ep->hwep_num); 1748 1749 /* Clear all DMA statuses for this EP */ 1750 udc_ep_dma_disable(udc, ep->hwep_num); 1751 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr)); 1752 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr)); 1753 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr)); 1754 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr)); 1755 1756 spin_unlock_irqrestore(&udc->lock, flags); 1757 1758 atomic_inc(&udc->enabled_ep_cnt); 1759 return 0; 1760 } 1761 1762 /* 1763 * Allocate a USB request list 1764 * Can be called with or without lock 1765 */ 1766 static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep, 1767 gfp_t gfp_flags) 1768 { 1769 struct lpc32xx_request *req; 1770 1771 req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags); 1772 if (!req) 1773 return NULL; 1774 1775 INIT_LIST_HEAD(&req->queue); 1776 return &req->req; 1777 } 1778 1779 /* 1780 * De-allocate a USB request list 1781 * Can be called with or without lock 1782 */ 1783 static void lpc32xx_ep_free_request(struct usb_ep *_ep, 1784 struct usb_request *_req) 1785 { 1786 struct lpc32xx_request *req; 1787 1788 req = container_of(_req, struct lpc32xx_request, req); 1789 BUG_ON(!list_empty(&req->queue)); 1790 kfree(req); 1791 } 1792 1793 /* Must be called without lock */ 1794 static int lpc32xx_ep_queue(struct usb_ep *_ep, 1795 struct usb_request *_req, gfp_t gfp_flags) 1796 { 1797 struct lpc32xx_request *req; 1798 struct lpc32xx_ep *ep; 1799 struct lpc32xx_udc *udc; 1800 unsigned long flags; 1801 int status = 0; 1802 1803 req = container_of(_req, struct lpc32xx_request, req); 1804 ep = container_of(_ep, struct lpc32xx_ep, ep); 1805 1806 if (!_ep || !_req || !_req->complete || !_req->buf || 1807 !list_empty(&req->queue)) 1808 return -EINVAL; 1809 1810 udc = ep->udc; 1811 1812 if (udc->gadget.speed == 
USB_SPEED_UNKNOWN) 1813 return -EPIPE; 1814 1815 if (ep->lep) { 1816 struct lpc32xx_usbd_dd_gad *dd; 1817 1818 status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in); 1819 if (status) 1820 return status; 1821 1822 /* For the request, build a list of DDs */ 1823 dd = udc_dd_alloc(udc); 1824 if (!dd) { 1825 /* Error allocating DD */ 1826 return -ENOMEM; 1827 } 1828 req->dd_desc_ptr = dd; 1829 1830 /* Setup the DMA descriptor */ 1831 dd->dd_next_phy = dd->dd_next_v = 0; 1832 dd->dd_buffer_addr = req->req.dma; 1833 dd->dd_status = 0; 1834 1835 /* Special handling for ISO EPs */ 1836 if (ep->eptype == EP_ISO_TYPE) { 1837 dd->dd_setup = DD_SETUP_ISO_EP | 1838 DD_SETUP_PACKETLEN(0) | 1839 DD_SETUP_DMALENBYTES(1); 1840 dd->dd_iso_ps_mem_addr = dd->this_dma + 24; 1841 if (ep->is_in) 1842 dd->iso_status[0] = req->req.length; 1843 else 1844 dd->iso_status[0] = 0; 1845 } else 1846 dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) | 1847 DD_SETUP_DMALENBYTES(req->req.length); 1848 } 1849 1850 ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name, 1851 _req, _req->length, _req->buf, ep->is_in, _req->zero); 1852 1853 spin_lock_irqsave(&udc->lock, flags); 1854 1855 _req->status = -EINPROGRESS; 1856 _req->actual = 0; 1857 req->send_zlp = _req->zero; 1858 1859 /* Kickstart empty queues */ 1860 if (list_empty(&ep->queue)) { 1861 list_add_tail(&req->queue, &ep->queue); 1862 1863 if (ep->hwep_num_base == 0) { 1864 /* Handle expected data direction */ 1865 if (ep->is_in) { 1866 /* IN packet to host */ 1867 udc->ep0state = DATA_IN; 1868 status = udc_ep0_in_req(udc); 1869 } else { 1870 /* OUT packet from host */ 1871 udc->ep0state = DATA_OUT; 1872 status = udc_ep0_out_req(udc); 1873 } 1874 } else if (ep->is_in) { 1875 /* IN packet to host and kick off transfer */ 1876 if (!ep->req_pending) 1877 udc_ep_in_req_dma(udc, ep); 1878 } else 1879 /* OUT packet from host and kick off list */ 1880 if (!ep->req_pending) 1881 udc_ep_out_req_dma(udc, ep); 1882 } else 1883 list_add_tail(&req->queue, &ep->queue); 1884 1885 spin_unlock_irqrestore(&udc->lock, flags); 1886 1887 return (status < 0) ? 
status : 0; 1888 } 1889 1890 /* Must be called without lock */ 1891 static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1892 { 1893 struct lpc32xx_ep *ep; 1894 struct lpc32xx_request *req; 1895 unsigned long flags; 1896 1897 ep = container_of(_ep, struct lpc32xx_ep, ep); 1898 if (!_ep || ep->hwep_num_base == 0) 1899 return -EINVAL; 1900 1901 spin_lock_irqsave(&ep->udc->lock, flags); 1902 1903 /* make sure it's actually queued on this endpoint */ 1904 list_for_each_entry(req, &ep->queue, queue) { 1905 if (&req->req == _req) 1906 break; 1907 } 1908 if (&req->req != _req) { 1909 spin_unlock_irqrestore(&ep->udc->lock, flags); 1910 return -EINVAL; 1911 } 1912 1913 done(ep, req, -ECONNRESET); 1914 1915 spin_unlock_irqrestore(&ep->udc->lock, flags); 1916 1917 return 0; 1918 } 1919 1920 /* Must be called without lock */ 1921 static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) 1922 { 1923 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); 1924 struct lpc32xx_udc *udc = ep->udc; 1925 unsigned long flags; 1926 1927 if ((!ep) || (ep->hwep_num <= 1)) 1928 return -EINVAL; 1929 1930 /* Don't halt an IN EP */ 1931 if (ep->is_in) 1932 return -EAGAIN; 1933 1934 spin_lock_irqsave(&udc->lock, flags); 1935 1936 if (value == 1) { 1937 /* stall */ 1938 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num), 1939 DAT_WR_BYTE(EP_STAT_ST)); 1940 } else { 1941 /* End stall */ 1942 ep->wedge = 0; 1943 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num), 1944 DAT_WR_BYTE(0)); 1945 } 1946 1947 spin_unlock_irqrestore(&udc->lock, flags); 1948 1949 return 0; 1950 } 1951 1952 /* set the halt feature and ignores clear requests */ 1953 static int lpc32xx_ep_set_wedge(struct usb_ep *_ep) 1954 { 1955 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); 1956 1957 if (!_ep || !ep->udc) 1958 return -EINVAL; 1959 1960 ep->wedge = 1; 1961 1962 return usb_ep_set_halt(_ep); 1963 } 1964 1965 static const struct usb_ep_ops lpc32xx_ep_ops = { 1966 .enable = lpc32xx_ep_enable, 1967 .disable = lpc32xx_ep_disable, 1968 .alloc_request = lpc32xx_ep_alloc_request, 1969 .free_request = lpc32xx_ep_free_request, 1970 .queue = lpc32xx_ep_queue, 1971 .dequeue = lpc32xx_ep_dequeue, 1972 .set_halt = lpc32xx_ep_set_halt, 1973 .set_wedge = lpc32xx_ep_set_wedge, 1974 }; 1975 1976 /* Send a ZLP on a non-0 IN EP */ 1977 void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) 1978 { 1979 /* Clear EP status */ 1980 udc_clearep_getsts(udc, ep->hwep_num); 1981 1982 /* Send ZLP via FIFO mechanism */ 1983 udc_write_hwep(udc, ep->hwep_num, NULL, 0); 1984 } 1985 1986 /* 1987 * Handle EP completion for ZLP 1988 * This function will only be called when a delayed ZLP needs to be sent out 1989 * after a DMA transfer has filled both buffers. 1990 */ 1991 void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) 1992 { 1993 u32 epstatus; 1994 struct lpc32xx_request *req; 1995 1996 if (ep->hwep_num <= 0) 1997 return; 1998 1999 uda_clear_hwepint(udc, ep->hwep_num); 2000 2001 /* If this interrupt isn't enabled, return now */ 2002 if (!(udc->enabled_hwepints & (1 << ep->hwep_num))) 2003 return; 2004 2005 /* Get endpoint status */ 2006 epstatus = udc_clearep_getsts(udc, ep->hwep_num); 2007 2008 /* 2009 * This should never happen, but protect against writing to the 2010 * buffer when full. 
 */
	if (epstatus & EP_SEL_F)
		return;

	if (ep->is_in) {
		udc_send_in_zlp(udc, ep);
		uda_disable_hwepint(udc, ep->hwep_num);
	} else
		return;

	/* If there isn't a request waiting, something went wrong */
	if (list_empty(&ep->queue))
		return;

	req = list_first_entry(&ep->queue, struct lpc32xx_request, queue);
	done(ep, req, 0);

	/* Start another request if ready */
	if (!list_empty(&ep->queue)) {
		if (ep->is_in)
			udc_ep_in_req_dma(udc, ep);
		else
			udc_ep_out_req_dma(udc, ep);
	} else
		ep->req_pending = 0;
}

/* DMA end of transfer completion */
static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 status, epstatus;
	struct lpc32xx_request *req;
	struct lpc32xx_usbd_dd_gad *dd;

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep->totalints++;
#endif

	/* A DMA interrupt without a queued request indicates a problem */
	if (list_empty(&ep->queue)) {
		ep_err(ep, "DMA interrupt on no req!\n");
		return;
	}
	req = list_first_entry(&ep->queue, struct lpc32xx_request, queue);
	dd = req->dd_desc_ptr;

	/* DMA descriptor should always be retired for this call */
	if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
		ep_warn(ep, "DMA descriptor did not retire\n");

	/* Disable DMA */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
	writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));

	/* System error? */
	if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
	    (1 << ep->hwep_num)) {
		writel((1 << ep->hwep_num),
		       USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		ep_err(ep, "AHB critical error!\n");
		ep->req_pending = 0;

		/* The error could have occurred on a packet of a multipacket
		 * transfer, so recovering the transfer is not possible. Close
		 * the request with an error */
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* Handle the current DD's status */
	status = dd->dd_status;
	switch (status & DD_STATUS_STS_MASK) {
	case DD_STATUS_STS_NS:
		/* DD not serviced? This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
		       status);

		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_BS:
		/* Interrupt only fires on EOT - This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_NC:
	case DD_STATUS_STS_DUR:
		/* Really just a short packet, not an underrun */
		/* This is a good status and what we expect */
		break;

	default:
		/* Data overrun, system error, or unknown */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* ISO endpoints are handled differently */
	if (ep->eptype == EP_ISO_TYPE) {
		if (ep->is_in)
			req->req.actual = req->req.length;
		else
			req->req.actual = dd->iso_status[0] & 0xFFFF;
	} else
		req->req.actual += DD_STATUS_CURDMACNT(status);

	/* Send a ZLP if necessary. This will be done for non-int
	 * packets whose size is a multiple of MAXP */
	if (req->send_zlp) {
		/*
		 * If at least 1 buffer is available, send the ZLP now.
2129 * Otherwise, the ZLP send needs to be deferred until a 2130 * buffer is available. 2131 */ 2132 if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) { 2133 udc_clearep_getsts(udc, ep->hwep_num); 2134 uda_enable_hwepint(udc, ep->hwep_num); 2135 epstatus = udc_clearep_getsts(udc, ep->hwep_num); 2136 2137 /* Let the EP interrupt handle the ZLP */ 2138 return; 2139 } else 2140 udc_send_in_zlp(udc, ep); 2141 } 2142 2143 /* Transfer request is complete */ 2144 done(ep, req, 0); 2145 2146 /* Start another request if ready */ 2147 udc_clearep_getsts(udc, ep->hwep_num); 2148 if (!list_empty((&ep->queue))) { 2149 if (ep->is_in) 2150 udc_ep_in_req_dma(udc, ep); 2151 else 2152 udc_ep_out_req_dma(udc, ep); 2153 } else 2154 ep->req_pending = 0; 2155 2156 } 2157 2158 /* 2159 * 2160 * Endpoint 0 functions 2161 * 2162 */ 2163 static void udc_handle_dev(struct lpc32xx_udc *udc) 2164 { 2165 u32 tmp; 2166 2167 udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT); 2168 tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT); 2169 2170 if (tmp & DEV_RST) 2171 uda_usb_reset(udc); 2172 else if (tmp & DEV_CON_CH) 2173 uda_power_event(udc, (tmp & DEV_CON)); 2174 else if (tmp & DEV_SUS_CH) { 2175 if (tmp & DEV_SUS) { 2176 if (udc->vbus == 0) 2177 stop_activity(udc); 2178 else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) && 2179 udc->driver) { 2180 /* Power down transceiver */ 2181 udc->poweron = 0; 2182 schedule_work(&udc->pullup_job); 2183 uda_resm_susp_event(udc, 1); 2184 } 2185 } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) && 2186 udc->driver && udc->vbus) { 2187 uda_resm_susp_event(udc, 0); 2188 /* Power up transceiver */ 2189 udc->poweron = 1; 2190 schedule_work(&udc->pullup_job); 2191 } 2192 } 2193 } 2194 2195 static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex) 2196 { 2197 struct lpc32xx_ep *ep; 2198 u32 ep0buff = 0, tmp; 2199 2200 switch (reqtype & USB_RECIP_MASK) { 2201 case USB_RECIP_INTERFACE: 2202 break; /* Not supported */ 2203 2204 case USB_RECIP_DEVICE: 2205 ep0buff = udc->gadget.is_selfpowered; 2206 if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP)) 2207 ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP); 2208 break; 2209 2210 case USB_RECIP_ENDPOINT: 2211 tmp = wIndex & USB_ENDPOINT_NUMBER_MASK; 2212 ep = &udc->ep[tmp]; 2213 if ((tmp == 0) || (tmp >= NUM_ENDPOINTS)) 2214 return -EOPNOTSUPP; 2215 2216 if (wIndex & USB_DIR_IN) { 2217 if (!ep->is_in) 2218 return -EOPNOTSUPP; /* Something's wrong */ 2219 } else if (ep->is_in) 2220 return -EOPNOTSUPP; /* Not an IN endpoint */ 2221 2222 /* Get status of the endpoint */ 2223 udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num)); 2224 tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num)); 2225 2226 if (tmp & EP_SEL_ST) 2227 ep0buff = (1 << USB_ENDPOINT_HALT); 2228 else 2229 ep0buff = 0; 2230 break; 2231 2232 default: 2233 break; 2234 } 2235 2236 /* Return data */ 2237 udc_write_hwep(udc, EP_IN, &ep0buff, 2); 2238 2239 return 0; 2240 } 2241 2242 static void udc_handle_ep0_setup(struct lpc32xx_udc *udc) 2243 { 2244 struct lpc32xx_ep *ep, *ep0 = &udc->ep[0]; 2245 struct usb_ctrlrequest ctrlpkt; 2246 int i, bytes; 2247 u16 wIndex, wValue, wLength, reqtype, req, tmp; 2248 2249 /* Nuke previous transfers */ 2250 nuke(ep0, -EPROTO); 2251 2252 /* Get setup packet */ 2253 bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8); 2254 if (bytes != 8) { 2255 ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n", 2256 bytes); 2257 return; 2258 } 2259 2260 /* Native endianness */ 2261 wIndex = le16_to_cpu(ctrlpkt.wIndex); 2262 wValue = 
le16_to_cpu(ctrlpkt.wValue);
	wLength = le16_to_cpu(ctrlpkt.wLength);
	/* bRequestType and bRequest are single bytes, no byte swap needed */
	reqtype = ctrlpkt.bRequestType;

	/* Set direction of EP0 */
	if (likely(reqtype & USB_DIR_IN))
		ep0->is_in = 1;
	else
		ep0->is_in = 0;

	/* Handle SETUP packet */
	req = ctrlpkt.bRequest;
	switch (req) {
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		switch (reqtype) {
		case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (wValue != USB_DEVICE_REMOTE_WAKEUP)
				goto stall; /* Nothing else handled */

			/* Tell board about event */
			if (req == USB_REQ_CLEAR_FEATURE)
				udc->dev_status &=
					~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				udc->dev_status |=
					(1 << USB_DEVICE_REMOTE_WAKEUP);
			uda_remwkp_cgh(udc);
			goto zlp_send;

		case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
			if ((wValue != USB_ENDPOINT_HALT) ||
			    (tmp >= NUM_ENDPOINTS))
				break;

			/* Find hardware endpoint from logical endpoint */
			ep = &udc->ep[tmp];
			tmp = ep->hwep_num;
			if (tmp == 0)
				break;

			if (req == USB_REQ_SET_FEATURE)
				udc_stall_hwep(udc, tmp);
			else if (!ep->wedge)
				udc_clrstall_hwep(udc, tmp);

			goto zlp_send;

		default:
			break;
		}
		break;

	case USB_REQ_SET_ADDRESS:
		if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
			udc_set_address(udc, wValue);
			goto zlp_send;
		}
		break;

	case USB_REQ_GET_STATUS:
		udc_get_status(udc, reqtype, wIndex);
		return;

	default:
		break; /* Let the gadget driver handle everything else */
	}

	if (likely(udc->driver)) {
		/* device-to-host (IN) or no-data setup command, process
		 * immediately */
		spin_unlock(&udc->lock);
		i = udc->driver->setup(&udc->gadget, &ctrlpkt);

		spin_lock(&udc->lock);
		if (req == USB_REQ_SET_CONFIGURATION) {
			/* Configuration is set after endpoints are realized */
			if (wValue) {
				/* Set configuration */
				udc_set_device_configured(udc);

				udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
							DAT_WR_BYTE(AP_CLK |
							INAK_BI | INAK_II));
			} else {
				/* Clear configuration */
				udc_set_device_unconfigured(udc);

				/* Disable NAK interrupts */
				udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
							DAT_WR_BYTE(AP_CLK));
			}
		}

		if (i < 0) {
			/* setup processing failed, force stall */
			dev_dbg(udc->dev,
				"req %02x.%02x protocol STALL; stat %d\n",
				reqtype, req, i);
			udc->ep0state = WAIT_FOR_SETUP;
			goto stall;
		}
	}

	if (!ep0->is_in)
		udc_ep0_send_zlp(udc);	/* ZLP IN packet on data phase */

	return;

stall:
	udc_stall_hwep(udc, EP_IN);
	return;

zlp_send:
	udc_ep0_send_zlp(udc);
	return;
}

/* IN endpoint 0 transfer */
static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 epstatus;

	/* Clear EP interrupt */
	epstatus = udc_clearep_getsts(udc, EP_IN);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep0->totalints++;
#endif

	/* Stalled? Clear stall and reset buffers */
	if (epstatus & EP_SEL_ST) {
		udc_clrstall_hwep(udc, EP_IN);
		nuke(ep0, -ECONNABORTED);
		udc->ep0state = WAIT_FOR_SETUP;
		return;
	}

	/* Is a buffer available?
*/ 2403 if (!(epstatus & EP_SEL_F)) { 2404 /* Handle based on current state */ 2405 if (udc->ep0state == DATA_IN) 2406 udc_ep0_in_req(udc); 2407 else { 2408 /* Unknown state for EP0 oe end of DATA IN phase */ 2409 nuke(ep0, -ECONNABORTED); 2410 udc->ep0state = WAIT_FOR_SETUP; 2411 } 2412 } 2413 } 2414 2415 /* OUT endpoint 0 transfer */ 2416 static void udc_handle_ep0_out(struct lpc32xx_udc *udc) 2417 { 2418 struct lpc32xx_ep *ep0 = &udc->ep[0]; 2419 u32 epstatus; 2420 2421 /* Clear EP interrupt */ 2422 epstatus = udc_clearep_getsts(udc, EP_OUT); 2423 2424 2425 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 2426 ep0->totalints++; 2427 #endif 2428 2429 /* Stalled? */ 2430 if (epstatus & EP_SEL_ST) { 2431 udc_clrstall_hwep(udc, EP_OUT); 2432 nuke(ep0, -ECONNABORTED); 2433 udc->ep0state = WAIT_FOR_SETUP; 2434 return; 2435 } 2436 2437 /* A NAK may occur if a packet couldn't be received yet */ 2438 if (epstatus & EP_SEL_EPN) 2439 return; 2440 /* Setup packet incoming? */ 2441 if (epstatus & EP_SEL_STP) { 2442 nuke(ep0, 0); 2443 udc->ep0state = WAIT_FOR_SETUP; 2444 } 2445 2446 /* Data available? */ 2447 if (epstatus & EP_SEL_F) 2448 /* Handle based on current state */ 2449 switch (udc->ep0state) { 2450 case WAIT_FOR_SETUP: 2451 udc_handle_ep0_setup(udc); 2452 break; 2453 2454 case DATA_OUT: 2455 udc_ep0_out_req(udc); 2456 break; 2457 2458 default: 2459 /* Unknown state for EP0 */ 2460 nuke(ep0, -ECONNABORTED); 2461 udc->ep0state = WAIT_FOR_SETUP; 2462 } 2463 } 2464 2465 /* Must be called without lock */ 2466 static int lpc32xx_get_frame(struct usb_gadget *gadget) 2467 { 2468 int frame; 2469 unsigned long flags; 2470 struct lpc32xx_udc *udc = to_udc(gadget); 2471 2472 if (!udc->clocked) 2473 return -EINVAL; 2474 2475 spin_lock_irqsave(&udc->lock, flags); 2476 2477 frame = (int) udc_get_current_frame(udc); 2478 2479 spin_unlock_irqrestore(&udc->lock, flags); 2480 2481 return frame; 2482 } 2483 2484 static int lpc32xx_wakeup(struct usb_gadget *gadget) 2485 { 2486 return -ENOTSUPP; 2487 } 2488 2489 static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on) 2490 { 2491 gadget->is_selfpowered = (is_on != 0); 2492 2493 return 0; 2494 } 2495 2496 /* 2497 * vbus is here! turn everything on that's ready 2498 * Must be called without lock 2499 */ 2500 static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active) 2501 { 2502 unsigned long flags; 2503 struct lpc32xx_udc *udc = to_udc(gadget); 2504 2505 spin_lock_irqsave(&udc->lock, flags); 2506 2507 /* Doesn't need lock */ 2508 if (udc->driver) { 2509 udc_clk_set(udc, 1); 2510 udc_enable(udc); 2511 pullup(udc, is_active); 2512 } else { 2513 stop_activity(udc); 2514 pullup(udc, 0); 2515 2516 spin_unlock_irqrestore(&udc->lock, flags); 2517 /* 2518 * Wait for all the endpoints to disable, 2519 * before disabling clocks. Don't wait if 2520 * endpoints are not enabled. 
2521 */ 2522 if (atomic_read(&udc->enabled_ep_cnt)) 2523 wait_event_interruptible(udc->ep_disable_wait_queue, 2524 (atomic_read(&udc->enabled_ep_cnt) == 0)); 2525 2526 spin_lock_irqsave(&udc->lock, flags); 2527 2528 udc_clk_set(udc, 0); 2529 } 2530 2531 spin_unlock_irqrestore(&udc->lock, flags); 2532 2533 return 0; 2534 } 2535 2536 /* Can be called with or without lock */ 2537 static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on) 2538 { 2539 struct lpc32xx_udc *udc = to_udc(gadget); 2540 2541 /* Doesn't need lock */ 2542 pullup(udc, is_on); 2543 2544 return 0; 2545 } 2546 2547 static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *); 2548 static int lpc32xx_stop(struct usb_gadget *); 2549 2550 static const struct usb_gadget_ops lpc32xx_udc_ops = { 2551 .get_frame = lpc32xx_get_frame, 2552 .wakeup = lpc32xx_wakeup, 2553 .set_selfpowered = lpc32xx_set_selfpowered, 2554 .vbus_session = lpc32xx_vbus_session, 2555 .pullup = lpc32xx_pullup, 2556 .udc_start = lpc32xx_start, 2557 .udc_stop = lpc32xx_stop, 2558 }; 2559 2560 static void nop_release(struct device *dev) 2561 { 2562 /* nothing to free */ 2563 } 2564 2565 static const struct lpc32xx_udc controller_template = { 2566 .gadget = { 2567 .ops = &lpc32xx_udc_ops, 2568 .name = driver_name, 2569 .dev = { 2570 .init_name = "gadget", 2571 .release = nop_release, 2572 } 2573 }, 2574 .ep[0] = { 2575 .ep = { 2576 .name = "ep0", 2577 .ops = &lpc32xx_ep_ops, 2578 }, 2579 .maxpacket = 64, 2580 .hwep_num_base = 0, 2581 .hwep_num = 0, /* Can be 0 or 1, has special handling */ 2582 .lep = 0, 2583 .eptype = EP_CTL_TYPE, 2584 }, 2585 .ep[1] = { 2586 .ep = { 2587 .name = "ep1-int", 2588 .ops = &lpc32xx_ep_ops, 2589 }, 2590 .maxpacket = 64, 2591 .hwep_num_base = 2, 2592 .hwep_num = 0, /* 2 or 3, will be set later */ 2593 .lep = 1, 2594 .eptype = EP_INT_TYPE, 2595 }, 2596 .ep[2] = { 2597 .ep = { 2598 .name = "ep2-bulk", 2599 .ops = &lpc32xx_ep_ops, 2600 }, 2601 .maxpacket = 64, 2602 .hwep_num_base = 4, 2603 .hwep_num = 0, /* 4 or 5, will be set later */ 2604 .lep = 2, 2605 .eptype = EP_BLK_TYPE, 2606 }, 2607 .ep[3] = { 2608 .ep = { 2609 .name = "ep3-iso", 2610 .ops = &lpc32xx_ep_ops, 2611 }, 2612 .maxpacket = 1023, 2613 .hwep_num_base = 6, 2614 .hwep_num = 0, /* 6 or 7, will be set later */ 2615 .lep = 3, 2616 .eptype = EP_ISO_TYPE, 2617 }, 2618 .ep[4] = { 2619 .ep = { 2620 .name = "ep4-int", 2621 .ops = &lpc32xx_ep_ops, 2622 }, 2623 .maxpacket = 64, 2624 .hwep_num_base = 8, 2625 .hwep_num = 0, /* 8 or 9, will be set later */ 2626 .lep = 4, 2627 .eptype = EP_INT_TYPE, 2628 }, 2629 .ep[5] = { 2630 .ep = { 2631 .name = "ep5-bulk", 2632 .ops = &lpc32xx_ep_ops, 2633 }, 2634 .maxpacket = 64, 2635 .hwep_num_base = 10, 2636 .hwep_num = 0, /* 10 or 11, will be set later */ 2637 .lep = 5, 2638 .eptype = EP_BLK_TYPE, 2639 }, 2640 .ep[6] = { 2641 .ep = { 2642 .name = "ep6-iso", 2643 .ops = &lpc32xx_ep_ops, 2644 }, 2645 .maxpacket = 1023, 2646 .hwep_num_base = 12, 2647 .hwep_num = 0, /* 12 or 13, will be set later */ 2648 .lep = 6, 2649 .eptype = EP_ISO_TYPE, 2650 }, 2651 .ep[7] = { 2652 .ep = { 2653 .name = "ep7-int", 2654 .ops = &lpc32xx_ep_ops, 2655 }, 2656 .maxpacket = 64, 2657 .hwep_num_base = 14, 2658 .hwep_num = 0, 2659 .lep = 7, 2660 .eptype = EP_INT_TYPE, 2661 }, 2662 .ep[8] = { 2663 .ep = { 2664 .name = "ep8-bulk", 2665 .ops = &lpc32xx_ep_ops, 2666 }, 2667 .maxpacket = 64, 2668 .hwep_num_base = 16, 2669 .hwep_num = 0, 2670 .lep = 8, 2671 .eptype = EP_BLK_TYPE, 2672 }, 2673 .ep[9] = { 2674 .ep = { 2675 .name = "ep9-iso", 2676 .ops = 
&lpc32xx_ep_ops, 2677 }, 2678 .maxpacket = 1023, 2679 .hwep_num_base = 18, 2680 .hwep_num = 0, 2681 .lep = 9, 2682 .eptype = EP_ISO_TYPE, 2683 }, 2684 .ep[10] = { 2685 .ep = { 2686 .name = "ep10-int", 2687 .ops = &lpc32xx_ep_ops, 2688 }, 2689 .maxpacket = 64, 2690 .hwep_num_base = 20, 2691 .hwep_num = 0, 2692 .lep = 10, 2693 .eptype = EP_INT_TYPE, 2694 }, 2695 .ep[11] = { 2696 .ep = { 2697 .name = "ep11-bulk", 2698 .ops = &lpc32xx_ep_ops, 2699 }, 2700 .maxpacket = 64, 2701 .hwep_num_base = 22, 2702 .hwep_num = 0, 2703 .lep = 11, 2704 .eptype = EP_BLK_TYPE, 2705 }, 2706 .ep[12] = { 2707 .ep = { 2708 .name = "ep12-iso", 2709 .ops = &lpc32xx_ep_ops, 2710 }, 2711 .maxpacket = 1023, 2712 .hwep_num_base = 24, 2713 .hwep_num = 0, 2714 .lep = 12, 2715 .eptype = EP_ISO_TYPE, 2716 }, 2717 .ep[13] = { 2718 .ep = { 2719 .name = "ep13-int", 2720 .ops = &lpc32xx_ep_ops, 2721 }, 2722 .maxpacket = 64, 2723 .hwep_num_base = 26, 2724 .hwep_num = 0, 2725 .lep = 13, 2726 .eptype = EP_INT_TYPE, 2727 }, 2728 .ep[14] = { 2729 .ep = { 2730 .name = "ep14-bulk", 2731 .ops = &lpc32xx_ep_ops, 2732 }, 2733 .maxpacket = 64, 2734 .hwep_num_base = 28, 2735 .hwep_num = 0, 2736 .lep = 14, 2737 .eptype = EP_BLK_TYPE, 2738 }, 2739 .ep[15] = { 2740 .ep = { 2741 .name = "ep15-bulk", 2742 .ops = &lpc32xx_ep_ops, 2743 }, 2744 .maxpacket = 1023, 2745 .hwep_num_base = 30, 2746 .hwep_num = 0, 2747 .lep = 15, 2748 .eptype = EP_BLK_TYPE, 2749 }, 2750 }; 2751 2752 /* ISO and status interrupts */ 2753 static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc) 2754 { 2755 u32 tmp, devstat; 2756 struct lpc32xx_udc *udc = _udc; 2757 2758 spin_lock(&udc->lock); 2759 2760 /* Read the device status register */ 2761 devstat = readl(USBD_DEVINTST(udc->udp_baseaddr)); 2762 2763 devstat &= ~USBD_EP_FAST; 2764 writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr)); 2765 devstat = devstat & udc->enabled_devints; 2766 2767 /* Device specific handling needed? */ 2768 if (devstat & USBD_DEV_STAT) 2769 udc_handle_dev(udc); 2770 2771 /* Start of frame? (devstat & FRAME_INT): 2772 * The frame interrupt isn't really needed for ISO support, 2773 * as the driver will queue the necessary packets */ 2774 2775 /* Error? */ 2776 if (devstat & ERR_INT) { 2777 /* All types of errors, from cable removal during transfer to 2778 * misc protocol and bit errors. These are mostly for just info, 2779 * as the USB hardware will work around these. If these errors 2780 * happen alot, something is wrong. 
*/ 2781 udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT); 2782 tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT); 2783 dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp); 2784 } 2785 2786 spin_unlock(&udc->lock); 2787 2788 return IRQ_HANDLED; 2789 } 2790 2791 /* EP interrupts */ 2792 static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc) 2793 { 2794 u32 tmp; 2795 struct lpc32xx_udc *udc = _udc; 2796 2797 spin_lock(&udc->lock); 2798 2799 /* Read the device status register */ 2800 writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr)); 2801 2802 /* Endpoints */ 2803 tmp = readl(USBD_EPINTST(udc->udp_baseaddr)); 2804 2805 /* Special handling for EP0 */ 2806 if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) { 2807 /* Handle EP0 IN */ 2808 if (tmp & (EP_MASK_SEL(0, EP_IN))) 2809 udc_handle_ep0_in(udc); 2810 2811 /* Handle EP0 OUT */ 2812 if (tmp & (EP_MASK_SEL(0, EP_OUT))) 2813 udc_handle_ep0_out(udc); 2814 } 2815 2816 /* All other EPs */ 2817 if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) { 2818 int i; 2819 2820 /* Handle other EP interrupts */ 2821 for (i = 1; i < NUM_ENDPOINTS; i++) { 2822 if (tmp & (1 << udc->ep[i].hwep_num)) 2823 udc_handle_eps(udc, &udc->ep[i]); 2824 } 2825 } 2826 2827 spin_unlock(&udc->lock); 2828 2829 return IRQ_HANDLED; 2830 } 2831 2832 static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc) 2833 { 2834 struct lpc32xx_udc *udc = _udc; 2835 2836 int i; 2837 u32 tmp; 2838 2839 spin_lock(&udc->lock); 2840 2841 /* Handle EP DMA EOT interrupts */ 2842 tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) | 2843 (readl(USBD_EPDMAST(udc->udp_baseaddr)) & 2844 readl(USBD_NDDRTINTST(udc->udp_baseaddr))) | 2845 readl(USBD_SYSERRTINTST(udc->udp_baseaddr)); 2846 for (i = 1; i < NUM_ENDPOINTS; i++) { 2847 if (tmp & (1 << udc->ep[i].hwep_num)) 2848 udc_handle_dma_ep(udc, &udc->ep[i]); 2849 } 2850 2851 spin_unlock(&udc->lock); 2852 2853 return IRQ_HANDLED; 2854 } 2855 2856 /* 2857 * 2858 * VBUS detection, pullup handler, and Gadget cable state notification 2859 * 2860 */ 2861 static void vbus_work(struct work_struct *work) 2862 { 2863 u8 value; 2864 struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc, 2865 vbus_job); 2866 2867 if (udc->enabled != 0) { 2868 /* Discharge VBUS real quick */ 2869 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 2870 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG); 2871 2872 /* Give VBUS some time (100mS) to discharge */ 2873 msleep(100); 2874 2875 /* Disable VBUS discharge resistor */ 2876 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 2877 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR, 2878 OTG1_VBUS_DISCHRG); 2879 2880 /* Clear interrupt */ 2881 i2c_smbus_write_byte_data(udc->isp1301_i2c_client, 2882 ISP1301_I2C_INTERRUPT_LATCH | 2883 ISP1301_I2C_REG_CLEAR_ADDR, ~0); 2884 2885 /* Get the VBUS status from the transceiver */ 2886 value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client, 2887 ISP1301_I2C_INTERRUPT_SOURCE); 2888 2889 /* VBUS on or off? */ 2890 if (value & INT_SESS_VLD) 2891 udc->vbus = 1; 2892 else 2893 udc->vbus = 0; 2894 2895 /* VBUS changed? 
*/ 2896 if (udc->last_vbus != udc->vbus) { 2897 udc->last_vbus = udc->vbus; 2898 lpc32xx_vbus_session(&udc->gadget, udc->vbus); 2899 } 2900 } 2901 2902 /* Re-enable after completion */ 2903 enable_irq(udc->udp_irq[IRQ_USB_ATX]); 2904 } 2905 2906 static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc) 2907 { 2908 struct lpc32xx_udc *udc = _udc; 2909 2910 /* Defer handling of VBUS IRQ to work queue */ 2911 disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]); 2912 schedule_work(&udc->vbus_job); 2913 2914 return IRQ_HANDLED; 2915 } 2916 2917 static int lpc32xx_start(struct usb_gadget *gadget, 2918 struct usb_gadget_driver *driver) 2919 { 2920 struct lpc32xx_udc *udc = to_udc(gadget); 2921 int i; 2922 2923 if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) { 2924 dev_err(udc->dev, "bad parameter.\n"); 2925 return -EINVAL; 2926 } 2927 2928 if (udc->driver) { 2929 dev_err(udc->dev, "UDC already has a gadget driver\n"); 2930 return -EBUSY; 2931 } 2932 2933 udc->driver = driver; 2934 udc->gadget.dev.of_node = udc->dev->of_node; 2935 udc->enabled = 1; 2936 udc->gadget.is_selfpowered = 1; 2937 udc->vbus = 0; 2938 2939 /* Force VBUS process once to check for cable insertion */ 2940 udc->last_vbus = udc->vbus = 0; 2941 schedule_work(&udc->vbus_job); 2942 2943 /* Do not re-enable ATX IRQ (3) */ 2944 for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++) 2945 enable_irq(udc->udp_irq[i]); 2946 2947 return 0; 2948 } 2949 2950 static int lpc32xx_stop(struct usb_gadget *gadget) 2951 { 2952 int i; 2953 struct lpc32xx_udc *udc = to_udc(gadget); 2954 2955 for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++) 2956 disable_irq(udc->udp_irq[i]); 2957 2958 if (udc->clocked) { 2959 spin_lock(&udc->lock); 2960 stop_activity(udc); 2961 spin_unlock(&udc->lock); 2962 2963 /* 2964 * Wait for all the endpoints to disable, 2965 * before disabling clocks. Don't wait if 2966 * endpoints are not enabled. 
2967 */ 2968 if (atomic_read(&udc->enabled_ep_cnt)) 2969 wait_event_interruptible(udc->ep_disable_wait_queue, 2970 (atomic_read(&udc->enabled_ep_cnt) == 0)); 2971 2972 spin_lock(&udc->lock); 2973 udc_clk_set(udc, 0); 2974 spin_unlock(&udc->lock); 2975 } 2976 2977 udc->enabled = 0; 2978 udc->driver = NULL; 2979 2980 return 0; 2981 } 2982 2983 static void lpc32xx_udc_shutdown(struct platform_device *dev) 2984 { 2985 /* Force disconnect on reboot */ 2986 struct lpc32xx_udc *udc = platform_get_drvdata(dev); 2987 2988 pullup(udc, 0); 2989 } 2990 2991 /* 2992 * Callbacks to be overridden by options passed via OF (TODO) 2993 */ 2994 2995 static void lpc32xx_usbd_conn_chg(int conn) 2996 { 2997 /* Do nothing, it might be nice to enable an LED 2998 * based on conn state being !0 */ 2999 } 3000 3001 static void lpc32xx_usbd_susp_chg(int susp) 3002 { 3003 /* Device suspend if susp != 0 */ 3004 } 3005 3006 static void lpc32xx_rmwkup_chg(int remote_wakup_enable) 3007 { 3008 /* Enable or disable USB remote wakeup */ 3009 } 3010 3011 struct lpc32xx_usbd_cfg lpc32xx_usbddata = { 3012 .vbus_drv_pol = 0, 3013 .conn_chgb = &lpc32xx_usbd_conn_chg, 3014 .susp_chgb = &lpc32xx_usbd_susp_chg, 3015 .rmwk_chgb = &lpc32xx_rmwkup_chg, 3016 }; 3017 3018 3019 static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F; 3020 3021 static int lpc32xx_udc_probe(struct platform_device *pdev) 3022 { 3023 struct device *dev = &pdev->dev; 3024 struct lpc32xx_udc *udc; 3025 int retval, i; 3026 struct resource *res; 3027 dma_addr_t dma_handle; 3028 struct device_node *isp1301_node; 3029 3030 udc = kmemdup(&controller_template, sizeof(*udc), GFP_KERNEL); 3031 if (!udc) 3032 return -ENOMEM; 3033 3034 for (i = 0; i <= 15; i++) 3035 udc->ep[i].udc = udc; 3036 udc->gadget.ep0 = &udc->ep[0].ep; 3037 3038 /* init software state */ 3039 udc->gadget.dev.parent = dev; 3040 udc->pdev = pdev; 3041 udc->dev = &pdev->dev; 3042 udc->enabled = 0; 3043 3044 if (pdev->dev.of_node) { 3045 isp1301_node = of_parse_phandle(pdev->dev.of_node, 3046 "transceiver", 0); 3047 } else { 3048 isp1301_node = NULL; 3049 } 3050 3051 udc->isp1301_i2c_client = isp1301_get_client(isp1301_node); 3052 if (!udc->isp1301_i2c_client) { 3053 retval = -EPROBE_DEFER; 3054 goto phy_fail; 3055 } 3056 3057 dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n", 3058 udc->isp1301_i2c_client->addr); 3059 3060 pdev->dev.dma_mask = &lpc32xx_usbd_dmamask; 3061 retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 3062 if (retval) 3063 goto resource_fail; 3064 3065 udc->board = &lpc32xx_usbddata; 3066 3067 /* 3068 * Resources are mapped as follows: 3069 * IORESOURCE_MEM, base address and size of USB space 3070 * IORESOURCE_IRQ, USB device low priority interrupt number 3071 * IORESOURCE_IRQ, USB device high priority interrupt number 3072 * IORESOURCE_IRQ, USB device interrupt number 3073 * IORESOURCE_IRQ, USB transceiver interrupt number 3074 */ 3075 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3076 if (!res) { 3077 retval = -ENXIO; 3078 goto resource_fail; 3079 } 3080 3081 spin_lock_init(&udc->lock); 3082 3083 /* Get IRQs */ 3084 for (i = 0; i < 4; i++) { 3085 udc->udp_irq[i] = platform_get_irq(pdev, i); 3086 if (udc->udp_irq[i] < 0) { 3087 dev_err(udc->dev, 3088 "irq resource %d not available!\n", i); 3089 retval = udc->udp_irq[i]; 3090 goto irq_fail; 3091 } 3092 } 3093 3094 udc->io_p_start = res->start; 3095 udc->io_p_size = resource_size(res); 3096 if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) { 3097 dev_err(udc->dev, "someone's using UDC 
memory\n"); 3098 retval = -EBUSY; 3099 goto request_mem_region_fail; 3100 } 3101 3102 udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size); 3103 if (!udc->udp_baseaddr) { 3104 retval = -ENOMEM; 3105 dev_err(udc->dev, "IO map failure\n"); 3106 goto io_map_fail; 3107 } 3108 3109 /* Enable AHB slave USB clock, needed for further USB clock control */ 3110 writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL); 3111 3112 /* Get required clocks */ 3113 udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5"); 3114 if (IS_ERR(udc->usb_pll_clk)) { 3115 dev_err(udc->dev, "failed to acquire USB PLL\n"); 3116 retval = PTR_ERR(udc->usb_pll_clk); 3117 goto pll_get_fail; 3118 } 3119 udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd"); 3120 if (IS_ERR(udc->usb_slv_clk)) { 3121 dev_err(udc->dev, "failed to acquire USB device clock\n"); 3122 retval = PTR_ERR(udc->usb_slv_clk); 3123 goto usb_clk_get_fail; 3124 } 3125 udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg"); 3126 if (IS_ERR(udc->usb_otg_clk)) { 3127 dev_err(udc->dev, "failed to acquire USB otg clock\n"); 3128 retval = PTR_ERR(udc->usb_otg_clk); 3129 goto usb_otg_clk_get_fail; 3130 } 3131 3132 /* Setup PLL clock to 48MHz */ 3133 retval = clk_enable(udc->usb_pll_clk); 3134 if (retval < 0) { 3135 dev_err(udc->dev, "failed to start USB PLL\n"); 3136 goto pll_enable_fail; 3137 } 3138 3139 retval = clk_set_rate(udc->usb_pll_clk, 48000); 3140 if (retval < 0) { 3141 dev_err(udc->dev, "failed to set USB clock rate\n"); 3142 goto pll_set_fail; 3143 } 3144 3145 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL); 3146 3147 /* Enable USB device clock */ 3148 retval = clk_enable(udc->usb_slv_clk); 3149 if (retval < 0) { 3150 dev_err(udc->dev, "failed to start USB device clock\n"); 3151 goto usb_clk_enable_fail; 3152 } 3153 3154 /* Enable USB OTG clock */ 3155 retval = clk_enable(udc->usb_otg_clk); 3156 if (retval < 0) { 3157 dev_err(udc->dev, "failed to start USB otg clock\n"); 3158 goto usb_otg_clk_enable_fail; 3159 } 3160 3161 /* Setup deferred workqueue data */ 3162 udc->poweron = udc->pullup = 0; 3163 INIT_WORK(&udc->pullup_job, pullup_work); 3164 INIT_WORK(&udc->vbus_job, vbus_work); 3165 #ifdef CONFIG_PM 3166 INIT_WORK(&udc->power_job, power_work); 3167 #endif 3168 3169 /* All clocks are now on */ 3170 udc->clocked = 1; 3171 3172 isp1301_udc_configure(udc); 3173 /* Allocate memory for the UDCA */ 3174 udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE, 3175 &dma_handle, 3176 (GFP_KERNEL | GFP_DMA)); 3177 if (!udc->udca_v_base) { 3178 dev_err(udc->dev, "error getting UDCA region\n"); 3179 retval = -ENOMEM; 3180 goto i2c_fail; 3181 } 3182 udc->udca_p_base = dma_handle; 3183 dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n", 3184 UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base); 3185 3186 /* Setup the DD DMA memory pool */ 3187 udc->dd_cache = dma_pool_create("udc_dd", udc->dev, 3188 sizeof(struct lpc32xx_usbd_dd_gad), 3189 sizeof(u32), 0); 3190 if (!udc->dd_cache) { 3191 dev_err(udc->dev, "error getting DD DMA region\n"); 3192 retval = -ENOMEM; 3193 goto dma_alloc_fail; 3194 } 3195 3196 /* Clear USB peripheral and initialize gadget endpoints */ 3197 udc_disable(udc); 3198 udc_reinit(udc); 3199 3200 /* Request IRQs - low and high priority USB device IRQs are routed to 3201 * the same handler, while the DMA interrupt is routed elsewhere */ 3202 retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq, 3203 0, "udc_lp", udc); 3204 if (retval < 0) { 3205 dev_err(udc->dev, "LP request irq %d failed\n", 3206 
udc->udp_irq[IRQ_USB_LP]); 3207 goto irq_lp_fail; 3208 } 3209 retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq, 3210 0, "udc_hp", udc); 3211 if (retval < 0) { 3212 dev_err(udc->dev, "HP request irq %d failed\n", 3213 udc->udp_irq[IRQ_USB_HP]); 3214 goto irq_hp_fail; 3215 } 3216 3217 retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA], 3218 lpc32xx_usb_devdma_irq, 0, "udc_dma", udc); 3219 if (retval < 0) { 3220 dev_err(udc->dev, "DEV request irq %d failed\n", 3221 udc->udp_irq[IRQ_USB_DEVDMA]); 3222 goto irq_dev_fail; 3223 } 3224 3225 /* The transceiver interrupt is used for VBUS detection and will 3226 kick off the VBUS handler function */ 3227 retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq, 3228 0, "udc_otg", udc); 3229 if (retval < 0) { 3230 dev_err(udc->dev, "VBUS request irq %d failed\n", 3231 udc->udp_irq[IRQ_USB_ATX]); 3232 goto irq_xcvr_fail; 3233 } 3234 3235 /* Initialize wait queue */ 3236 init_waitqueue_head(&udc->ep_disable_wait_queue); 3237 atomic_set(&udc->enabled_ep_cnt, 0); 3238 3239 /* Keep all IRQs disabled until GadgetFS starts up */ 3240 for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++) 3241 disable_irq(udc->udp_irq[i]); 3242 3243 retval = usb_add_gadget_udc(dev, &udc->gadget); 3244 if (retval < 0) 3245 goto add_gadget_fail; 3246 3247 dev_set_drvdata(dev, udc); 3248 device_init_wakeup(dev, 1); 3249 create_debug_file(udc); 3250 3251 /* Disable clocks for now */ 3252 udc_clk_set(udc, 0); 3253 3254 dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION); 3255 return 0; 3256 3257 add_gadget_fail: 3258 free_irq(udc->udp_irq[IRQ_USB_ATX], udc); 3259 irq_xcvr_fail: 3260 free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc); 3261 irq_dev_fail: 3262 free_irq(udc->udp_irq[IRQ_USB_HP], udc); 3263 irq_hp_fail: 3264 free_irq(udc->udp_irq[IRQ_USB_LP], udc); 3265 irq_lp_fail: 3266 dma_pool_destroy(udc->dd_cache); 3267 dma_alloc_fail: 3268 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE, 3269 udc->udca_v_base, udc->udca_p_base); 3270 i2c_fail: 3271 clk_disable(udc->usb_otg_clk); 3272 usb_otg_clk_enable_fail: 3273 clk_disable(udc->usb_slv_clk); 3274 usb_clk_enable_fail: 3275 pll_set_fail: 3276 clk_disable(udc->usb_pll_clk); 3277 pll_enable_fail: 3278 clk_put(udc->usb_otg_clk); 3279 usb_otg_clk_get_fail: 3280 clk_put(udc->usb_slv_clk); 3281 usb_clk_get_fail: 3282 clk_put(udc->usb_pll_clk); 3283 pll_get_fail: 3284 iounmap(udc->udp_baseaddr); 3285 io_map_fail: 3286 release_mem_region(udc->io_p_start, udc->io_p_size); 3287 dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval); 3288 request_mem_region_fail: 3289 irq_fail: 3290 resource_fail: 3291 phy_fail: 3292 kfree(udc); 3293 return retval; 3294 } 3295 3296 static int lpc32xx_udc_remove(struct platform_device *pdev) 3297 { 3298 struct lpc32xx_udc *udc = platform_get_drvdata(pdev); 3299 3300 usb_del_gadget_udc(&udc->gadget); 3301 if (udc->driver) 3302 return -EBUSY; 3303 3304 udc_clk_set(udc, 1); 3305 udc_disable(udc); 3306 pullup(udc, 0); 3307 3308 free_irq(udc->udp_irq[IRQ_USB_ATX], udc); 3309 3310 device_init_wakeup(&pdev->dev, 0); 3311 remove_debug_file(udc); 3312 3313 dma_pool_destroy(udc->dd_cache); 3314 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE, 3315 udc->udca_v_base, udc->udca_p_base); 3316 free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc); 3317 free_irq(udc->udp_irq[IRQ_USB_HP], udc); 3318 free_irq(udc->udp_irq[IRQ_USB_LP], udc); 3319 3320 clk_disable(udc->usb_otg_clk); 3321 clk_put(udc->usb_otg_clk); 3322 clk_disable(udc->usb_slv_clk); 3323 clk_put(udc->usb_slv_clk); 3324 
clk_disable(udc->usb_pll_clk); 3325 clk_put(udc->usb_pll_clk); 3326 iounmap(udc->udp_baseaddr); 3327 release_mem_region(udc->io_p_start, udc->io_p_size); 3328 kfree(udc); 3329 3330 return 0; 3331 } 3332 3333 #ifdef CONFIG_PM 3334 static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg) 3335 { 3336 struct lpc32xx_udc *udc = platform_get_drvdata(pdev); 3337 3338 if (udc->clocked) { 3339 /* Power down ISP */ 3340 udc->poweron = 0; 3341 isp1301_set_powerstate(udc, 0); 3342 3343 /* Disable clocking */ 3344 udc_clk_set(udc, 0); 3345 3346 /* Keep clock flag on, so we know to re-enable clocks 3347 on resume */ 3348 udc->clocked = 1; 3349 3350 /* Kill global USB clock */ 3351 clk_disable(udc->usb_slv_clk); 3352 } 3353 3354 return 0; 3355 } 3356 3357 static int lpc32xx_udc_resume(struct platform_device *pdev) 3358 { 3359 struct lpc32xx_udc *udc = platform_get_drvdata(pdev); 3360 3361 if (udc->clocked) { 3362 /* Enable global USB clock */ 3363 clk_enable(udc->usb_slv_clk); 3364 3365 /* Enable clocking */ 3366 udc_clk_set(udc, 1); 3367 3368 /* ISP back to normal power mode */ 3369 udc->poweron = 1; 3370 isp1301_set_powerstate(udc, 1); 3371 } 3372 3373 return 0; 3374 } 3375 #else 3376 #define lpc32xx_udc_suspend NULL 3377 #define lpc32xx_udc_resume NULL 3378 #endif 3379 3380 #ifdef CONFIG_OF 3381 static const struct of_device_id lpc32xx_udc_of_match[] = { 3382 { .compatible = "nxp,lpc3220-udc", }, 3383 { }, 3384 }; 3385 MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match); 3386 #endif 3387 3388 static struct platform_driver lpc32xx_udc_driver = { 3389 .remove = lpc32xx_udc_remove, 3390 .shutdown = lpc32xx_udc_shutdown, 3391 .suspend = lpc32xx_udc_suspend, 3392 .resume = lpc32xx_udc_resume, 3393 .driver = { 3394 .name = (char *) driver_name, 3395 .of_match_table = of_match_ptr(lpc32xx_udc_of_match), 3396 }, 3397 }; 3398 3399 module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe); 3400 3401 MODULE_DESCRIPTION("LPC32XX udc driver"); 3402 MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>"); 3403 MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); 3404 MODULE_LICENSE("GPL"); 3405 MODULE_ALIAS("platform:lpc32xx_udc"); 3406
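
/*
 * Device tree usage sketch (illustrative only): probe() above expects one
 * memory region, four interrupts in the order LP, HP, DEVDMA and ATX, and a
 * "transceiver" phandle pointing at the ISP1301 node. The address, region
 * size and interrupt numbers below are placeholders; take the real values
 * from the SoC's lpc32xx.dtsi rather than from this comment.
 *
 *	usbd@31020000 {
 *		compatible = "nxp,lpc3220-udc";
 *		reg = <0x31020000 0x300>;
 *		interrupts = <61 0>, <62 0>, <60 0>, <58 0>;
 *		transceiver = <&isp1301>;
 *	};
 */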