1 /* 2 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers 3 * 4 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker) 5 * Copyright (C) 2003 Robert Schwebel, Pengutronix 6 * Copyright (C) 2003 Benedikt Spranger, Pengutronix 7 * Copyright (C) 2003 David Brownell 8 * Copyright (C) 2003 Joshua Wise 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 */ 15 16 /* #define VERBOSE_DEBUG */ 17 18 #include <linux/device.h> 19 #include <linux/gpio.h> 20 #include <linux/module.h> 21 #include <linux/kernel.h> 22 #include <linux/ioport.h> 23 #include <linux/types.h> 24 #include <linux/errno.h> 25 #include <linux/err.h> 26 #include <linux/delay.h> 27 #include <linux/slab.h> 28 #include <linux/timer.h> 29 #include <linux/list.h> 30 #include <linux/interrupt.h> 31 #include <linux/mm.h> 32 #include <linux/platform_data/pxa2xx_udc.h> 33 #include <linux/platform_device.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/irq.h> 36 #include <linux/clk.h> 37 #include <linux/seq_file.h> 38 #include <linux/debugfs.h> 39 #include <linux/io.h> 40 #include <linux/prefetch.h> 41 42 #include <asm/byteorder.h> 43 #include <asm/dma.h> 44 #include <asm/mach-types.h> 45 #include <asm/unaligned.h> 46 47 #include <linux/usb/ch9.h> 48 #include <linux/usb/gadget.h> 49 #include <linux/usb/otg.h> 50 51 #ifdef CONFIG_ARCH_LUBBOCK 52 #include <mach/lubbock.h> 53 #endif 54 55 #define UDCCR 0x0000 /* UDC Control Register */ 56 #define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */ 57 #define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */ 58 #define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */ 59 #define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */ 60 #define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */ 61 #define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */ 62 #define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */ 63 #define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */ 64 #define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */ 65 #define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */ 66 #define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */ 67 #define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */ 68 #define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */ 69 #define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */ 70 #define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */ 71 #define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */ 72 #define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */ 73 #define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */ 74 #define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */ 75 #define UFNRH 0x0060 /* UDC Frame Number Register High */ 76 #define UFNRL 0x0064 /* UDC Frame Number Register Low */ 77 #define UBCR2 0x0068 /* UDC Byte Count Reg 2 */ 78 #define UBCR4 0x006c /* UDC Byte Count Reg 4 */ 79 #define UBCR7 0x0070 /* UDC Byte Count Reg 7 */ 80 #define UBCR9 0x0074 /* UDC Byte Count Reg 9 */ 81 #define UBCR12 0x0078 /* UDC Byte Count Reg 12 */ 82 #define UBCR14 0x007c /* UDC Byte Count Reg 14 */ 83 #define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */ 84 
#define UDDR1		0x0100		/* UDC Endpoint 1 Data Register */
#define UDDR2		0x0180		/* UDC Endpoint 2 Data Register */
#define UDDR3		0x0200		/* UDC Endpoint 3 Data Register */
#define UDDR4		0x0400		/* UDC Endpoint 4 Data Register */
#define UDDR5		0x00A0		/* UDC Endpoint 5 Data Register */
#define UDDR6		0x0600		/* UDC Endpoint 6 Data Register */
#define UDDR7		0x0680		/* UDC Endpoint 7 Data Register */
#define UDDR8		0x0700		/* UDC Endpoint 8 Data Register */
#define UDDR9		0x0900		/* UDC Endpoint 9 Data Register */
#define UDDR10		0x00C0		/* UDC Endpoint 10 Data Register */
#define UDDR11		0x0B00		/* UDC Endpoint 11 Data Register */
#define UDDR12		0x0B80		/* UDC Endpoint 12 Data Register */
#define UDDR13		0x0C00		/* UDC Endpoint 13 Data Register */
#define UDDR14		0x0E00		/* UDC Endpoint 14 Data Register */
#define UDDR15		0x00E0		/* UDC Endpoint 15 Data Register */

#define UICR0		0x0050		/* UDC Interrupt Control Register 0 */
#define UICR1		0x0054		/* UDC Interrupt Control Register 1 */

#define USIR0		0x0058		/* UDC Status Interrupt Register 0 */
#define USIR1		0x005C		/* UDC Status Interrupt Register 1 */

#define UDCCR_UDE	(1 << 0)	/* UDC enable */
#define UDCCR_UDA	(1 << 1)	/* UDC active */
#define UDCCR_RSM	(1 << 2)	/* Device resume */
#define UDCCR_RESIR	(1 << 3)	/* Resume interrupt request */
#define UDCCR_SUSIR	(1 << 4)	/* Suspend interrupt request */
#define UDCCR_SRM	(1 << 5)	/* Suspend/resume interrupt mask */
#define UDCCR_RSTIR	(1 << 6)	/* Reset interrupt request */
#define UDCCR_REM	(1 << 7)	/* Reset interrupt mask */

#define UDCCS0_OPR	(1 << 0)	/* OUT packet ready */
#define UDCCS0_IPR	(1 << 1)	/* IN packet ready */
#define UDCCS0_FTF	(1 << 2)	/* Flush Tx FIFO */
#define UDCCS0_DRWF	(1 << 3)	/* Device remote wakeup feature */
#define UDCCS0_SST	(1 << 4)	/* Sent stall */
#define UDCCS0_FST	(1 << 5)	/* Force stall */
#define UDCCS0_RNE	(1 << 6)	/* Receive FIFO not empty */
#define UDCCS0_SA	(1 << 7)	/* Setup active */

#define UDCCS_BI_TFS	(1 << 0)	/* Transmit FIFO service */
#define UDCCS_BI_TPC	(1 << 1)	/* Transmit packet complete */
#define UDCCS_BI_FTF	(1 << 2)	/* Flush Tx FIFO */
#define UDCCS_BI_TUR	(1 << 3)	/* Transmit FIFO underrun */
#define UDCCS_BI_SST	(1 << 4)	/* Sent stall */
#define UDCCS_BI_FST	(1 << 5)	/* Force stall */
#define UDCCS_BI_TSP	(1 << 7)	/* Transmit short packet */

#define UDCCS_BO_RFS	(1 << 0)	/* Receive FIFO service */
#define UDCCS_BO_RPC	(1 << 1)	/* Receive packet complete */
#define UDCCS_BO_DME	(1 << 3)	/* DMA enable */
#define UDCCS_BO_SST	(1 << 4)	/* Sent stall */
#define UDCCS_BO_FST	(1 << 5)	/* Force stall */
#define UDCCS_BO_RNE	(1 << 6)	/* Receive FIFO not empty */
#define UDCCS_BO_RSP	(1 << 7)	/* Receive short packet */

#define UDCCS_II_TFS	(1 << 0)	/* Transmit FIFO service */
#define UDCCS_II_TPC	(1 << 1)	/* Transmit packet complete */
#define UDCCS_II_FTF	(1 << 2)	/* Flush Tx FIFO */
#define UDCCS_II_TUR	(1 << 3)	/* Transmit FIFO underrun */
#define UDCCS_II_TSP	(1 << 7)	/* Transmit short packet */

#define UDCCS_IO_RFS	(1 << 0)	/* Receive FIFO service */
#define UDCCS_IO_RPC	(1 << 1)	/* Receive packet complete */
#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right? The datasheet says '2'. */
#define UDCCS_IO_ROF	(1 << 3)	/* Receive overflow */
#endif
#ifdef CONFIG_ARCH_PXA
#define UDCCS_IO_ROF	(1 << 2)	/* Receive overflow */
153 #endif 154 #define UDCCS_IO_DME (1 << 3) /* DMA enable */ 155 #define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */ 156 #define UDCCS_IO_RSP (1 << 7) /* Receive short packet */ 157 158 #define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */ 159 #define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */ 160 #define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */ 161 #define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */ 162 #define UDCCS_INT_SST (1 << 4) /* Sent stall */ 163 #define UDCCS_INT_FST (1 << 5) /* Force stall */ 164 #define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */ 165 166 #define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */ 167 #define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */ 168 #define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */ 169 #define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */ 170 #define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */ 171 #define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */ 172 #define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */ 173 #define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */ 174 175 #define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */ 176 #define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */ 177 #define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */ 178 #define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */ 179 #define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */ 180 #define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */ 181 #define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */ 182 #define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */ 183 184 #define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */ 185 #define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */ 186 #define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */ 187 #define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */ 188 #define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */ 189 #define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */ 190 #define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */ 191 #define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */ 192 193 #define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */ 194 #define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */ 195 #define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */ 196 #define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */ 197 #define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */ 198 #define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */ 199 #define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */ 200 #define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */ 201 202 /* 203 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x 204 * series processors. The UDC for the IXP 4xx series is very similar. 205 * There are fifteen endpoints, in addition to ep0. 206 * 207 * Such controller drivers work with a gadget driver. The gadget driver 208 * returns descriptors, implements configuration and data protocols used 209 * by the host to interact with this device, and allocates endpoints to 210 * the different protocol interfaces. The controller driver virtualizes 211 * usb hardware so that the gadget drivers will be more portable. 212 * 213 * This UDC hardware wants to implement a bit too much USB protocol, so 214 * it constrains the sorts of USB configuration change events that work. 215 * The errata for these chips are misleading; some "fixed" bugs from 216 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there. 217 * 218 * Note that the UDC hardware supports DMA (except on IXP) but that's 219 * not used here. 
IN-DMA (to host) is simple enough, when the data is 220 * suitably aligned (16 bytes) ... the network stack doesn't do that, 221 * other software can. OUT-DMA is buggy in most chip versions, as well 222 * as poorly designed (data toggle not automatic). So this driver won't 223 * bother using DMA. (Mostly-working IN-DMA support was available in 224 * kernels before 2.6.23, but was never enabled or well tested.) 225 */ 226 227 #define DRIVER_VERSION "30-June-2007" 228 #define DRIVER_DESC "PXA 25x USB Device Controller driver" 229 230 231 static const char driver_name [] = "pxa25x_udc"; 232 233 static const char ep0name [] = "ep0"; 234 235 236 #ifdef CONFIG_ARCH_IXP4XX 237 238 /* cpu-specific register addresses are compiled in to this code */ 239 #ifdef CONFIG_ARCH_PXA 240 #error "Can't configure both IXP and PXA" 241 #endif 242 243 /* IXP doesn't yet support <linux/clk.h> */ 244 #define clk_get(dev,name) NULL 245 #define clk_enable(clk) do { } while (0) 246 #define clk_disable(clk) do { } while (0) 247 #define clk_put(clk) do { } while (0) 248 249 #endif 250 251 #include "pxa25x_udc.h" 252 253 254 #ifdef CONFIG_USB_PXA25X_SMALL 255 #define SIZE_STR " (small)" 256 #else 257 #define SIZE_STR "" 258 #endif 259 260 /* --------------------------------------------------------------------------- 261 * endpoint related parts of the api to the usb controller hardware, 262 * used by gadget driver; and the inner talker-to-hardware core. 263 * --------------------------------------------------------------------------- 264 */ 265 266 static void pxa25x_ep_fifo_flush (struct usb_ep *ep); 267 static void nuke (struct pxa25x_ep *, int status); 268 269 /* one GPIO should control a D+ pullup, so host sees this device (or not) */ 270 static void pullup_off(void) 271 { 272 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 273 int off_level = mach->gpio_pullup_inverted; 274 275 if (gpio_is_valid(mach->gpio_pullup)) 276 gpio_set_value(mach->gpio_pullup, off_level); 277 else if (mach->udc_command) 278 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 279 } 280 281 static void pullup_on(void) 282 { 283 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 284 int on_level = !mach->gpio_pullup_inverted; 285 286 if (gpio_is_valid(mach->gpio_pullup)) 287 gpio_set_value(mach->gpio_pullup, on_level); 288 else if (mach->udc_command) 289 mach->udc_command(PXA2XX_UDC_CMD_CONNECT); 290 } 291 292 #if defined(CONFIG_CPU_BIG_ENDIAN) 293 /* 294 * IXP4xx has its buses wired up in a way that relies on never doing any 295 * byte swaps, independent of whether it runs in big-endian or little-endian 296 * mode, as explained by Krzysztof Hałasa. 297 * 298 * We only support pxa25x in little-endian mode, but it is very likely 299 * that it works the same way. 
 */
static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	iowrite32be(val, dev->regs + reg);
}

static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return ioread32be(dev->regs + reg);
}
#else
static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	writel(val, dev->regs + reg);
}

static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return readl(dev->regs + reg);
}
#endif

static void pio_irq_enable(struct pxa25x_ep *ep)
{
	u32 bEndpointAddress = ep->bEndpointAddress & 0xf;

	if (bEndpointAddress < 8)
		udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) &
						~(1 << bEndpointAddress));
	else {
		bEndpointAddress -= 8;
		udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) &
						~(1 << bEndpointAddress));
	}
}

static void pio_irq_disable(struct pxa25x_ep *ep)
{
	u32 bEndpointAddress = ep->bEndpointAddress & 0xf;

	if (bEndpointAddress < 8)
		udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) |
						(1 << bEndpointAddress));
	else {
		bEndpointAddress -= 8;
		udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) |
						(1 << bEndpointAddress));
	}
}

/* The UDCCR reg contains mask and interrupt status bits, so using '|='
 * isn't safe as it may ack an interrupt: the *IR status bits are cleared
 * by writing a 1 back, so a plain read-modify-write could ack a pending
 * reset/suspend/resume event by accident.  Only the REM/SRM mask bits and
 * UDE are safe to write back unchanged.
 */
#define UDCCR_MASK_BITS		(UDCCR_REM | UDCCR_SRM | UDCCR_UDE)

static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
	u32 udccr = udc_get_reg(dev, UDCCR);

	udc_set_reg(dev, UDCCR,
		(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
}

static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
	u32 udccr = udc_get_reg(dev, UDCCR);

	udc_set_reg(dev, UDCCR,
		(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
}

static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
{
	/* udccr contains the bits we don't want to change */
	u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;

	udc_set_reg(dev, UDCCR, udccr | (mask & ~UDCCR_MASK_BITS));
}

static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_udccs);
}

static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
{
	udc_set_reg(ep->dev, ep->regoff_udccs, data);
}

static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
{
	return udc_get_reg(dev, UDCCS0);
}

static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
{
	udc_set_reg(dev, UDCCS0, data);
}

static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_uddr);
}

static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
{
	udc_set_reg(ep->dev, ep->regoff_uddr, data);
}

static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_ubcr);
}

/*
 * endpoint enable/disable
 *
 * we need to verify the descriptors used to enable endpoints. since pxa25x
 * endpoint configurations are fixed, and are pretty much always enabled,
 * there's not a lot to manage here.
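 *
 * purely illustrative (not part of this driver): a gadget driver enables
 * "ep1in-bulk" with an endpoint descriptor matching the fixed hardware
 * configuration, roughly
 *
 *	static struct usb_endpoint_descriptor bulk_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= cpu_to_le16(BULK_FIFO_SIZE),
 *	};
 *
 * which reaches pxa25x_ep_enable() below through usb_ep_enable(); all this
 * driver can do is check that such a descriptor is consistent with the
 * endpoint it names.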
418 * 419 * because pxa25x can't selectively initialize bulk (or interrupt) endpoints, 420 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except 421 * for a single interface (with only the default altsetting) and for gadget 422 * drivers that don't halt endpoints (not reset by set_interface). that also 423 * means that if you use ISO, you must violate the USB spec rule that all 424 * iso endpoints must be in non-default altsettings. 425 */ 426 static int pxa25x_ep_enable (struct usb_ep *_ep, 427 const struct usb_endpoint_descriptor *desc) 428 { 429 struct pxa25x_ep *ep; 430 struct pxa25x_udc *dev; 431 432 ep = container_of (_ep, struct pxa25x_ep, ep); 433 if (!_ep || !desc || _ep->name == ep0name 434 || desc->bDescriptorType != USB_DT_ENDPOINT 435 || ep->bEndpointAddress != desc->bEndpointAddress 436 || ep->fifo_size < usb_endpoint_maxp (desc)) { 437 DMSG("%s, bad ep or descriptor\n", __func__); 438 return -EINVAL; 439 } 440 441 /* xfer types must match, except that interrupt ~= bulk */ 442 if (ep->bmAttributes != desc->bmAttributes 443 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK 444 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) { 445 DMSG("%s, %s type mismatch\n", __func__, _ep->name); 446 return -EINVAL; 447 } 448 449 /* hardware _could_ do smaller, but driver doesn't */ 450 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK 451 && usb_endpoint_maxp (desc) 452 != BULK_FIFO_SIZE) 453 || !desc->wMaxPacketSize) { 454 DMSG("%s, bad %s maxpacket\n", __func__, _ep->name); 455 return -ERANGE; 456 } 457 458 dev = ep->dev; 459 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { 460 DMSG("%s, bogus device state\n", __func__); 461 return -ESHUTDOWN; 462 } 463 464 ep->ep.desc = desc; 465 ep->stopped = 0; 466 ep->pio_irqs = 0; 467 ep->ep.maxpacket = usb_endpoint_maxp (desc); 468 469 /* flush fifo (mostly for OUT buffers) */ 470 pxa25x_ep_fifo_flush (_ep); 471 472 /* ... reset halt state too, if we could ... */ 473 474 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name); 475 return 0; 476 } 477 478 static int pxa25x_ep_disable (struct usb_ep *_ep) 479 { 480 struct pxa25x_ep *ep; 481 unsigned long flags; 482 483 ep = container_of (_ep, struct pxa25x_ep, ep); 484 if (!_ep || !ep->ep.desc) { 485 DMSG("%s, %s not enabled\n", __func__, 486 _ep ? ep->ep.name : NULL); 487 return -EINVAL; 488 } 489 local_irq_save(flags); 490 491 nuke (ep, -ESHUTDOWN); 492 493 /* flush fifo (mostly for IN buffers) */ 494 pxa25x_ep_fifo_flush (_ep); 495 496 ep->ep.desc = NULL; 497 ep->stopped = 1; 498 499 local_irq_restore(flags); 500 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name); 501 return 0; 502 } 503 504 /*-------------------------------------------------------------------------*/ 505 506 /* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers 507 * must still pass correctly initialized endpoints, since other controller 508 * drivers may care about how it's currently set up (dma issues etc). 
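 *
 * purely illustrative (not part of this driver): the usual gadget-side
 * sequence that ends up in these two helpers and in pxa25x_ep_queue()
 * looks roughly like
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	(a hypothetical completion callback)
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * with usb_ep_free_request(ep, req) once the request has been retired.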
509 */ 510 511 /* 512 * pxa25x_ep_alloc_request - allocate a request data structure 513 */ 514 static struct usb_request * 515 pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) 516 { 517 struct pxa25x_request *req; 518 519 req = kzalloc(sizeof(*req), gfp_flags); 520 if (!req) 521 return NULL; 522 523 INIT_LIST_HEAD (&req->queue); 524 return &req->req; 525 } 526 527 528 /* 529 * pxa25x_ep_free_request - deallocate a request data structure 530 */ 531 static void 532 pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req) 533 { 534 struct pxa25x_request *req; 535 536 req = container_of (_req, struct pxa25x_request, req); 537 WARN_ON(!list_empty (&req->queue)); 538 kfree(req); 539 } 540 541 /*-------------------------------------------------------------------------*/ 542 543 /* 544 * done - retire a request; caller blocked irqs 545 */ 546 static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status) 547 { 548 unsigned stopped = ep->stopped; 549 550 list_del_init(&req->queue); 551 552 if (likely (req->req.status == -EINPROGRESS)) 553 req->req.status = status; 554 else 555 status = req->req.status; 556 557 if (status && status != -ESHUTDOWN) 558 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n", 559 ep->ep.name, &req->req, status, 560 req->req.actual, req->req.length); 561 562 /* don't modify queue heads during completion callback */ 563 ep->stopped = 1; 564 usb_gadget_giveback_request(&ep->ep, &req->req); 565 ep->stopped = stopped; 566 } 567 568 569 static inline void ep0_idle (struct pxa25x_udc *dev) 570 { 571 dev->ep0state = EP0_IDLE; 572 } 573 574 static int 575 write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max) 576 { 577 u8 *buf; 578 unsigned length, count; 579 580 buf = req->req.buf + req->req.actual; 581 prefetch(buf); 582 583 /* how big will this packet be? */ 584 length = min(req->req.length - req->req.actual, max); 585 req->req.actual += length; 586 587 count = length; 588 while (likely(count--)) 589 udc_ep_set_UDDR(ep, *buf++); 590 591 return length; 592 } 593 594 /* 595 * write to an IN endpoint fifo, as many packets as possible. 596 * irqs will use this to write the rest later. 597 * caller guarantees at least one packet buffer is ready (or a zlp). 598 */ 599 static int 600 write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 601 { 602 unsigned max; 603 604 max = usb_endpoint_maxp(ep->ep.desc); 605 do { 606 unsigned count; 607 int is_last, is_short; 608 609 count = write_packet(ep, req, max); 610 611 /* last packet is usually short (or a zlp) */ 612 if (unlikely (count != max)) 613 is_last = is_short = 1; 614 else { 615 if (likely(req->req.length != req->req.actual) 616 || req->req.zero) 617 is_last = 0; 618 else 619 is_last = 1; 620 /* interrupt/iso maxpacket may not fill the fifo */ 621 is_short = unlikely (max < ep->fifo_size); 622 } 623 624 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n", 625 ep->ep.name, count, 626 is_last ? "/L" : "", is_short ? "/S" : "", 627 req->req.length - req->req.actual, req); 628 629 /* let loose that packet. maybe try writing another one, 630 * double buffering might work. TSP, TPC, and TFS 631 * bit values are the same for all normal IN endpoints. 
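		 * (roughly: writing TPC back clears the "transmit packet
		 * complete" status, and the TSP write below asks the
		 * controller to send the partly filled FIFO as a short
		 * packet.)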
632 */ 633 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC); 634 if (is_short) 635 udc_ep_set_UDCCS(ep, UDCCS_BI_TSP); 636 637 /* requests complete when all IN data is in the FIFO */ 638 if (is_last) { 639 done (ep, req, 0); 640 if (list_empty(&ep->queue)) 641 pio_irq_disable(ep); 642 return 1; 643 } 644 645 // TODO experiment: how robust can fifo mode tweaking be? 646 // double buffering is off in the default fifo mode, which 647 // prevents TFS from being set here. 648 649 } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS); 650 return 0; 651 } 652 653 /* caller asserts req->pending (ep0 irq status nyet cleared); starts 654 * ep0 data stage. these chips want very simple state transitions. 655 */ 656 static inline 657 void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag) 658 { 659 udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR); 660 udc_set_reg(dev, USIR0, USIR0_IR0); 661 dev->req_pending = 0; 662 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n", 663 __func__, tag, udc_ep0_get_UDCCS(dev), flags); 664 } 665 666 static int 667 write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 668 { 669 struct pxa25x_udc *dev = ep->dev; 670 unsigned count; 671 int is_short; 672 673 count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE); 674 ep->dev->stats.write.bytes += count; 675 676 /* last packet "must be" short (or a zlp) */ 677 is_short = (count != EP0_FIFO_SIZE); 678 679 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count, 680 req->req.length - req->req.actual, req); 681 682 if (unlikely (is_short)) { 683 if (ep->dev->req_pending) 684 ep0start(ep->dev, UDCCS0_IPR, "short IN"); 685 else 686 udc_ep0_set_UDCCS(dev, UDCCS0_IPR); 687 688 count = req->req.length; 689 done (ep, req, 0); 690 ep0_idle(ep->dev); 691 #ifndef CONFIG_ARCH_IXP4XX 692 #if 1 693 /* This seems to get rid of lost status irqs in some cases: 694 * host responds quickly, or next request involves config 695 * change automagic, or should have been hidden, or ... 696 * 697 * FIXME get rid of all udelays possible... 698 */ 699 if (count >= EP0_FIFO_SIZE) { 700 count = 100; 701 do { 702 if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) { 703 /* clear OPR, generate ack */ 704 udc_ep0_set_UDCCS(dev, UDCCS0_OPR); 705 break; 706 } 707 count--; 708 udelay(1); 709 } while (count); 710 } 711 #endif 712 #endif 713 } else if (ep->dev->req_pending) 714 ep0start(ep->dev, 0, "IN"); 715 return is_short; 716 } 717 718 719 /* 720 * read_fifo - unload packet(s) from the fifo we use for usb OUT 721 * transfers and put them into the request. caller should have made 722 * sure there's at least one packet ready. 723 * 724 * returns true if the request completed because of short packet or the 725 * request buffer having filled (and maybe overran till end-of-packet). 726 */ 727 static int 728 read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 729 { 730 for (;;) { 731 u32 udccs; 732 u8 *buf; 733 unsigned bufferspace, count, is_short; 734 735 /* make sure there's a packet in the FIFO. 736 * UDCCS_{BO,IO}_RPC are all the same bit value. 737 * UDCCS_{BO,IO}_RNE are all the same bit value. 
738 */ 739 udccs = udc_ep_get_UDCCS(ep); 740 if (unlikely ((udccs & UDCCS_BO_RPC) == 0)) 741 break; 742 buf = req->req.buf + req->req.actual; 743 prefetchw(buf); 744 bufferspace = req->req.length - req->req.actual; 745 746 /* read all bytes from this packet */ 747 if (likely (udccs & UDCCS_BO_RNE)) { 748 count = 1 + (0x0ff & udc_ep_get_UBCR(ep)); 749 req->req.actual += min (count, bufferspace); 750 } else /* zlp */ 751 count = 0; 752 is_short = (count < ep->ep.maxpacket); 753 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n", 754 ep->ep.name, udccs, count, 755 is_short ? "/S" : "", 756 req, req->req.actual, req->req.length); 757 while (likely (count-- != 0)) { 758 u8 byte = (u8) udc_ep_get_UDDR(ep); 759 760 if (unlikely (bufferspace == 0)) { 761 /* this happens when the driver's buffer 762 * is smaller than what the host sent. 763 * discard the extra data. 764 */ 765 if (req->req.status != -EOVERFLOW) 766 DMSG("%s overflow %d\n", 767 ep->ep.name, count); 768 req->req.status = -EOVERFLOW; 769 } else { 770 *buf++ = byte; 771 bufferspace--; 772 } 773 } 774 udc_ep_set_UDCCS(ep, UDCCS_BO_RPC); 775 /* RPC/RSP/RNE could now reflect the other packet buffer */ 776 777 /* iso is one request per packet */ 778 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 779 if (udccs & UDCCS_IO_ROF) 780 req->req.status = -EHOSTUNREACH; 781 /* more like "is_done" */ 782 is_short = 1; 783 } 784 785 /* completion */ 786 if (is_short || req->req.actual == req->req.length) { 787 done (ep, req, 0); 788 if (list_empty(&ep->queue)) 789 pio_irq_disable(ep); 790 return 1; 791 } 792 793 /* finished that packet. the next one may be waiting... */ 794 } 795 return 0; 796 } 797 798 /* 799 * special ep0 version of the above. no UBCR0 or double buffering; status 800 * handshaking is magic. most device protocols don't need control-OUT. 801 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other 802 * protocols do use them. 803 */ 804 static int 805 read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 806 { 807 u8 *buf, byte; 808 unsigned bufferspace; 809 810 buf = req->req.buf + req->req.actual; 811 bufferspace = req->req.length - req->req.actual; 812 813 while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) { 814 byte = (u8) UDDR0; 815 816 if (unlikely (bufferspace == 0)) { 817 /* this happens when the driver's buffer 818 * is smaller than what the host sent. 819 * discard the extra data. 820 */ 821 if (req->req.status != -EOVERFLOW) 822 DMSG("%s overflow\n", ep->ep.name); 823 req->req.status = -EOVERFLOW; 824 } else { 825 *buf++ = byte; 826 req->req.actual++; 827 bufferspace--; 828 } 829 } 830 831 udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR); 832 833 /* completion */ 834 if (req->req.actual >= req->req.length) 835 return 1; 836 837 /* finished that packet. the next one may be waiting... 
 */
	return 0;
}

/*-------------------------------------------------------------------------*/

static int
pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct pxa25x_request	*req;
	struct pxa25x_ep	*ep;
	struct pxa25x_udc	*dev;
	unsigned long		flags;

	req = container_of(_req, struct pxa25x_request, req);
	if (unlikely (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))) {
		DMSG("%s, bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
		DMSG("%s, bad ep\n", __func__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely (!dev->driver
			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DMSG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			&& req->req.length > usb_endpoint_maxp(ep->ep.desc)))
		return -EMSGSIZE;

	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
		_ep->name, _req, _req->length, _req->buf);

	local_irq_save(flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep->ep.desc == NULL /* ep0 */) {
			unsigned length = _req->length;

			switch (dev->ep0state) {
			case EP0_IN_DATA_PHASE:
				dev->stats.write.ops++;
				if (write_ep0_fifo(ep, req))
					req = NULL;
				break;

			case EP0_OUT_DATA_PHASE:
				dev->stats.read.ops++;
				/* messy ... */
				if (dev->req_config) {
					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
						dev->has_cfr ?  "" : " raced");
					if (dev->has_cfr)
						udc_set_reg(dev, UDCCFR, UDCCFR_AREN |
							UDCCFR_ACM | UDCCFR_MB1);
					done(ep, req, 0);
					dev->ep0state = EP0_END_XFER;
					local_irq_restore (flags);
					return 0;
				}
				if (dev->req_pending)
					ep0start(dev, UDCCS0_IPR, "OUT");
				if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0
						&& read_ep0_fifo(ep, req))) {
					ep0_idle(dev);
					done(ep, req, 0);
					req = NULL;
				}
				break;

			default:
				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
				local_irq_restore (flags);
				return -EL2HLT;
			}
		/* can the FIFO satisfy the request immediately? */
		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0
					&& write_fifo(ep, req))
				req = NULL;
		} else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0
				&& read_fifo(ep, req)) {
			req = NULL;
		}

		if (likely(req && ep->ep.desc))
			pio_irq_enable(ep);
	}

	/* pio or dma irq handler advances the queue.
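	 * (only pio is used by this driver; see the DMA note at the top
	 * of the file.)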
*/ 942 if (likely(req != NULL)) 943 list_add_tail(&req->queue, &ep->queue); 944 local_irq_restore(flags); 945 946 return 0; 947 } 948 949 950 /* 951 * nuke - dequeue ALL requests 952 */ 953 static void nuke(struct pxa25x_ep *ep, int status) 954 { 955 struct pxa25x_request *req; 956 957 /* called with irqs blocked */ 958 while (!list_empty(&ep->queue)) { 959 req = list_entry(ep->queue.next, 960 struct pxa25x_request, 961 queue); 962 done(ep, req, status); 963 } 964 if (ep->ep.desc) 965 pio_irq_disable(ep); 966 } 967 968 969 /* dequeue JUST ONE request */ 970 static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 971 { 972 struct pxa25x_ep *ep; 973 struct pxa25x_request *req; 974 unsigned long flags; 975 976 ep = container_of(_ep, struct pxa25x_ep, ep); 977 if (!_ep || ep->ep.name == ep0name) 978 return -EINVAL; 979 980 local_irq_save(flags); 981 982 /* make sure it's actually queued on this endpoint */ 983 list_for_each_entry (req, &ep->queue, queue) { 984 if (&req->req == _req) 985 break; 986 } 987 if (&req->req != _req) { 988 local_irq_restore(flags); 989 return -EINVAL; 990 } 991 992 done(ep, req, -ECONNRESET); 993 994 local_irq_restore(flags); 995 return 0; 996 } 997 998 /*-------------------------------------------------------------------------*/ 999 1000 static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value) 1001 { 1002 struct pxa25x_ep *ep; 1003 unsigned long flags; 1004 1005 ep = container_of(_ep, struct pxa25x_ep, ep); 1006 if (unlikely (!_ep 1007 || (!ep->ep.desc && ep->ep.name != ep0name)) 1008 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 1009 DMSG("%s, bad ep\n", __func__); 1010 return -EINVAL; 1011 } 1012 if (value == 0) { 1013 /* this path (reset toggle+halt) is needed to implement 1014 * SET_INTERFACE on normal hardware. but it can't be 1015 * done from software on the PXA UDC, and the hardware 1016 * forgets to do it as part of SET_INTERFACE automagic. 
1017 */ 1018 DMSG("only host can clear %s halt\n", _ep->name); 1019 return -EROFS; 1020 } 1021 1022 local_irq_save(flags); 1023 1024 if ((ep->bEndpointAddress & USB_DIR_IN) != 0 1025 && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0 1026 || !list_empty(&ep->queue))) { 1027 local_irq_restore(flags); 1028 return -EAGAIN; 1029 } 1030 1031 /* FST bit is the same for control, bulk in, bulk out, interrupt in */ 1032 udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF); 1033 1034 /* ep0 needs special care */ 1035 if (!ep->ep.desc) { 1036 start_watchdog(ep->dev); 1037 ep->dev->req_pending = 0; 1038 ep->dev->ep0state = EP0_STALL; 1039 1040 /* and bulk/intr endpoints like dropping stalls too */ 1041 } else { 1042 unsigned i; 1043 for (i = 0; i < 1000; i += 20) { 1044 if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST) 1045 break; 1046 udelay(20); 1047 } 1048 } 1049 local_irq_restore(flags); 1050 1051 DBG(DBG_VERBOSE, "%s halt\n", _ep->name); 1052 return 0; 1053 } 1054 1055 static int pxa25x_ep_fifo_status(struct usb_ep *_ep) 1056 { 1057 struct pxa25x_ep *ep; 1058 1059 ep = container_of(_ep, struct pxa25x_ep, ep); 1060 if (!_ep) { 1061 DMSG("%s, bad ep\n", __func__); 1062 return -ENODEV; 1063 } 1064 /* pxa can't report unclaimed bytes from IN fifos */ 1065 if ((ep->bEndpointAddress & USB_DIR_IN) != 0) 1066 return -EOPNOTSUPP; 1067 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN 1068 || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0) 1069 return 0; 1070 else 1071 return (udc_ep_get_UBCR(ep) & 0xfff) + 1; 1072 } 1073 1074 static void pxa25x_ep_fifo_flush(struct usb_ep *_ep) 1075 { 1076 struct pxa25x_ep *ep; 1077 1078 ep = container_of(_ep, struct pxa25x_ep, ep); 1079 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) { 1080 DMSG("%s, bad ep\n", __func__); 1081 return; 1082 } 1083 1084 /* toggle and halt bits stay unchanged */ 1085 1086 /* for OUT, just read and discard the FIFO contents. */ 1087 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) { 1088 while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0) 1089 (void)udc_ep_get_UDDR(ep); 1090 return; 1091 } 1092 1093 /* most IN status is the same, but ISO can't stall */ 1094 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR 1095 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC 1096 ? 
0 : UDCCS_BI_SST)); 1097 } 1098 1099 1100 static struct usb_ep_ops pxa25x_ep_ops = { 1101 .enable = pxa25x_ep_enable, 1102 .disable = pxa25x_ep_disable, 1103 1104 .alloc_request = pxa25x_ep_alloc_request, 1105 .free_request = pxa25x_ep_free_request, 1106 1107 .queue = pxa25x_ep_queue, 1108 .dequeue = pxa25x_ep_dequeue, 1109 1110 .set_halt = pxa25x_ep_set_halt, 1111 .fifo_status = pxa25x_ep_fifo_status, 1112 .fifo_flush = pxa25x_ep_fifo_flush, 1113 }; 1114 1115 1116 /* --------------------------------------------------------------------------- 1117 * device-scoped parts of the api to the usb controller hardware 1118 * --------------------------------------------------------------------------- 1119 */ 1120 1121 static int pxa25x_udc_get_frame(struct usb_gadget *_gadget) 1122 { 1123 struct pxa25x_udc *dev; 1124 1125 dev = container_of(_gadget, struct pxa25x_udc, gadget); 1126 return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) | 1127 (udc_get_reg(dev, UFNRL) & 0xff); 1128 } 1129 1130 static int pxa25x_udc_wakeup(struct usb_gadget *_gadget) 1131 { 1132 struct pxa25x_udc *udc; 1133 1134 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1135 1136 /* host may not have enabled remote wakeup */ 1137 if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0) 1138 return -EHOSTUNREACH; 1139 udc_set_mask_UDCCR(udc, UDCCR_RSM); 1140 return 0; 1141 } 1142 1143 static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *); 1144 static void udc_enable (struct pxa25x_udc *); 1145 static void udc_disable(struct pxa25x_udc *); 1146 1147 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not 1148 * in active use. 1149 */ 1150 static int pullup(struct pxa25x_udc *udc) 1151 { 1152 int is_active = udc->vbus && udc->pullup && !udc->suspended; 1153 DMSG("%s\n", is_active ? "active" : "inactive"); 1154 if (is_active) { 1155 if (!udc->active) { 1156 udc->active = 1; 1157 /* Enable clock for USB device */ 1158 clk_enable(udc->clk); 1159 udc_enable(udc); 1160 } 1161 } else { 1162 if (udc->active) { 1163 if (udc->gadget.speed != USB_SPEED_UNKNOWN) { 1164 DMSG("disconnect %s\n", udc->driver 1165 ? udc->driver->driver.name 1166 : "(no driver)"); 1167 stop_activity(udc, udc->driver); 1168 } 1169 udc_disable(udc); 1170 /* Disable clock for USB device */ 1171 clk_disable(udc->clk); 1172 udc->active = 0; 1173 } 1174 1175 } 1176 return 0; 1177 } 1178 1179 /* VBUS reporting logically comes from a transceiver */ 1180 static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active) 1181 { 1182 struct pxa25x_udc *udc; 1183 1184 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1185 udc->vbus = is_active; 1186 DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); 1187 pullup(udc); 1188 return 0; 1189 } 1190 1191 /* drivers may have software control over D+ pullup */ 1192 static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active) 1193 { 1194 struct pxa25x_udc *udc; 1195 1196 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1197 1198 /* not all boards support pullup control */ 1199 if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command) 1200 return -EOPNOTSUPP; 1201 1202 udc->pullup = (is_active != 0); 1203 pullup(udc); 1204 return 0; 1205 } 1206 1207 /* boards may consume current from VBUS, up to 100-500mA based on config. 1208 * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs 1209 * violate USB specs. 
1210 */ 1211 static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA) 1212 { 1213 struct pxa25x_udc *udc; 1214 1215 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1216 1217 if (!IS_ERR_OR_NULL(udc->transceiver)) 1218 return usb_phy_set_power(udc->transceiver, mA); 1219 return -EOPNOTSUPP; 1220 } 1221 1222 static int pxa25x_udc_start(struct usb_gadget *g, 1223 struct usb_gadget_driver *driver); 1224 static int pxa25x_udc_stop(struct usb_gadget *g); 1225 1226 static const struct usb_gadget_ops pxa25x_udc_ops = { 1227 .get_frame = pxa25x_udc_get_frame, 1228 .wakeup = pxa25x_udc_wakeup, 1229 .vbus_session = pxa25x_udc_vbus_session, 1230 .pullup = pxa25x_udc_pullup, 1231 .vbus_draw = pxa25x_udc_vbus_draw, 1232 .udc_start = pxa25x_udc_start, 1233 .udc_stop = pxa25x_udc_stop, 1234 }; 1235 1236 /*-------------------------------------------------------------------------*/ 1237 1238 #ifdef CONFIG_USB_GADGET_DEBUG_FS 1239 1240 static int 1241 udc_seq_show(struct seq_file *m, void *_d) 1242 { 1243 struct pxa25x_udc *dev = m->private; 1244 unsigned long flags; 1245 int i; 1246 u32 tmp; 1247 1248 local_irq_save(flags); 1249 1250 /* basic device status */ 1251 seq_printf(m, DRIVER_DESC "\n" 1252 "%s version: %s\nGadget driver: %s\nHost %s\n\n", 1253 driver_name, DRIVER_VERSION SIZE_STR "(pio)", 1254 dev->driver ? dev->driver->driver.name : "(none)", 1255 dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected"); 1256 1257 /* registers for device and ep0 */ 1258 seq_printf(m, 1259 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n", 1260 udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0), 1261 udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0), 1262 udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL)); 1263 1264 tmp = udc_get_reg(dev, UDCCR); 1265 seq_printf(m, 1266 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp, 1267 (tmp & UDCCR_REM) ? " rem" : "", 1268 (tmp & UDCCR_RSTIR) ? " rstir" : "", 1269 (tmp & UDCCR_SRM) ? " srm" : "", 1270 (tmp & UDCCR_SUSIR) ? " susir" : "", 1271 (tmp & UDCCR_RESIR) ? " resir" : "", 1272 (tmp & UDCCR_RSM) ? " rsm" : "", 1273 (tmp & UDCCR_UDA) ? " uda" : "", 1274 (tmp & UDCCR_UDE) ? " ude" : ""); 1275 1276 tmp = udc_ep0_get_UDCCS(dev); 1277 seq_printf(m, 1278 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp, 1279 (tmp & UDCCS0_SA) ? " sa" : "", 1280 (tmp & UDCCS0_RNE) ? " rne" : "", 1281 (tmp & UDCCS0_FST) ? " fst" : "", 1282 (tmp & UDCCS0_SST) ? " sst" : "", 1283 (tmp & UDCCS0_DRWF) ? " dwrf" : "", 1284 (tmp & UDCCS0_FTF) ? " ftf" : "", 1285 (tmp & UDCCS0_IPR) ? " ipr" : "", 1286 (tmp & UDCCS0_OPR) ? " opr" : ""); 1287 1288 if (dev->has_cfr) { 1289 tmp = udc_get_reg(dev, UDCCFR); 1290 seq_printf(m, 1291 "udccfr %02X =%s%s\n", tmp, 1292 (tmp & UDCCFR_AREN) ? " aren" : "", 1293 (tmp & UDCCFR_ACM) ? 
" acm" : ""); 1294 } 1295 1296 if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver) 1297 goto done; 1298 1299 seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n", 1300 dev->stats.write.bytes, dev->stats.write.ops, 1301 dev->stats.read.bytes, dev->stats.read.ops, 1302 dev->stats.irqs); 1303 1304 /* dump endpoint queues */ 1305 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1306 struct pxa25x_ep *ep = &dev->ep [i]; 1307 struct pxa25x_request *req; 1308 1309 if (i != 0) { 1310 const struct usb_endpoint_descriptor *desc; 1311 1312 desc = ep->ep.desc; 1313 if (!desc) 1314 continue; 1315 tmp = udc_ep_get_UDCCS(&dev->ep[i]); 1316 seq_printf(m, 1317 "%s max %d %s udccs %02x irqs %lu\n", 1318 ep->ep.name, usb_endpoint_maxp(desc), 1319 "pio", tmp, ep->pio_irqs); 1320 /* TODO translate all five groups of udccs bits! */ 1321 1322 } else /* ep0 should only have one transfer queued */ 1323 seq_printf(m, "ep0 max 16 pio irqs %lu\n", 1324 ep->pio_irqs); 1325 1326 if (list_empty(&ep->queue)) { 1327 seq_printf(m, "\t(nothing queued)\n"); 1328 continue; 1329 } 1330 list_for_each_entry(req, &ep->queue, queue) { 1331 seq_printf(m, 1332 "\treq %p len %d/%d buf %p\n", 1333 &req->req, req->req.actual, 1334 req->req.length, req->req.buf); 1335 } 1336 } 1337 1338 done: 1339 local_irq_restore(flags); 1340 return 0; 1341 } 1342 1343 static int 1344 udc_debugfs_open(struct inode *inode, struct file *file) 1345 { 1346 return single_open(file, udc_seq_show, inode->i_private); 1347 } 1348 1349 static const struct file_operations debug_fops = { 1350 .open = udc_debugfs_open, 1351 .read = seq_read, 1352 .llseek = seq_lseek, 1353 .release = single_release, 1354 .owner = THIS_MODULE, 1355 }; 1356 1357 #define create_debug_files(dev) \ 1358 do { \ 1359 dev->debugfs_udc = debugfs_create_file(dev->gadget.name, \ 1360 S_IRUGO, NULL, dev, &debug_fops); \ 1361 } while (0) 1362 #define remove_debug_files(dev) debugfs_remove(dev->debugfs_udc) 1363 1364 #else /* !CONFIG_USB_GADGET_DEBUG_FILES */ 1365 1366 #define create_debug_files(dev) do {} while (0) 1367 #define remove_debug_files(dev) do {} while (0) 1368 1369 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1370 1371 /*-------------------------------------------------------------------------*/ 1372 1373 /* 1374 * udc_disable - disable USB device controller 1375 */ 1376 static void udc_disable(struct pxa25x_udc *dev) 1377 { 1378 /* block all irqs */ 1379 udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM); 1380 udc_set_reg(dev, UICR0, 0xff); 1381 udc_set_reg(dev, UICR1, 0xff); 1382 udc_set_reg(dev, UFNRH, UFNRH_SIM); 1383 1384 /* if hardware supports it, disconnect from usb */ 1385 pullup_off(); 1386 1387 udc_clear_mask_UDCCR(dev, UDCCR_UDE); 1388 1389 ep0_idle (dev); 1390 dev->gadget.speed = USB_SPEED_UNKNOWN; 1391 } 1392 1393 1394 /* 1395 * udc_reinit - initialize software state 1396 */ 1397 static void udc_reinit(struct pxa25x_udc *dev) 1398 { 1399 u32 i; 1400 1401 /* device/ep0 records init */ 1402 INIT_LIST_HEAD (&dev->gadget.ep_list); 1403 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list); 1404 dev->ep0state = EP0_IDLE; 1405 dev->gadget.quirk_altset_not_supp = 1; 1406 1407 /* basic endpoint records init */ 1408 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1409 struct pxa25x_ep *ep = &dev->ep[i]; 1410 1411 if (i != 0) 1412 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list); 1413 1414 ep->ep.desc = NULL; 1415 ep->stopped = 0; 1416 INIT_LIST_HEAD (&ep->queue); 1417 ep->pio_irqs = 0; 1418 usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket); 1419 } 1420 1421 /* the rest was 
statically initialized, and is read-only */ 1422 } 1423 1424 /* until it's enabled, this UDC should be completely invisible 1425 * to any USB host. 1426 */ 1427 static void udc_enable (struct pxa25x_udc *dev) 1428 { 1429 udc_clear_mask_UDCCR(dev, UDCCR_UDE); 1430 1431 /* try to clear these bits before we enable the udc */ 1432 udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); 1433 1434 ep0_idle(dev); 1435 dev->gadget.speed = USB_SPEED_UNKNOWN; 1436 dev->stats.irqs = 0; 1437 1438 /* 1439 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual: 1440 * - enable UDC 1441 * - if RESET is already in progress, ack interrupt 1442 * - unmask reset interrupt 1443 */ 1444 udc_set_mask_UDCCR(dev, UDCCR_UDE); 1445 if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA)) 1446 udc_ack_int_UDCCR(dev, UDCCR_RSTIR); 1447 1448 if (dev->has_cfr /* UDC_RES2 is defined */) { 1449 /* pxa255 (a0+) can avoid a set_config race that could 1450 * prevent gadget drivers from configuring correctly 1451 */ 1452 udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1); 1453 } else { 1454 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1) 1455 * which could result in missing packets and interrupts. 1456 * supposedly one bit per endpoint, controlling whether it 1457 * double buffers or not; ACM/AREN bits fit into the holes. 1458 * zero bits (like USIR0_IRx) disable double buffering. 1459 */ 1460 udc_set_reg(dev, UDC_RES1, 0x00); 1461 udc_set_reg(dev, UDC_RES2, 0x00); 1462 } 1463 1464 /* enable suspend/resume and reset irqs */ 1465 udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM); 1466 1467 /* enable ep0 irqs */ 1468 udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0); 1469 1470 /* if hardware supports it, pullup D+ and wait for reset */ 1471 pullup_on(); 1472 } 1473 1474 1475 /* when a driver is successfully registered, it will receive 1476 * control requests including set_configuration(), which enables 1477 * non-control requests. then usb traffic follows until a 1478 * disconnect is reported. then a host may connect again, or 1479 * the driver might get unbound. 1480 */ 1481 static int pxa25x_udc_start(struct usb_gadget *g, 1482 struct usb_gadget_driver *driver) 1483 { 1484 struct pxa25x_udc *dev = to_pxa25x(g); 1485 int retval; 1486 1487 /* first hook up the driver ... */ 1488 dev->driver = driver; 1489 dev->pullup = 1; 1490 1491 /* ... then enable host detection and ep0; and we're ready 1492 * for set_configuration as well as eventual disconnect. 
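	 * (the udc core calls this through the .udc_start hook once a
	 * gadget driver, e.g. g_ether, has been bound to this controller.)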
1493 */ 1494 /* connect to bus through transceiver */ 1495 if (!IS_ERR_OR_NULL(dev->transceiver)) { 1496 retval = otg_set_peripheral(dev->transceiver->otg, 1497 &dev->gadget); 1498 if (retval) 1499 goto bind_fail; 1500 } 1501 1502 dump_state(dev); 1503 return 0; 1504 bind_fail: 1505 return retval; 1506 } 1507 1508 static void 1509 reset_gadget(struct pxa25x_udc *dev, struct usb_gadget_driver *driver) 1510 { 1511 int i; 1512 1513 /* don't disconnect drivers more than once */ 1514 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1515 driver = NULL; 1516 dev->gadget.speed = USB_SPEED_UNKNOWN; 1517 1518 /* prevent new request submissions, kill any outstanding requests */ 1519 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1520 struct pxa25x_ep *ep = &dev->ep[i]; 1521 1522 ep->stopped = 1; 1523 nuke(ep, -ESHUTDOWN); 1524 } 1525 del_timer_sync(&dev->timer); 1526 1527 /* report reset; the driver is already quiesced */ 1528 if (driver) 1529 usb_gadget_udc_reset(&dev->gadget, driver); 1530 1531 /* re-init driver-visible data structures */ 1532 udc_reinit(dev); 1533 } 1534 1535 static void 1536 stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver) 1537 { 1538 int i; 1539 1540 /* don't disconnect drivers more than once */ 1541 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1542 driver = NULL; 1543 dev->gadget.speed = USB_SPEED_UNKNOWN; 1544 1545 /* prevent new request submissions, kill any outstanding requests */ 1546 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1547 struct pxa25x_ep *ep = &dev->ep[i]; 1548 1549 ep->stopped = 1; 1550 nuke(ep, -ESHUTDOWN); 1551 } 1552 del_timer_sync(&dev->timer); 1553 1554 /* report disconnect; the driver is already quiesced */ 1555 if (driver) 1556 driver->disconnect(&dev->gadget); 1557 1558 /* re-init driver-visible data structures */ 1559 udc_reinit(dev); 1560 } 1561 1562 static int pxa25x_udc_stop(struct usb_gadget*g) 1563 { 1564 struct pxa25x_udc *dev = to_pxa25x(g); 1565 1566 local_irq_disable(); 1567 dev->pullup = 0; 1568 stop_activity(dev, NULL); 1569 local_irq_enable(); 1570 1571 if (!IS_ERR_OR_NULL(dev->transceiver)) 1572 (void) otg_set_peripheral(dev->transceiver->otg, NULL); 1573 1574 dev->driver = NULL; 1575 1576 dump_state(dev); 1577 1578 return 0; 1579 } 1580 1581 /*-------------------------------------------------------------------------*/ 1582 1583 #ifdef CONFIG_ARCH_LUBBOCK 1584 1585 /* Lubbock has separate connect and disconnect irqs. More typical designs 1586 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup. 1587 */ 1588 1589 static irqreturn_t 1590 lubbock_vbus_irq(int irq, void *_dev) 1591 { 1592 struct pxa25x_udc *dev = _dev; 1593 int vbus; 1594 1595 dev->stats.irqs++; 1596 switch (irq) { 1597 case LUBBOCK_USB_IRQ: 1598 vbus = 1; 1599 disable_irq(LUBBOCK_USB_IRQ); 1600 enable_irq(LUBBOCK_USB_DISC_IRQ); 1601 break; 1602 case LUBBOCK_USB_DISC_IRQ: 1603 vbus = 0; 1604 disable_irq(LUBBOCK_USB_DISC_IRQ); 1605 enable_irq(LUBBOCK_USB_IRQ); 1606 break; 1607 default: 1608 return IRQ_NONE; 1609 } 1610 1611 pxa25x_udc_vbus_session(&dev->gadget, vbus); 1612 return IRQ_HANDLED; 1613 } 1614 1615 #endif 1616 1617 1618 /*-------------------------------------------------------------------------*/ 1619 1620 static inline void clear_ep_state (struct pxa25x_udc *dev) 1621 { 1622 unsigned i; 1623 1624 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint 1625 * fifos, and pending transactions mustn't be continued in any case. 
1626 */ 1627 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) 1628 nuke(&dev->ep[i], -ECONNABORTED); 1629 } 1630 1631 static void udc_watchdog(unsigned long _dev) 1632 { 1633 struct pxa25x_udc *dev = (void *)_dev; 1634 1635 local_irq_disable(); 1636 if (dev->ep0state == EP0_STALL 1637 && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0 1638 && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) { 1639 udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF); 1640 DBG(DBG_VERBOSE, "ep0 re-stall\n"); 1641 start_watchdog(dev); 1642 } 1643 local_irq_enable(); 1644 } 1645 1646 static void handle_ep0 (struct pxa25x_udc *dev) 1647 { 1648 u32 udccs0 = udc_ep0_get_UDCCS(dev); 1649 struct pxa25x_ep *ep = &dev->ep [0]; 1650 struct pxa25x_request *req; 1651 union { 1652 struct usb_ctrlrequest r; 1653 u8 raw [8]; 1654 u32 word [2]; 1655 } u; 1656 1657 if (list_empty(&ep->queue)) 1658 req = NULL; 1659 else 1660 req = list_entry(ep->queue.next, struct pxa25x_request, queue); 1661 1662 /* clear stall status */ 1663 if (udccs0 & UDCCS0_SST) { 1664 nuke(ep, -EPIPE); 1665 udc_ep0_set_UDCCS(dev, UDCCS0_SST); 1666 del_timer(&dev->timer); 1667 ep0_idle(dev); 1668 } 1669 1670 /* previous request unfinished? non-error iff back-to-back ... */ 1671 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) { 1672 nuke(ep, 0); 1673 del_timer(&dev->timer); 1674 ep0_idle(dev); 1675 } 1676 1677 switch (dev->ep0state) { 1678 case EP0_IDLE: 1679 /* late-breaking status? */ 1680 udccs0 = udc_ep0_get_UDCCS(dev); 1681 1682 /* start control request? */ 1683 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE)) 1684 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) { 1685 int i; 1686 1687 nuke (ep, -EPROTO); 1688 1689 /* read SETUP packet */ 1690 for (i = 0; i < 8; i++) { 1691 if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) { 1692 bad_setup: 1693 DMSG("SETUP %d!\n", i); 1694 goto stall; 1695 } 1696 u.raw [i] = (u8) UDDR0; 1697 } 1698 if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0)) 1699 goto bad_setup; 1700 1701 got_setup: 1702 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1703 u.r.bRequestType, u.r.bRequest, 1704 le16_to_cpu(u.r.wValue), 1705 le16_to_cpu(u.r.wIndex), 1706 le16_to_cpu(u.r.wLength)); 1707 1708 /* cope with automagic for some standard requests. */ 1709 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK) 1710 == USB_TYPE_STANDARD; 1711 dev->req_config = 0; 1712 dev->req_pending = 1; 1713 switch (u.r.bRequest) { 1714 /* hardware restricts gadget drivers here! */ 1715 case USB_REQ_SET_CONFIGURATION: 1716 if (u.r.bRequestType == USB_RECIP_DEVICE) { 1717 /* reflect hardware's automagic 1718 * up to the gadget driver. 1719 */ 1720 config_change: 1721 dev->req_config = 1; 1722 clear_ep_state(dev); 1723 /* if !has_cfr, there's no synch 1724 * else use AREN (later) not SA|OPR 1725 * USIR0_IR0 acts edge sensitive 1726 */ 1727 } 1728 break; 1729 /* ... and here, even more ... */ 1730 case USB_REQ_SET_INTERFACE: 1731 if (u.r.bRequestType == USB_RECIP_INTERFACE) { 1732 /* udc hardware is broken by design: 1733 * - altsetting may only be zero; 1734 * - hw resets all interfaces' eps; 1735 * - ep reset doesn't include halt(?). 
				 */
				DMSG("broken set_interface (%d/%d)\n",
					le16_to_cpu(u.r.wIndex),
					le16_to_cpu(u.r.wValue));
				goto config_change;
			}
			break;
		/* hardware was supposed to hide this */
		case USB_REQ_SET_ADDRESS:
			if (u.r.bRequestType == USB_RECIP_DEVICE) {
				ep0start(dev, 0, "address");
				return;
			}
			break;
		}

		if (u.r.bRequestType & USB_DIR_IN)
			dev->ep0state = EP0_IN_DATA_PHASE;
		else
			dev->ep0state = EP0_OUT_DATA_PHASE;

		i = dev->driver->setup(&dev->gadget, &u.r);
		if (i < 0) {
			/* hardware automagic preventing STALL... */
			if (dev->req_config) {
				/* hardware sometimes neglects to tell us
				 * about config change events, so later
				 * ones may fail...
				 */
				WARNING("config change %02x fail %d?\n",
					u.r.bRequest, i);
				return;
				/* TODO experiment: if has_cfr,
				 * hardware didn't ACK; maybe we
				 * could actually STALL!
				 */
			}
			DBG(DBG_VERBOSE, "protocol STALL, "
				"%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
stall:
			/* the watchdog timer helps deal with cases
			 * where udc seems to clear FST wrongly, and
			 * then NAKs instead of STALLing.
			 */
			ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
			start_watchdog(dev);
			dev->ep0state = EP0_STALL;

		/* deferred i/o == no response yet */
		} else if (dev->req_pending) {
			if (likely(dev->ep0state == EP0_IN_DATA_PHASE
					|| dev->req_std || u.r.wLength))
				ep0start(dev, 0, "defer");
			else
				ep0start(dev, UDCCS0_IPR, "defer/IPR");
		}

		/* expect at least one data or status stage irq */
		return;

	} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
			== (UDCCS0_OPR|UDCCS0_SA))) {
		unsigned i;

		/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
		 * still observed on a pxa255 a0.
1802 */ 1803 DBG(DBG_VERBOSE, "e131\n"); 1804 nuke(ep, -EPROTO); 1805 1806 /* read SETUP data, but don't trust it too much */ 1807 for (i = 0; i < 8; i++) 1808 u.raw [i] = (u8) UDDR0; 1809 if ((u.r.bRequestType & USB_RECIP_MASK) 1810 > USB_RECIP_OTHER) 1811 goto stall; 1812 if (u.word [0] == 0 && u.word [1] == 0) 1813 goto stall; 1814 goto got_setup; 1815 } else { 1816 /* some random early IRQ: 1817 * - we acked FST 1818 * - IPR cleared 1819 * - OPR got set, without SA (likely status stage) 1820 */ 1821 udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR)); 1822 } 1823 break; 1824 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */ 1825 if (udccs0 & UDCCS0_OPR) { 1826 udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF); 1827 DBG(DBG_VERBOSE, "ep0in premature status\n"); 1828 if (req) 1829 done(ep, req, 0); 1830 ep0_idle(dev); 1831 } else /* irq was IPR clearing */ { 1832 if (req) { 1833 /* this IN packet might finish the request */ 1834 (void) write_ep0_fifo(ep, req); 1835 } /* else IN token before response was written */ 1836 } 1837 break; 1838 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */ 1839 if (udccs0 & UDCCS0_OPR) { 1840 if (req) { 1841 /* this OUT packet might finish the request */ 1842 if (read_ep0_fifo(ep, req)) 1843 done(ep, req, 0); 1844 /* else more OUT packets expected */ 1845 } /* else OUT token before read was issued */ 1846 } else /* irq was IPR clearing */ { 1847 DBG(DBG_VERBOSE, "ep0out premature status\n"); 1848 if (req) 1849 done(ep, req, 0); 1850 ep0_idle(dev); 1851 } 1852 break; 1853 case EP0_END_XFER: 1854 if (req) 1855 done(ep, req, 0); 1856 /* ack control-IN status (maybe in-zlp was skipped) 1857 * also appears after some config change events. 1858 */ 1859 if (udccs0 & UDCCS0_OPR) 1860 udc_ep0_set_UDCCS(dev, UDCCS0_OPR); 1861 ep0_idle(dev); 1862 break; 1863 case EP0_STALL: 1864 udc_ep0_set_UDCCS(dev, UDCCS0_FST); 1865 break; 1866 } 1867 udc_set_reg(dev, USIR0, USIR0_IR0); 1868 } 1869 1870 static void handle_ep(struct pxa25x_ep *ep) 1871 { 1872 struct pxa25x_request *req; 1873 int is_in = ep->bEndpointAddress & USB_DIR_IN; 1874 int completed; 1875 u32 udccs, tmp; 1876 1877 do { 1878 completed = 0; 1879 if (likely (!list_empty(&ep->queue))) 1880 req = list_entry(ep->queue.next, 1881 struct pxa25x_request, queue); 1882 else 1883 req = NULL; 1884 1885 // TODO check FST handling 1886 1887 udccs = udc_ep_get_UDCCS(ep); 1888 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */ 1889 tmp = UDCCS_BI_TUR; 1890 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) 1891 tmp |= UDCCS_BI_SST; 1892 tmp &= udccs; 1893 if (likely (tmp)) 1894 udc_ep_set_UDCCS(ep, tmp); 1895 if (req && likely ((udccs & UDCCS_BI_TFS) != 0)) 1896 completed = write_fifo(ep, req); 1897 1898 } else { /* irq from RPC (or for ISO, ROF) */ 1899 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) 1900 tmp = UDCCS_BO_SST | UDCCS_BO_DME; 1901 else 1902 tmp = UDCCS_IO_ROF | UDCCS_IO_DME; 1903 tmp &= udccs; 1904 if (likely(tmp)) 1905 udc_ep_set_UDCCS(ep, tmp); 1906 1907 /* fifos can hold packets, ready for reading... */ 1908 if (likely(req)) { 1909 completed = read_fifo(ep, req); 1910 } else 1911 pio_irq_disable(ep); 1912 } 1913 ep->pio_irqs++; 1914 } while (completed); 1915 } 1916 1917 /* 1918 * pxa25x_udc_irq - interrupt handler 1919 * 1920 * avoid delays in ep0 processing. the control handshaking isn't always 1921 * under software control (pxa250c0 and the pxa255 are better), and delays 1922 * could cause usb protocol errors. 
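 *
 * the loop below repeats until one pass finds no more work: UDCCR flags
 * the reset/suspend/resume events, while USIR0/USIR1 (gated by UICR0 and
 * UICR1) flag per-endpoint service requests, with ep0 handled first.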
static void handle_ep(struct pxa25x_ep *ep)
{
	struct pxa25x_request	*req;
	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
	int			completed;
	u32			udccs, tmp;

	do {
		completed = 0;
		if (likely (!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa25x_request, queue);
		else
			req = NULL;

		// TODO check FST handling

		udccs = udc_ep_get_UDCCS(ep);
		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
			tmp = UDCCS_BI_TUR;
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp |= UDCCS_BI_SST;
			tmp &= udccs;
			if (likely (tmp))
				udc_ep_set_UDCCS(ep, tmp);
			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
				completed = write_fifo(ep, req);

		} else {	/* irq from RPC (or for ISO, ROF) */
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
			else
				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
			tmp &= udccs;
			if (likely(tmp))
				udc_ep_set_UDCCS(ep, tmp);

			/* fifos can hold packets, ready for reading... */
			if (likely(req)) {
				completed = read_fifo(ep, req);
			} else
				pio_irq_disable(ep);
		}
		ep->pio_irqs++;
	} while (completed);
}

/*
 * pxa25x_udc_irq - interrupt handler
 *
 * avoid delays in ep0 processing. the control handshaking isn't always
 * under software control (pxa250c0 and the pxa255 are better), and delays
 * could cause usb protocol errors.
 */
static irqreturn_t
pxa25x_udc_irq(int irq, void *_dev)
{
	struct pxa25x_udc	*dev = _dev;
	int			handled;

	dev->stats.irqs++;
	do {
		u32		udccr = udc_get_reg(dev, UDCCR);

		handled = 0;

		/* SUSpend Interrupt Request */
		if (unlikely(udccr & UDCCR_SUSIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_SUSIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB suspend\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			ep0_idle (dev);
		}

		/* RESume Interrupt Request */
		if (unlikely(udccr & UDCCR_RESIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_RESIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB resume\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}

		/* ReSeT Interrupt Request - USB reset */
		if (unlikely(udccr & UDCCR_RSTIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
			handled = 1;

			if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) {
				DBG(DBG_VERBOSE, "USB reset start\n");

				/* reset driver and endpoints,
				 * in case that's not yet done
				 */
				reset_gadget(dev, dev->driver);

			} else {
				DBG(DBG_VERBOSE, "USB reset end\n");
				dev->gadget.speed = USB_SPEED_FULL;
				memset(&dev->stats, 0, sizeof dev->stats);
				/* driver and endpoints are still reset */
			}

		} else {
			u32	usir0 = udc_get_reg(dev, USIR0) &
					~udc_get_reg(dev, UICR0);
			u32	usir1 = udc_get_reg(dev, USIR1) &
					~udc_get_reg(dev, UICR1);
			int	i;

			if (unlikely (!usir0 && !usir1))
				continue;

			DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);

			/* control traffic */
			if (usir0 & USIR0_IR0) {
				dev->ep[0].pio_irqs++;
				handle_ep0(dev);
				handled = 1;
			}

			/* endpoint data transfers */
			for (i = 0; i < 8; i++) {
				u32	tmp = 1 << i;

				if (i && (usir0 & tmp)) {
					handle_ep(&dev->ep[i]);
					udc_set_reg(dev, USIR0,
						udc_get_reg(dev, USIR0) | tmp);
					handled = 1;
				}
#ifndef CONFIG_USB_PXA25X_SMALL
				if (usir1 & tmp) {
					handle_ep(&dev->ep[i+8]);
					udc_set_reg(dev, USIR1,
						udc_get_reg(dev, USIR1) | tmp);
					handled = 1;
				}
#endif
			}
		}

		/* we could also ask for 1 msec SOF (SIR) interrupts */

	} while (handled);
	return IRQ_HANDLED;
}

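/* Endpoint interrupt demultiplexing used above: ep0 reports through
 * USIR0_IR0, endpoints 1..7 through bits 1..7 of USIR0, and endpoints
 * 8..15 through bits 0..7 of USIR1.  Sources masked via UICR0/UICR1 are
 * ignored here; pio_irq_disable(), defined earlier in this file,
 * presumably sets those mask bits when an endpoint has nothing queued.
 */
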
/*-------------------------------------------------------------------------*/

static void nop_release (struct device *dev)
{
	DMSG("%s %s\n", __func__, dev_name(dev));
}

/* this uses load-time allocation and initialization (instead of
 * doing it at run-time) to save code, eliminate fault paths, and
 * be more obviously correct.
 */
static struct pxa25x_udc memory = {
	.gadget = {
		.ops = &pxa25x_udc_ops,
		.ep0 = &memory.ep[0].ep,
		.name = driver_name,
		.dev = {
			.init_name = "gadget",
			.release = nop_release,
		},
	},

	/* control endpoint */
	.ep[0] = {
		.ep = {
			.name = ep0name,
			.ops = &pxa25x_ep_ops,
			.maxpacket = EP0_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
					USB_EP_CAPS_DIR_ALL),
		},
		.dev = &memory,
		.regoff_udccs = UDCCS0,
		.regoff_uddr = UDDR0,
	},

	/* first group of endpoints */
	.ep[1] = {
		.ep = {
			.name = "ep1in-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS1,
		.regoff_uddr = UDDR1,
	},
	.ep[2] = {
		.ep = {
			.name = "ep2out-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = 2,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS2,
		.regoff_ubcr = UBCR2,
		.regoff_uddr = UDDR2,
	},
#ifndef CONFIG_USB_PXA25X_SMALL
	.ep[3] = {
		.ep = {
			.name = "ep3in-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 3,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS3,
		.regoff_uddr = UDDR3,
	},
	.ep[4] = {
		.ep = {
			.name = "ep4out-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = 4,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS4,
		.regoff_ubcr = UBCR4,
		.regoff_uddr = UDDR4,
	},
	.ep[5] = {
		.ep = {
			.name = "ep5in-int",
			.ops = &pxa25x_ep_ops,
			.maxpacket = INT_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 5,
		.bmAttributes = USB_ENDPOINT_XFER_INT,
		.regoff_udccs = UDCCS5,
		.regoff_uddr = UDDR5,
	},

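	/* The endpoint map is fixed by the silicon: the group above and the
	 * two groups below repeat the same bulk-IN/bulk-OUT/iso-IN/iso-OUT/
	 * interrupt-IN pattern, differing only in their UDCCS/UBCR/UDDR
	 * register offsets.  Gadget drivers normally claim these endpoints
	 * through usb_ep_autoconfig(), which matches against the .caps and
	 * names declared here.
	 */
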
"ep8in-iso", 2177 .ops = &pxa25x_ep_ops, 2178 .maxpacket = ISO_FIFO_SIZE, 2179 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2180 USB_EP_CAPS_DIR_IN), 2181 }, 2182 .dev = &memory, 2183 .fifo_size = ISO_FIFO_SIZE, 2184 .bEndpointAddress = USB_DIR_IN | 8, 2185 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2186 .regoff_udccs = UDCCS8, 2187 .regoff_uddr = UDDR8, 2188 }, 2189 .ep[9] = { 2190 .ep = { 2191 .name = "ep9out-iso", 2192 .ops = &pxa25x_ep_ops, 2193 .maxpacket = ISO_FIFO_SIZE, 2194 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2195 USB_EP_CAPS_DIR_OUT), 2196 }, 2197 .dev = &memory, 2198 .fifo_size = ISO_FIFO_SIZE, 2199 .bEndpointAddress = 9, 2200 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2201 .regoff_udccs = UDCCS9, 2202 .regoff_ubcr = UBCR9, 2203 .regoff_uddr = UDDR9, 2204 }, 2205 .ep[10] = { 2206 .ep = { 2207 .name = "ep10in-int", 2208 .ops = &pxa25x_ep_ops, 2209 .maxpacket = INT_FIFO_SIZE, 2210 .caps = USB_EP_CAPS(0, 0), 2211 }, 2212 .dev = &memory, 2213 .fifo_size = INT_FIFO_SIZE, 2214 .bEndpointAddress = USB_DIR_IN | 10, 2215 .bmAttributes = USB_ENDPOINT_XFER_INT, 2216 .regoff_udccs = UDCCS10, 2217 .regoff_uddr = UDDR10, 2218 }, 2219 2220 /* third group of endpoints */ 2221 .ep[11] = { 2222 .ep = { 2223 .name = "ep11in-bulk", 2224 .ops = &pxa25x_ep_ops, 2225 .maxpacket = BULK_FIFO_SIZE, 2226 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2227 USB_EP_CAPS_DIR_IN), 2228 }, 2229 .dev = &memory, 2230 .fifo_size = BULK_FIFO_SIZE, 2231 .bEndpointAddress = USB_DIR_IN | 11, 2232 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2233 .regoff_udccs = UDCCS11, 2234 .regoff_uddr = UDDR11, 2235 }, 2236 .ep[12] = { 2237 .ep = { 2238 .name = "ep12out-bulk", 2239 .ops = &pxa25x_ep_ops, 2240 .maxpacket = BULK_FIFO_SIZE, 2241 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2242 USB_EP_CAPS_DIR_OUT), 2243 }, 2244 .dev = &memory, 2245 .fifo_size = BULK_FIFO_SIZE, 2246 .bEndpointAddress = 12, 2247 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2248 .regoff_udccs = UDCCS12, 2249 .regoff_ubcr = UBCR12, 2250 .regoff_uddr = UDDR12, 2251 }, 2252 .ep[13] = { 2253 .ep = { 2254 .name = "ep13in-iso", 2255 .ops = &pxa25x_ep_ops, 2256 .maxpacket = ISO_FIFO_SIZE, 2257 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2258 USB_EP_CAPS_DIR_IN), 2259 }, 2260 .dev = &memory, 2261 .fifo_size = ISO_FIFO_SIZE, 2262 .bEndpointAddress = USB_DIR_IN | 13, 2263 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2264 .regoff_udccs = UDCCS13, 2265 .regoff_uddr = UDDR13, 2266 }, 2267 .ep[14] = { 2268 .ep = { 2269 .name = "ep14out-iso", 2270 .ops = &pxa25x_ep_ops, 2271 .maxpacket = ISO_FIFO_SIZE, 2272 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2273 USB_EP_CAPS_DIR_OUT), 2274 }, 2275 .dev = &memory, 2276 .fifo_size = ISO_FIFO_SIZE, 2277 .bEndpointAddress = 14, 2278 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2279 .regoff_udccs = UDCCS14, 2280 .regoff_ubcr = UBCR14, 2281 .regoff_uddr = UDDR14, 2282 }, 2283 .ep[15] = { 2284 .ep = { 2285 .name = "ep15in-int", 2286 .ops = &pxa25x_ep_ops, 2287 .maxpacket = INT_FIFO_SIZE, 2288 .caps = USB_EP_CAPS(0, 0), 2289 }, 2290 .dev = &memory, 2291 .fifo_size = INT_FIFO_SIZE, 2292 .bEndpointAddress = USB_DIR_IN | 15, 2293 .bmAttributes = USB_ENDPOINT_XFER_INT, 2294 .regoff_udccs = UDCCS15, 2295 .regoff_uddr = UDDR15, 2296 }, 2297 #endif /* !CONFIG_USB_PXA25X_SMALL */ 2298 }; 2299 2300 #define CP15R0_VENDOR_MASK 0xffffe000 2301 2302 #if defined(CONFIG_ARCH_PXA) 2303 #define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */ 2304 2305 #elif defined(CONFIG_ARCH_IXP4XX) 2306 #define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */ 2307 2308 #endif 2309 2310 
/*
 * probe - binds to the platform device
 */
static int pxa25x_udc_probe(struct platform_device *pdev)
{
	struct pxa25x_udc *dev = &memory;
	int retval, irq;
	u32 chiprev;
	struct resource *res;

	pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);

	/* insist on Intel/ARM/XScale */
	asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
	if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
		pr_err("%s: not XScale!\n", driver_name);
		return -ENODEV;
	}

	/* trigger chiprev-specific logic */
	switch (chiprev & CP15R0_PRODREV_MASK) {
#if defined(CONFIG_ARCH_PXA)
	case PXA255_A0:
		dev->has_cfr = 1;
		break;
	case PXA250_A0:
	case PXA250_A1:
		/* A0/A1 "not released"; ep 13, 15 unusable */
		/* fall through */
	case PXA250_B2: case PXA210_B2:
	case PXA250_B1: case PXA210_B1:
	case PXA250_B0: case PXA210_B0:
		/* OUT-DMA is broken ... */
		/* fall through */
	case PXA250_C0: case PXA210_C0:
		break;
#elif defined(CONFIG_ARCH_IXP4XX)
	case IXP425_A0:
	case IXP425_B0:
	case IXP465_AD:
		dev->has_cfr = 1;
		break;
#endif
	default:
		pr_err("%s: unrecognized processor: %08x\n",
			driver_name, chiprev);
		/* iop3xx, ixp4xx, ... */
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs))
		return PTR_ERR(dev->regs);

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk))
		return PTR_ERR(dev->clk);

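	/* the MMIO mapping and clock above, and the GPIO and IRQs requested
	 * below, are all devm_* managed, so the explicit error path only has
	 * to clear non-managed state such as the transceiver pointer
	 */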
"" : " (!cfr)", 2399 SIZE_STR "(pio)" 2400 ); 2401 2402 /* other non-static parts of init */ 2403 dev->dev = &pdev->dev; 2404 dev->mach = dev_get_platdata(&pdev->dev); 2405 2406 dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); 2407 2408 if (gpio_is_valid(dev->mach->gpio_pullup)) { 2409 retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup, 2410 "pca25x_udc GPIO PULLUP"); 2411 if (retval) { 2412 dev_dbg(&pdev->dev, 2413 "can't get pullup gpio %d, err: %d\n", 2414 dev->mach->gpio_pullup, retval); 2415 goto err; 2416 } 2417 gpio_direction_output(dev->mach->gpio_pullup, 0); 2418 } 2419 2420 init_timer(&dev->timer); 2421 dev->timer.function = udc_watchdog; 2422 dev->timer.data = (unsigned long) dev; 2423 2424 the_controller = dev; 2425 platform_set_drvdata(pdev, dev); 2426 2427 udc_disable(dev); 2428 udc_reinit(dev); 2429 2430 dev->vbus = 0; 2431 2432 /* irq setup after old hardware state is cleaned up */ 2433 retval = devm_request_irq(&pdev->dev, irq, pxa25x_udc_irq, 0, 2434 driver_name, dev); 2435 if (retval != 0) { 2436 pr_err("%s: can't get irq %d, err %d\n", 2437 driver_name, irq, retval); 2438 goto err; 2439 } 2440 dev->got_irq = 1; 2441 2442 #ifdef CONFIG_ARCH_LUBBOCK 2443 if (machine_is_lubbock()) { 2444 retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_DISC_IRQ, 2445 lubbock_vbus_irq, 0, driver_name, 2446 dev); 2447 if (retval != 0) { 2448 pr_err("%s: can't get irq %i, err %d\n", 2449 driver_name, LUBBOCK_USB_DISC_IRQ, retval); 2450 goto err; 2451 } 2452 retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_IRQ, 2453 lubbock_vbus_irq, 0, driver_name, 2454 dev); 2455 if (retval != 0) { 2456 pr_err("%s: can't get irq %i, err %d\n", 2457 driver_name, LUBBOCK_USB_IRQ, retval); 2458 goto err; 2459 } 2460 } else 2461 #endif 2462 create_debug_files(dev); 2463 2464 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget); 2465 if (!retval) 2466 return retval; 2467 2468 remove_debug_files(dev); 2469 err: 2470 if (!IS_ERR_OR_NULL(dev->transceiver)) 2471 dev->transceiver = NULL; 2472 return retval; 2473 } 2474 2475 static void pxa25x_udc_shutdown(struct platform_device *_dev) 2476 { 2477 pullup_off(); 2478 } 2479 2480 static int pxa25x_udc_remove(struct platform_device *pdev) 2481 { 2482 struct pxa25x_udc *dev = platform_get_drvdata(pdev); 2483 2484 if (dev->driver) 2485 return -EBUSY; 2486 2487 usb_del_gadget_udc(&dev->gadget); 2488 dev->pullup = 0; 2489 pullup(dev); 2490 2491 remove_debug_files(dev); 2492 2493 if (!IS_ERR_OR_NULL(dev->transceiver)) 2494 dev->transceiver = NULL; 2495 2496 the_controller = NULL; 2497 return 0; 2498 } 2499 2500 /*-------------------------------------------------------------------------*/ 2501 2502 #ifdef CONFIG_PM 2503 2504 /* USB suspend (controlled by the host) and system suspend (controlled 2505 * by the PXA) don't necessarily work well together. If USB is active, 2506 * the 48 MHz clock is required; so the system can't enter 33 MHz idle 2507 * mode, or any deeper PM saving state. 2508 * 2509 * For now, we punt and forcibly disconnect from the USB host when PXA 2510 * enters any suspend state. While we're disconnected, we always disable 2511 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states. 2512 * Boards without software pullup control shouldn't use those states. 2513 * VBUS IRQs should probably be ignored so that the PXA device just acts 2514 * "dead" to USB hosts until system resume. 
#ifdef CONFIG_PM

/* USB suspend (controlled by the host) and system suspend (controlled
 * by the PXA) don't necessarily work well together. If USB is active,
 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
 * mode, or any deeper PM saving state.
 *
 * For now, we punt and forcibly disconnect from the USB host when PXA
 * enters any suspend state. While we're disconnected, we always disable
 * the 48 MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
 * Boards without software pullup control shouldn't use those states.
 * VBUS IRQs should probably be ignored so that the PXA device just acts
 * "dead" to USB hosts until system resume.
 */
static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct pxa25x_udc *udc = platform_get_drvdata(dev);
	unsigned long flags;

	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
		WARNING("USB host won't detect disconnect!\n");
	udc->suspended = 1;

	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);

	return 0;
}

static int pxa25x_udc_resume(struct platform_device *dev)
{
	struct pxa25x_udc *udc = platform_get_drvdata(dev);
	unsigned long flags;

	udc->suspended = 0;
	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);

	return 0;
}

#else
#define pxa25x_udc_suspend	NULL
#define pxa25x_udc_resume	NULL
#endif

/*-------------------------------------------------------------------------*/

static struct platform_driver udc_driver = {
	.shutdown	= pxa25x_udc_shutdown,
	.probe		= pxa25x_udc_probe,
	.remove		= pxa25x_udc_remove,
	.suspend	= pxa25x_udc_suspend,
	.resume		= pxa25x_udc_resume,
	.driver		= {
		.name	= "pxa25x-udc",
	},
};

module_platform_driver(udc_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa25x-udc");