1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers 4 * 5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker) 6 * Copyright (C) 2003 Robert Schwebel, Pengutronix 7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix 8 * Copyright (C) 2003 David Brownell 9 * Copyright (C) 2003 Joshua Wise 10 */ 11 12 /* #define VERBOSE_DEBUG */ 13 14 #include <linux/device.h> 15 #include <linux/gpio.h> 16 #include <linux/module.h> 17 #include <linux/kernel.h> 18 #include <linux/ioport.h> 19 #include <linux/types.h> 20 #include <linux/errno.h> 21 #include <linux/err.h> 22 #include <linux/delay.h> 23 #include <linux/slab.h> 24 #include <linux/timer.h> 25 #include <linux/list.h> 26 #include <linux/interrupt.h> 27 #include <linux/mm.h> 28 #include <linux/platform_data/pxa2xx_udc.h> 29 #include <linux/platform_device.h> 30 #include <linux/dma-mapping.h> 31 #include <linux/irq.h> 32 #include <linux/clk.h> 33 #include <linux/seq_file.h> 34 #include <linux/debugfs.h> 35 #include <linux/io.h> 36 #include <linux/prefetch.h> 37 38 #include <asm/byteorder.h> 39 #include <asm/dma.h> 40 #include <asm/mach-types.h> 41 #include <asm/unaligned.h> 42 43 #include <linux/usb/ch9.h> 44 #include <linux/usb/gadget.h> 45 #include <linux/usb/otg.h> 46 47 #ifdef CONFIG_ARCH_LUBBOCK 48 #include <mach/lubbock.h> 49 #endif 50 51 #define UDCCR 0x0000 /* UDC Control Register */ 52 #define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */ 53 #define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */ 54 #define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */ 55 #define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */ 56 #define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */ 57 #define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */ 58 #define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */ 59 #define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */ 60 #define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */ 61 #define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */ 62 #define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */ 63 #define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */ 64 #define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */ 65 #define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */ 66 #define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */ 67 #define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */ 68 #define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */ 69 #define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */ 70 #define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */ 71 #define UFNRH 0x0060 /* UDC Frame Number Register High */ 72 #define UFNRL 0x0064 /* UDC Frame Number Register Low */ 73 #define UBCR2 0x0068 /* UDC Byte Count Reg 2 */ 74 #define UBCR4 0x006c /* UDC Byte Count Reg 4 */ 75 #define UBCR7 0x0070 /* UDC Byte Count Reg 7 */ 76 #define UBCR9 0x0074 /* UDC Byte Count Reg 9 */ 77 #define UBCR12 0x0078 /* UDC Byte Count Reg 12 */ 78 #define UBCR14 0x007c /* UDC Byte Count Reg 14 */ 79 #define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */ 80 #define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */ 81 #define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */ 82 #define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */ 83 #define UDDR4 0x0400 /* UDC Endpoint 4 Data 
Register */
#define UDDR5		0x00A0	/* UDC Endpoint 5 Data Register */
#define UDDR6		0x0600	/* UDC Endpoint 6 Data Register */
#define UDDR7		0x0680	/* UDC Endpoint 7 Data Register */
#define UDDR8		0x0700	/* UDC Endpoint 8 Data Register */
#define UDDR9		0x0900	/* UDC Endpoint 9 Data Register */
#define UDDR10		0x00C0	/* UDC Endpoint 10 Data Register */
#define UDDR11		0x0B00	/* UDC Endpoint 11 Data Register */
#define UDDR12		0x0B80	/* UDC Endpoint 12 Data Register */
#define UDDR13		0x0C00	/* UDC Endpoint 13 Data Register */
#define UDDR14		0x0E00	/* UDC Endpoint 14 Data Register */
#define UDDR15		0x00E0	/* UDC Endpoint 15 Data Register */

#define UICR0		0x0050	/* UDC Interrupt Control Register 0 */
#define UICR1		0x0054	/* UDC Interrupt Control Register 1 */

#define USIR0		0x0058	/* UDC Status Interrupt Register 0 */
#define USIR1		0x005C	/* UDC Status Interrupt Register 1 */

#define UDCCR_UDE	(1 << 0)	/* UDC enable */
#define UDCCR_UDA	(1 << 1)	/* UDC active */
#define UDCCR_RSM	(1 << 2)	/* Device resume */
#define UDCCR_RESIR	(1 << 3)	/* Resume interrupt request */
#define UDCCR_SUSIR	(1 << 4)	/* Suspend interrupt request */
#define UDCCR_SRM	(1 << 5)	/* Suspend/resume interrupt mask */
#define UDCCR_RSTIR	(1 << 6)	/* Reset interrupt request */
#define UDCCR_REM	(1 << 7)	/* Reset interrupt mask */

#define UDCCS0_OPR	(1 << 0)	/* OUT packet ready */
#define UDCCS0_IPR	(1 << 1)	/* IN packet ready */
#define UDCCS0_FTF	(1 << 2)	/* Flush Tx FIFO */
#define UDCCS0_DRWF	(1 << 3)	/* Device remote wakeup feature */
#define UDCCS0_SST	(1 << 4)	/* Sent stall */
#define UDCCS0_FST	(1 << 5)	/* Force stall */
#define UDCCS0_RNE	(1 << 6)	/* Receive FIFO not empty */
#define UDCCS0_SA	(1 << 7)	/* Setup active */

#define UDCCS_BI_TFS	(1 << 0)	/* Transmit FIFO service */
#define UDCCS_BI_TPC	(1 << 1)	/* Transmit packet complete */
#define UDCCS_BI_FTF	(1 << 2)	/* Flush Tx FIFO */
#define UDCCS_BI_TUR	(1 << 3)	/* Transmit FIFO underrun */
#define UDCCS_BI_SST	(1 << 4)	/* Sent stall */
#define UDCCS_BI_FST	(1 << 5)	/* Force stall */
#define UDCCS_BI_TSP	(1 << 7)	/* Transmit short packet */

#define UDCCS_BO_RFS	(1 << 0)	/* Receive FIFO service */
#define UDCCS_BO_RPC	(1 << 1)	/* Receive packet complete */
#define UDCCS_BO_DME	(1 << 3)	/* DMA enable */
#define UDCCS_BO_SST	(1 << 4)	/* Sent stall */
#define UDCCS_BO_FST	(1 << 5)	/* Force stall */
#define UDCCS_BO_RNE	(1 << 6)	/* Receive FIFO not empty */
#define UDCCS_BO_RSP	(1 << 7)	/* Receive short packet */

#define UDCCS_II_TFS	(1 << 0)	/* Transmit FIFO service */
#define UDCCS_II_TPC	(1 << 1)	/* Transmit packet complete */
#define UDCCS_II_FTF	(1 << 2)	/* Flush Tx FIFO */
#define UDCCS_II_TUR	(1 << 3)	/* Transmit FIFO underrun */
#define UDCCS_II_TSP	(1 << 7)	/* Transmit short packet */

#define UDCCS_IO_RFS	(1 << 0)	/* Receive FIFO service */
#define UDCCS_IO_RPC	(1 << 1)	/* Receive packet complete */
#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right? datasheet says '2' */
#define UDCCS_IO_ROF	(1 << 3)	/* Receive overflow */
#endif
#ifdef CONFIG_ARCH_PXA
#define UDCCS_IO_ROF	(1 << 2)	/* Receive overflow */
#endif
#define UDCCS_IO_DME	(1 << 3)	/* DMA enable */
#define UDCCS_IO_RNE	(1 << 6)	/* Receive FIFO not empty */
#define UDCCS_IO_RSP	(1 << 7)	/* Receive short packet */

#define UDCCS_INT_TFS	(1 << 0)
/* Transmit FIFO service */ 155 #define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */ 156 #define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */ 157 #define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */ 158 #define UDCCS_INT_SST (1 << 4) /* Sent stall */ 159 #define UDCCS_INT_FST (1 << 5) /* Force stall */ 160 #define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */ 161 162 #define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */ 163 #define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */ 164 #define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */ 165 #define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */ 166 #define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */ 167 #define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */ 168 #define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */ 169 #define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */ 170 171 #define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */ 172 #define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */ 173 #define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */ 174 #define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */ 175 #define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */ 176 #define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */ 177 #define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */ 178 #define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */ 179 180 #define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */ 181 #define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */ 182 #define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */ 183 #define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */ 184 #define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */ 185 #define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */ 186 #define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */ 187 #define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */ 188 189 #define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */ 190 #define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */ 191 #define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */ 192 #define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */ 193 #define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */ 194 #define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */ 195 #define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */ 196 #define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */ 197 198 /* 199 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x 200 * series processors. The UDC for the IXP 4xx series is very similar. 201 * There are fifteen endpoints, in addition to ep0. 202 * 203 * Such controller drivers work with a gadget driver. The gadget driver 204 * returns descriptors, implements configuration and data protocols used 205 * by the host to interact with this device, and allocates endpoints to 206 * the different protocol interfaces. The controller driver virtualizes 207 * usb hardware so that the gadget drivers will be more portable. 208 * 209 * This UDC hardware wants to implement a bit too much USB protocol, so 210 * it constrains the sorts of USB configuration change events that work. 211 * The errata for these chips are misleading; some "fixed" bugs from 212 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there. 213 * 214 * Note that the UDC hardware supports DMA (except on IXP) but that's 215 * not used here. IN-DMA (to host) is simple enough, when the data is 216 * suitably aligned (16 bytes) ... the network stack doesn't do that, 217 * other software can. OUT-DMA is buggy in most chip versions, as well 218 * as poorly designed (data toggle not automatic). 
So this driver won't 219 * bother using DMA. (Mostly-working IN-DMA support was available in 220 * kernels before 2.6.23, but was never enabled or well tested.) 221 */ 222 223 #define DRIVER_VERSION "30-June-2007" 224 #define DRIVER_DESC "PXA 25x USB Device Controller driver" 225 226 227 static const char driver_name [] = "pxa25x_udc"; 228 229 static const char ep0name [] = "ep0"; 230 231 232 #ifdef CONFIG_ARCH_IXP4XX 233 234 /* cpu-specific register addresses are compiled in to this code */ 235 #ifdef CONFIG_ARCH_PXA 236 #error "Can't configure both IXP and PXA" 237 #endif 238 239 /* IXP doesn't yet support <linux/clk.h> */ 240 #define clk_get(dev,name) NULL 241 #define clk_enable(clk) do { } while (0) 242 #define clk_disable(clk) do { } while (0) 243 #define clk_put(clk) do { } while (0) 244 245 #endif 246 247 #include "pxa25x_udc.h" 248 249 250 #ifdef CONFIG_USB_PXA25X_SMALL 251 #define SIZE_STR " (small)" 252 #else 253 #define SIZE_STR "" 254 #endif 255 256 /* --------------------------------------------------------------------------- 257 * endpoint related parts of the api to the usb controller hardware, 258 * used by gadget driver; and the inner talker-to-hardware core. 259 * --------------------------------------------------------------------------- 260 */ 261 262 static void pxa25x_ep_fifo_flush (struct usb_ep *ep); 263 static void nuke (struct pxa25x_ep *, int status); 264 265 /* one GPIO should control a D+ pullup, so host sees this device (or not) */ 266 static void pullup_off(void) 267 { 268 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 269 int off_level = mach->gpio_pullup_inverted; 270 271 if (gpio_is_valid(mach->gpio_pullup)) 272 gpio_set_value(mach->gpio_pullup, off_level); 273 else if (mach->udc_command) 274 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 275 } 276 277 static void pullup_on(void) 278 { 279 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 280 int on_level = !mach->gpio_pullup_inverted; 281 282 if (gpio_is_valid(mach->gpio_pullup)) 283 gpio_set_value(mach->gpio_pullup, on_level); 284 else if (mach->udc_command) 285 mach->udc_command(PXA2XX_UDC_CMD_CONNECT); 286 } 287 288 #if defined(CONFIG_CPU_BIG_ENDIAN) 289 /* 290 * IXP4xx has its buses wired up in a way that relies on never doing any 291 * byte swaps, independent of whether it runs in big-endian or little-endian 292 * mode, as explained by Krzysztof Hałasa. 293 * 294 * We only support pxa25x in little-endian mode, but it is very likely 295 * that it works the same way. 
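 *
 * Either way, each UDC register below is accessed as a 32-bit MMIO word at
 * dev->regs + offset: iowrite32be()/ioread32be() on big-endian builds,
 * plain writel()/readl() otherwise.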
 */
static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	iowrite32be(val, dev->regs + reg);
}

static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return ioread32be(dev->regs + reg);
}
#else
static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	writel(val, dev->regs + reg);
}

static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return readl(dev->regs + reg);
}
#endif

static void pio_irq_enable(struct pxa25x_ep *ep)
{
	u32 bEndpointAddress = ep->bEndpointAddress & 0xf;

	if (bEndpointAddress < 8)
		udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) &
						~(1 << bEndpointAddress));
	else {
		bEndpointAddress -= 8;
		udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) &
						~(1 << bEndpointAddress));
	}
}

static void pio_irq_disable(struct pxa25x_ep *ep)
{
	u32 bEndpointAddress = ep->bEndpointAddress & 0xf;

	if (bEndpointAddress < 8)
		udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) |
						(1 << bEndpointAddress));
	else {
		bEndpointAddress -= 8;
		udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) |
						(1 << bEndpointAddress));
	}
}

/* The UDCCR reg contains mask and interrupt status bits,
 * so using '|=' isn't safe as it may ack an interrupt.
 */
#define UDCCR_MASK_BITS		(UDCCR_REM | UDCCR_SRM | UDCCR_UDE)

static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
	u32 udccr = udc_get_reg(dev, UDCCR);

	udc_set_reg(dev, UDCCR,
		(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
}

static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
	u32 udccr = udc_get_reg(dev, UDCCR);

	udc_set_reg(dev, UDCCR,
		(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
}

static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
{
	/* udccr contains the bits we don't want to change */
	u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;

	udc_set_reg(dev, UDCCR, udccr | (mask & ~UDCCR_MASK_BITS));
}

static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_udccs);
}

static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
{
	udc_set_reg(ep->dev, ep->regoff_udccs, data);
}

static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
{
	return udc_get_reg(dev, UDCCS0);
}

static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
{
	udc_set_reg(dev, UDCCS0, data);
}

static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_uddr);
}

static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
{
	udc_set_reg(ep->dev, ep->regoff_uddr, data);
}

static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_ubcr);
}

/*
 * endpoint enable/disable
 *
 * we need to verify the descriptors used to enable endpoints. since pxa25x
 * endpoint configurations are fixed, and are pretty much always enabled,
 * there's not a lot to manage here.
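 *
 * for illustration only (not taken from this file), a descriptor that
 * pxa25x_ep_enable() accepts for "ep1in-bulk" looks roughly like:
 *
 *	.bEndpointAddress	= USB_DIR_IN | 1,
 *	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *	.wMaxPacketSize		= cpu_to_le16(BULK_FIFO_SIZE),
 *
 * since the address, the transfer type and (for bulk) the full
 * BULK_FIFO_SIZE maxpacket must all match the fixed hardware endpoint.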
414 * 415 * because pxa25x can't selectively initialize bulk (or interrupt) endpoints, 416 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except 417 * for a single interface (with only the default altsetting) and for gadget 418 * drivers that don't halt endpoints (not reset by set_interface). that also 419 * means that if you use ISO, you must violate the USB spec rule that all 420 * iso endpoints must be in non-default altsettings. 421 */ 422 static int pxa25x_ep_enable (struct usb_ep *_ep, 423 const struct usb_endpoint_descriptor *desc) 424 { 425 struct pxa25x_ep *ep; 426 struct pxa25x_udc *dev; 427 428 ep = container_of (_ep, struct pxa25x_ep, ep); 429 if (!_ep || !desc || _ep->name == ep0name 430 || desc->bDescriptorType != USB_DT_ENDPOINT 431 || ep->bEndpointAddress != desc->bEndpointAddress 432 || ep->fifo_size < usb_endpoint_maxp (desc)) { 433 DMSG("%s, bad ep or descriptor\n", __func__); 434 return -EINVAL; 435 } 436 437 /* xfer types must match, except that interrupt ~= bulk */ 438 if (ep->bmAttributes != desc->bmAttributes 439 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK 440 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) { 441 DMSG("%s, %s type mismatch\n", __func__, _ep->name); 442 return -EINVAL; 443 } 444 445 /* hardware _could_ do smaller, but driver doesn't */ 446 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK 447 && usb_endpoint_maxp (desc) 448 != BULK_FIFO_SIZE) 449 || !desc->wMaxPacketSize) { 450 DMSG("%s, bad %s maxpacket\n", __func__, _ep->name); 451 return -ERANGE; 452 } 453 454 dev = ep->dev; 455 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) { 456 DMSG("%s, bogus device state\n", __func__); 457 return -ESHUTDOWN; 458 } 459 460 ep->ep.desc = desc; 461 ep->stopped = 0; 462 ep->pio_irqs = 0; 463 ep->ep.maxpacket = usb_endpoint_maxp (desc); 464 465 /* flush fifo (mostly for OUT buffers) */ 466 pxa25x_ep_fifo_flush (_ep); 467 468 /* ... reset halt state too, if we could ... */ 469 470 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name); 471 return 0; 472 } 473 474 static int pxa25x_ep_disable (struct usb_ep *_ep) 475 { 476 struct pxa25x_ep *ep; 477 unsigned long flags; 478 479 ep = container_of (_ep, struct pxa25x_ep, ep); 480 if (!_ep || !ep->ep.desc) { 481 DMSG("%s, %s not enabled\n", __func__, 482 _ep ? ep->ep.name : NULL); 483 return -EINVAL; 484 } 485 local_irq_save(flags); 486 487 nuke (ep, -ESHUTDOWN); 488 489 /* flush fifo (mostly for IN buffers) */ 490 pxa25x_ep_fifo_flush (_ep); 491 492 ep->ep.desc = NULL; 493 ep->stopped = 1; 494 495 local_irq_restore(flags); 496 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name); 497 return 0; 498 } 499 500 /*-------------------------------------------------------------------------*/ 501 502 /* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers 503 * must still pass correctly initialized endpoints, since other controller 504 * drivers may care about how it's currently set up (dma issues etc). 
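 *
 * for reference, gadget drivers reach these through the generic wrappers,
 * e.g. (illustration only):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	...
 *	usb_ep_free_request(ep, req);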
505 */ 506 507 /* 508 * pxa25x_ep_alloc_request - allocate a request data structure 509 */ 510 static struct usb_request * 511 pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) 512 { 513 struct pxa25x_request *req; 514 515 req = kzalloc(sizeof(*req), gfp_flags); 516 if (!req) 517 return NULL; 518 519 INIT_LIST_HEAD (&req->queue); 520 return &req->req; 521 } 522 523 524 /* 525 * pxa25x_ep_free_request - deallocate a request data structure 526 */ 527 static void 528 pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req) 529 { 530 struct pxa25x_request *req; 531 532 req = container_of (_req, struct pxa25x_request, req); 533 WARN_ON(!list_empty (&req->queue)); 534 kfree(req); 535 } 536 537 /*-------------------------------------------------------------------------*/ 538 539 /* 540 * done - retire a request; caller blocked irqs 541 */ 542 static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status) 543 { 544 unsigned stopped = ep->stopped; 545 546 list_del_init(&req->queue); 547 548 if (likely (req->req.status == -EINPROGRESS)) 549 req->req.status = status; 550 else 551 status = req->req.status; 552 553 if (status && status != -ESHUTDOWN) 554 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n", 555 ep->ep.name, &req->req, status, 556 req->req.actual, req->req.length); 557 558 /* don't modify queue heads during completion callback */ 559 ep->stopped = 1; 560 usb_gadget_giveback_request(&ep->ep, &req->req); 561 ep->stopped = stopped; 562 } 563 564 565 static inline void ep0_idle (struct pxa25x_udc *dev) 566 { 567 dev->ep0state = EP0_IDLE; 568 } 569 570 static int 571 write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max) 572 { 573 u8 *buf; 574 unsigned length, count; 575 576 buf = req->req.buf + req->req.actual; 577 prefetch(buf); 578 579 /* how big will this packet be? */ 580 length = min(req->req.length - req->req.actual, max); 581 req->req.actual += length; 582 583 count = length; 584 while (likely(count--)) 585 udc_ep_set_UDDR(ep, *buf++); 586 587 return length; 588 } 589 590 /* 591 * write to an IN endpoint fifo, as many packets as possible. 592 * irqs will use this to write the rest later. 593 * caller guarantees at least one packet buffer is ready (or a zlp). 594 */ 595 static int 596 write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 597 { 598 unsigned max; 599 600 max = usb_endpoint_maxp(ep->ep.desc); 601 do { 602 unsigned count; 603 int is_last, is_short; 604 605 count = write_packet(ep, req, max); 606 607 /* last packet is usually short (or a zlp) */ 608 if (unlikely (count != max)) 609 is_last = is_short = 1; 610 else { 611 if (likely(req->req.length != req->req.actual) 612 || req->req.zero) 613 is_last = 0; 614 else 615 is_last = 1; 616 /* interrupt/iso maxpacket may not fill the fifo */ 617 is_short = unlikely (max < ep->fifo_size); 618 } 619 620 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n", 621 ep->ep.name, count, 622 is_last ? "/L" : "", is_short ? "/S" : "", 623 req->req.length - req->req.actual, req); 624 625 /* let loose that packet. maybe try writing another one, 626 * double buffering might work. TSP, TPC, and TFS 627 * bit values are the same for all normal IN endpoints. 
628 */ 629 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC); 630 if (is_short) 631 udc_ep_set_UDCCS(ep, UDCCS_BI_TSP); 632 633 /* requests complete when all IN data is in the FIFO */ 634 if (is_last) { 635 done (ep, req, 0); 636 if (list_empty(&ep->queue)) 637 pio_irq_disable(ep); 638 return 1; 639 } 640 641 // TODO experiment: how robust can fifo mode tweaking be? 642 // double buffering is off in the default fifo mode, which 643 // prevents TFS from being set here. 644 645 } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS); 646 return 0; 647 } 648 649 /* caller asserts req->pending (ep0 irq status nyet cleared); starts 650 * ep0 data stage. these chips want very simple state transitions. 651 */ 652 static inline 653 void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag) 654 { 655 udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR); 656 udc_set_reg(dev, USIR0, USIR0_IR0); 657 dev->req_pending = 0; 658 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n", 659 __func__, tag, udc_ep0_get_UDCCS(dev), flags); 660 } 661 662 static int 663 write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 664 { 665 struct pxa25x_udc *dev = ep->dev; 666 unsigned count; 667 int is_short; 668 669 count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE); 670 ep->dev->stats.write.bytes += count; 671 672 /* last packet "must be" short (or a zlp) */ 673 is_short = (count != EP0_FIFO_SIZE); 674 675 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count, 676 req->req.length - req->req.actual, req); 677 678 if (unlikely (is_short)) { 679 if (ep->dev->req_pending) 680 ep0start(ep->dev, UDCCS0_IPR, "short IN"); 681 else 682 udc_ep0_set_UDCCS(dev, UDCCS0_IPR); 683 684 count = req->req.length; 685 done (ep, req, 0); 686 ep0_idle(ep->dev); 687 #ifndef CONFIG_ARCH_IXP4XX 688 #if 1 689 /* This seems to get rid of lost status irqs in some cases: 690 * host responds quickly, or next request involves config 691 * change automagic, or should have been hidden, or ... 692 * 693 * FIXME get rid of all udelays possible... 694 */ 695 if (count >= EP0_FIFO_SIZE) { 696 count = 100; 697 do { 698 if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) { 699 /* clear OPR, generate ack */ 700 udc_ep0_set_UDCCS(dev, UDCCS0_OPR); 701 break; 702 } 703 count--; 704 udelay(1); 705 } while (count); 706 } 707 #endif 708 #endif 709 } else if (ep->dev->req_pending) 710 ep0start(ep->dev, 0, "IN"); 711 return is_short; 712 } 713 714 715 /* 716 * read_fifo - unload packet(s) from the fifo we use for usb OUT 717 * transfers and put them into the request. caller should have made 718 * sure there's at least one packet ready. 719 * 720 * returns true if the request completed because of short packet or the 721 * request buffer having filled (and maybe overran till end-of-packet). 722 */ 723 static int 724 read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) 725 { 726 for (;;) { 727 u32 udccs; 728 u8 *buf; 729 unsigned bufferspace, count, is_short; 730 731 /* make sure there's a packet in the FIFO. 732 * UDCCS_{BO,IO}_RPC are all the same bit value. 733 * UDCCS_{BO,IO}_RNE are all the same bit value. 
		 */
		udccs = udc_ep_get_UDCCS(ep);
		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
			break;
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);
		bufferspace = req->req.length - req->req.actual;

		/* read all bytes from this packet */
		if (likely (udccs & UDCCS_BO_RNE)) {
			count = 1 + (0x0ff & udc_ep_get_UBCR(ep));
			req->req.actual += min (count, bufferspace);
		} else /* zlp */
			count = 0;
		is_short = (count < ep->ep.maxpacket);
		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
			ep->ep.name, udccs, count,
			is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
		while (likely (count-- != 0)) {
			u8 byte = (u8) udc_ep_get_UDDR(ep);

			if (unlikely (bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data.
				 */
				if (req->req.status != -EOVERFLOW)
					DMSG("%s overflow %d\n",
						ep->ep.name, count);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}
		udc_ep_set_UDCCS(ep, UDCCS_BO_RPC);
		/* RPC/RSP/RNE could now reflect the other packet buffer */

		/* iso is one request per packet */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
			if (udccs & UDCCS_IO_ROF)
				req->req.status = -EHOSTUNREACH;
			/* more like "is_done" */
			is_short = 1;
		}

		/* completion */
		if (is_short || req->req.actual == req->req.length) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable(ep);
			return 1;
		}

		/* finished that packet. the next one may be waiting... */
	}
	return 0;
}

/*
 * special ep0 version of the above. no UBCR0 or double buffering; status
 * handshaking is magic. most device protocols don't need control-OUT.
 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
 * protocols do use them.
 */
static int
read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	u8 *buf, byte;
	unsigned bufferspace;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
		byte = (u8) udc_ep_get_UDDR(ep);

		if (unlikely (bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DMSG("%s overflow\n", ep->ep.name);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			req->req.actual++;
			bufferspace--;
		}
	}

	udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);

	/* completion */
	if (req->req.actual >= req->req.length)
		return 1;

	/* finished that packet. the next one may be waiting...
*/ 834 return 0; 835 } 836 837 /*-------------------------------------------------------------------------*/ 838 839 static int 840 pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 841 { 842 struct pxa25x_request *req; 843 struct pxa25x_ep *ep; 844 struct pxa25x_udc *dev; 845 unsigned long flags; 846 847 req = container_of(_req, struct pxa25x_request, req); 848 if (unlikely (!_req || !_req->complete || !_req->buf 849 || !list_empty(&req->queue))) { 850 DMSG("%s, bad params\n", __func__); 851 return -EINVAL; 852 } 853 854 ep = container_of(_ep, struct pxa25x_ep, ep); 855 if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) { 856 DMSG("%s, bad ep\n", __func__); 857 return -EINVAL; 858 } 859 860 dev = ep->dev; 861 if (unlikely (!dev->driver 862 || dev->gadget.speed == USB_SPEED_UNKNOWN)) { 863 DMSG("%s, bogus device state\n", __func__); 864 return -ESHUTDOWN; 865 } 866 867 /* iso is always one packet per request, that's the only way 868 * we can report per-packet status. that also helps with dma. 869 */ 870 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC 871 && req->req.length > usb_endpoint_maxp(ep->ep.desc))) 872 return -EMSGSIZE; 873 874 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n", 875 _ep->name, _req, _req->length, _req->buf); 876 877 local_irq_save(flags); 878 879 _req->status = -EINPROGRESS; 880 _req->actual = 0; 881 882 /* kickstart this i/o queue? */ 883 if (list_empty(&ep->queue) && !ep->stopped) { 884 if (ep->ep.desc == NULL/* ep0 */) { 885 unsigned length = _req->length; 886 887 switch (dev->ep0state) { 888 case EP0_IN_DATA_PHASE: 889 dev->stats.write.ops++; 890 if (write_ep0_fifo(ep, req)) 891 req = NULL; 892 break; 893 894 case EP0_OUT_DATA_PHASE: 895 dev->stats.read.ops++; 896 /* messy ... */ 897 if (dev->req_config) { 898 DBG(DBG_VERBOSE, "ep0 config ack%s\n", 899 dev->has_cfr ? "" : " raced"); 900 if (dev->has_cfr) 901 udc_set_reg(dev, UDCCFR, UDCCFR_AREN | 902 UDCCFR_ACM | UDCCFR_MB1); 903 done(ep, req, 0); 904 dev->ep0state = EP0_END_XFER; 905 local_irq_restore (flags); 906 return 0; 907 } 908 if (dev->req_pending) 909 ep0start(dev, UDCCS0_IPR, "OUT"); 910 if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0 911 && read_ep0_fifo(ep, req))) { 912 ep0_idle(dev); 913 done(ep, req, 0); 914 req = NULL; 915 } 916 break; 917 918 default: 919 DMSG("ep0 i/o, odd state %d\n", dev->ep0state); 920 local_irq_restore (flags); 921 return -EL2HLT; 922 } 923 /* can the FIFO can satisfy the request immediately? */ 924 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) { 925 if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0 926 && write_fifo(ep, req)) 927 req = NULL; 928 } else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0 929 && read_fifo(ep, req)) { 930 req = NULL; 931 } 932 933 if (likely(req && ep->ep.desc)) 934 pio_irq_enable(ep); 935 } 936 937 /* pio or dma irq handler advances the queue. 
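	 * (only the pio path is used by this driver; see the note at the
	 * top of the file about DMA.)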
*/ 938 if (likely(req != NULL)) 939 list_add_tail(&req->queue, &ep->queue); 940 local_irq_restore(flags); 941 942 return 0; 943 } 944 945 946 /* 947 * nuke - dequeue ALL requests 948 */ 949 static void nuke(struct pxa25x_ep *ep, int status) 950 { 951 struct pxa25x_request *req; 952 953 /* called with irqs blocked */ 954 while (!list_empty(&ep->queue)) { 955 req = list_entry(ep->queue.next, 956 struct pxa25x_request, 957 queue); 958 done(ep, req, status); 959 } 960 if (ep->ep.desc) 961 pio_irq_disable(ep); 962 } 963 964 965 /* dequeue JUST ONE request */ 966 static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 967 { 968 struct pxa25x_ep *ep; 969 struct pxa25x_request *req; 970 unsigned long flags; 971 972 ep = container_of(_ep, struct pxa25x_ep, ep); 973 if (!_ep || ep->ep.name == ep0name) 974 return -EINVAL; 975 976 local_irq_save(flags); 977 978 /* make sure it's actually queued on this endpoint */ 979 list_for_each_entry (req, &ep->queue, queue) { 980 if (&req->req == _req) 981 break; 982 } 983 if (&req->req != _req) { 984 local_irq_restore(flags); 985 return -EINVAL; 986 } 987 988 done(ep, req, -ECONNRESET); 989 990 local_irq_restore(flags); 991 return 0; 992 } 993 994 /*-------------------------------------------------------------------------*/ 995 996 static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value) 997 { 998 struct pxa25x_ep *ep; 999 unsigned long flags; 1000 1001 ep = container_of(_ep, struct pxa25x_ep, ep); 1002 if (unlikely (!_ep 1003 || (!ep->ep.desc && ep->ep.name != ep0name)) 1004 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 1005 DMSG("%s, bad ep\n", __func__); 1006 return -EINVAL; 1007 } 1008 if (value == 0) { 1009 /* this path (reset toggle+halt) is needed to implement 1010 * SET_INTERFACE on normal hardware. but it can't be 1011 * done from software on the PXA UDC, and the hardware 1012 * forgets to do it as part of SET_INTERFACE automagic. 
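		 * so a gadget driver calling usb_ep_clear_halt() on this UDC
		 * just gets -EROFS back (below); only the host clearing the
		 * halt feature actually recovers the endpoint.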
1013 */ 1014 DMSG("only host can clear %s halt\n", _ep->name); 1015 return -EROFS; 1016 } 1017 1018 local_irq_save(flags); 1019 1020 if ((ep->bEndpointAddress & USB_DIR_IN) != 0 1021 && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0 1022 || !list_empty(&ep->queue))) { 1023 local_irq_restore(flags); 1024 return -EAGAIN; 1025 } 1026 1027 /* FST bit is the same for control, bulk in, bulk out, interrupt in */ 1028 udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF); 1029 1030 /* ep0 needs special care */ 1031 if (!ep->ep.desc) { 1032 start_watchdog(ep->dev); 1033 ep->dev->req_pending = 0; 1034 ep->dev->ep0state = EP0_STALL; 1035 1036 /* and bulk/intr endpoints like dropping stalls too */ 1037 } else { 1038 unsigned i; 1039 for (i = 0; i < 1000; i += 20) { 1040 if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST) 1041 break; 1042 udelay(20); 1043 } 1044 } 1045 local_irq_restore(flags); 1046 1047 DBG(DBG_VERBOSE, "%s halt\n", _ep->name); 1048 return 0; 1049 } 1050 1051 static int pxa25x_ep_fifo_status(struct usb_ep *_ep) 1052 { 1053 struct pxa25x_ep *ep; 1054 1055 ep = container_of(_ep, struct pxa25x_ep, ep); 1056 if (!_ep) { 1057 DMSG("%s, bad ep\n", __func__); 1058 return -ENODEV; 1059 } 1060 /* pxa can't report unclaimed bytes from IN fifos */ 1061 if ((ep->bEndpointAddress & USB_DIR_IN) != 0) 1062 return -EOPNOTSUPP; 1063 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN 1064 || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0) 1065 return 0; 1066 else 1067 return (udc_ep_get_UBCR(ep) & 0xfff) + 1; 1068 } 1069 1070 static void pxa25x_ep_fifo_flush(struct usb_ep *_ep) 1071 { 1072 struct pxa25x_ep *ep; 1073 1074 ep = container_of(_ep, struct pxa25x_ep, ep); 1075 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) { 1076 DMSG("%s, bad ep\n", __func__); 1077 return; 1078 } 1079 1080 /* toggle and halt bits stay unchanged */ 1081 1082 /* for OUT, just read and discard the FIFO contents. */ 1083 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) { 1084 while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0) 1085 (void)udc_ep_get_UDDR(ep); 1086 return; 1087 } 1088 1089 /* most IN status is the same, but ISO can't stall */ 1090 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR 1091 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC 1092 ? 
0 : UDCCS_BI_SST)); 1093 } 1094 1095 1096 static struct usb_ep_ops pxa25x_ep_ops = { 1097 .enable = pxa25x_ep_enable, 1098 .disable = pxa25x_ep_disable, 1099 1100 .alloc_request = pxa25x_ep_alloc_request, 1101 .free_request = pxa25x_ep_free_request, 1102 1103 .queue = pxa25x_ep_queue, 1104 .dequeue = pxa25x_ep_dequeue, 1105 1106 .set_halt = pxa25x_ep_set_halt, 1107 .fifo_status = pxa25x_ep_fifo_status, 1108 .fifo_flush = pxa25x_ep_fifo_flush, 1109 }; 1110 1111 1112 /* --------------------------------------------------------------------------- 1113 * device-scoped parts of the api to the usb controller hardware 1114 * --------------------------------------------------------------------------- 1115 */ 1116 1117 static int pxa25x_udc_get_frame(struct usb_gadget *_gadget) 1118 { 1119 struct pxa25x_udc *dev; 1120 1121 dev = container_of(_gadget, struct pxa25x_udc, gadget); 1122 return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) | 1123 (udc_get_reg(dev, UFNRL) & 0xff); 1124 } 1125 1126 static int pxa25x_udc_wakeup(struct usb_gadget *_gadget) 1127 { 1128 struct pxa25x_udc *udc; 1129 1130 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1131 1132 /* host may not have enabled remote wakeup */ 1133 if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0) 1134 return -EHOSTUNREACH; 1135 udc_set_mask_UDCCR(udc, UDCCR_RSM); 1136 return 0; 1137 } 1138 1139 static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *); 1140 static void udc_enable (struct pxa25x_udc *); 1141 static void udc_disable(struct pxa25x_udc *); 1142 1143 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not 1144 * in active use. 1145 */ 1146 static int pullup(struct pxa25x_udc *udc) 1147 { 1148 int is_active = udc->vbus && udc->pullup && !udc->suspended; 1149 DMSG("%s\n", is_active ? "active" : "inactive"); 1150 if (is_active) { 1151 if (!udc->active) { 1152 udc->active = 1; 1153 /* Enable clock for USB device */ 1154 clk_enable(udc->clk); 1155 udc_enable(udc); 1156 } 1157 } else { 1158 if (udc->active) { 1159 if (udc->gadget.speed != USB_SPEED_UNKNOWN) { 1160 DMSG("disconnect %s\n", udc->driver 1161 ? udc->driver->driver.name 1162 : "(no driver)"); 1163 stop_activity(udc, udc->driver); 1164 } 1165 udc_disable(udc); 1166 /* Disable clock for USB device */ 1167 clk_disable(udc->clk); 1168 udc->active = 0; 1169 } 1170 1171 } 1172 return 0; 1173 } 1174 1175 /* VBUS reporting logically comes from a transceiver */ 1176 static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active) 1177 { 1178 struct pxa25x_udc *udc; 1179 1180 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1181 udc->vbus = is_active; 1182 DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); 1183 pullup(udc); 1184 return 0; 1185 } 1186 1187 /* drivers may have software control over D+ pullup */ 1188 static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active) 1189 { 1190 struct pxa25x_udc *udc; 1191 1192 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1193 1194 /* not all boards support pullup control */ 1195 if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command) 1196 return -EOPNOTSUPP; 1197 1198 udc->pullup = (is_active != 0); 1199 pullup(udc); 1200 return 0; 1201 } 1202 1203 /* boards may consume current from VBUS, up to 100-500mA based on config. 1204 * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs 1205 * violate USB specs. 
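 *
 * gadget drivers report their budget with usb_gadget_vbus_draw(), which
 * lands in the vbus_draw op below; here the request is simply passed on to
 * the transceiver via usb_phy_set_power() when one is present.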
1206 */ 1207 static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA) 1208 { 1209 struct pxa25x_udc *udc; 1210 1211 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1212 1213 if (!IS_ERR_OR_NULL(udc->transceiver)) 1214 return usb_phy_set_power(udc->transceiver, mA); 1215 return -EOPNOTSUPP; 1216 } 1217 1218 static int pxa25x_udc_start(struct usb_gadget *g, 1219 struct usb_gadget_driver *driver); 1220 static int pxa25x_udc_stop(struct usb_gadget *g); 1221 1222 static const struct usb_gadget_ops pxa25x_udc_ops = { 1223 .get_frame = pxa25x_udc_get_frame, 1224 .wakeup = pxa25x_udc_wakeup, 1225 .vbus_session = pxa25x_udc_vbus_session, 1226 .pullup = pxa25x_udc_pullup, 1227 .vbus_draw = pxa25x_udc_vbus_draw, 1228 .udc_start = pxa25x_udc_start, 1229 .udc_stop = pxa25x_udc_stop, 1230 }; 1231 1232 /*-------------------------------------------------------------------------*/ 1233 1234 #ifdef CONFIG_USB_GADGET_DEBUG_FS 1235 1236 static int 1237 udc_seq_show(struct seq_file *m, void *_d) 1238 { 1239 struct pxa25x_udc *dev = m->private; 1240 unsigned long flags; 1241 int i; 1242 u32 tmp; 1243 1244 local_irq_save(flags); 1245 1246 /* basic device status */ 1247 seq_printf(m, DRIVER_DESC "\n" 1248 "%s version: %s\nGadget driver: %s\nHost %s\n\n", 1249 driver_name, DRIVER_VERSION SIZE_STR "(pio)", 1250 dev->driver ? dev->driver->driver.name : "(none)", 1251 dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected"); 1252 1253 /* registers for device and ep0 */ 1254 seq_printf(m, 1255 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n", 1256 udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0), 1257 udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0), 1258 udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL)); 1259 1260 tmp = udc_get_reg(dev, UDCCR); 1261 seq_printf(m, 1262 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp, 1263 (tmp & UDCCR_REM) ? " rem" : "", 1264 (tmp & UDCCR_RSTIR) ? " rstir" : "", 1265 (tmp & UDCCR_SRM) ? " srm" : "", 1266 (tmp & UDCCR_SUSIR) ? " susir" : "", 1267 (tmp & UDCCR_RESIR) ? " resir" : "", 1268 (tmp & UDCCR_RSM) ? " rsm" : "", 1269 (tmp & UDCCR_UDA) ? " uda" : "", 1270 (tmp & UDCCR_UDE) ? " ude" : ""); 1271 1272 tmp = udc_ep0_get_UDCCS(dev); 1273 seq_printf(m, 1274 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp, 1275 (tmp & UDCCS0_SA) ? " sa" : "", 1276 (tmp & UDCCS0_RNE) ? " rne" : "", 1277 (tmp & UDCCS0_FST) ? " fst" : "", 1278 (tmp & UDCCS0_SST) ? " sst" : "", 1279 (tmp & UDCCS0_DRWF) ? " dwrf" : "", 1280 (tmp & UDCCS0_FTF) ? " ftf" : "", 1281 (tmp & UDCCS0_IPR) ? " ipr" : "", 1282 (tmp & UDCCS0_OPR) ? " opr" : ""); 1283 1284 if (dev->has_cfr) { 1285 tmp = udc_get_reg(dev, UDCCFR); 1286 seq_printf(m, 1287 "udccfr %02X =%s%s\n", tmp, 1288 (tmp & UDCCFR_AREN) ? " aren" : "", 1289 (tmp & UDCCFR_ACM) ? 
" acm" : ""); 1290 } 1291 1292 if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver) 1293 goto done; 1294 1295 seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n", 1296 dev->stats.write.bytes, dev->stats.write.ops, 1297 dev->stats.read.bytes, dev->stats.read.ops, 1298 dev->stats.irqs); 1299 1300 /* dump endpoint queues */ 1301 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1302 struct pxa25x_ep *ep = &dev->ep [i]; 1303 struct pxa25x_request *req; 1304 1305 if (i != 0) { 1306 const struct usb_endpoint_descriptor *desc; 1307 1308 desc = ep->ep.desc; 1309 if (!desc) 1310 continue; 1311 tmp = udc_ep_get_UDCCS(&dev->ep[i]); 1312 seq_printf(m, 1313 "%s max %d %s udccs %02x irqs %lu\n", 1314 ep->ep.name, usb_endpoint_maxp(desc), 1315 "pio", tmp, ep->pio_irqs); 1316 /* TODO translate all five groups of udccs bits! */ 1317 1318 } else /* ep0 should only have one transfer queued */ 1319 seq_printf(m, "ep0 max 16 pio irqs %lu\n", 1320 ep->pio_irqs); 1321 1322 if (list_empty(&ep->queue)) { 1323 seq_printf(m, "\t(nothing queued)\n"); 1324 continue; 1325 } 1326 list_for_each_entry(req, &ep->queue, queue) { 1327 seq_printf(m, 1328 "\treq %p len %d/%d buf %p\n", 1329 &req->req, req->req.actual, 1330 req->req.length, req->req.buf); 1331 } 1332 } 1333 1334 done: 1335 local_irq_restore(flags); 1336 return 0; 1337 } 1338 1339 static int 1340 udc_debugfs_open(struct inode *inode, struct file *file) 1341 { 1342 return single_open(file, udc_seq_show, inode->i_private); 1343 } 1344 1345 static const struct file_operations debug_fops = { 1346 .open = udc_debugfs_open, 1347 .read = seq_read, 1348 .llseek = seq_lseek, 1349 .release = single_release, 1350 .owner = THIS_MODULE, 1351 }; 1352 1353 #define create_debug_files(dev) \ 1354 do { \ 1355 dev->debugfs_udc = debugfs_create_file(dev->gadget.name, \ 1356 S_IRUGO, NULL, dev, &debug_fops); \ 1357 } while (0) 1358 #define remove_debug_files(dev) debugfs_remove(dev->debugfs_udc) 1359 1360 #else /* !CONFIG_USB_GADGET_DEBUG_FILES */ 1361 1362 #define create_debug_files(dev) do {} while (0) 1363 #define remove_debug_files(dev) do {} while (0) 1364 1365 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1366 1367 /*-------------------------------------------------------------------------*/ 1368 1369 /* 1370 * udc_disable - disable USB device controller 1371 */ 1372 static void udc_disable(struct pxa25x_udc *dev) 1373 { 1374 /* block all irqs */ 1375 udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM); 1376 udc_set_reg(dev, UICR0, 0xff); 1377 udc_set_reg(dev, UICR1, 0xff); 1378 udc_set_reg(dev, UFNRH, UFNRH_SIM); 1379 1380 /* if hardware supports it, disconnect from usb */ 1381 pullup_off(); 1382 1383 udc_clear_mask_UDCCR(dev, UDCCR_UDE); 1384 1385 ep0_idle (dev); 1386 dev->gadget.speed = USB_SPEED_UNKNOWN; 1387 } 1388 1389 1390 /* 1391 * udc_reinit - initialize software state 1392 */ 1393 static void udc_reinit(struct pxa25x_udc *dev) 1394 { 1395 u32 i; 1396 1397 /* device/ep0 records init */ 1398 INIT_LIST_HEAD (&dev->gadget.ep_list); 1399 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list); 1400 dev->ep0state = EP0_IDLE; 1401 dev->gadget.quirk_altset_not_supp = 1; 1402 1403 /* basic endpoint records init */ 1404 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1405 struct pxa25x_ep *ep = &dev->ep[i]; 1406 1407 if (i != 0) 1408 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list); 1409 1410 ep->ep.desc = NULL; 1411 ep->stopped = 0; 1412 INIT_LIST_HEAD (&ep->queue); 1413 ep->pio_irqs = 0; 1414 usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket); 1415 } 1416 1417 /* the rest was 
statically initialized, and is read-only */ 1418 } 1419 1420 /* until it's enabled, this UDC should be completely invisible 1421 * to any USB host. 1422 */ 1423 static void udc_enable (struct pxa25x_udc *dev) 1424 { 1425 udc_clear_mask_UDCCR(dev, UDCCR_UDE); 1426 1427 /* try to clear these bits before we enable the udc */ 1428 udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); 1429 1430 ep0_idle(dev); 1431 dev->gadget.speed = USB_SPEED_UNKNOWN; 1432 dev->stats.irqs = 0; 1433 1434 /* 1435 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual: 1436 * - enable UDC 1437 * - if RESET is already in progress, ack interrupt 1438 * - unmask reset interrupt 1439 */ 1440 udc_set_mask_UDCCR(dev, UDCCR_UDE); 1441 if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA)) 1442 udc_ack_int_UDCCR(dev, UDCCR_RSTIR); 1443 1444 if (dev->has_cfr /* UDC_RES2 is defined */) { 1445 /* pxa255 (a0+) can avoid a set_config race that could 1446 * prevent gadget drivers from configuring correctly 1447 */ 1448 udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1); 1449 } else { 1450 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1) 1451 * which could result in missing packets and interrupts. 1452 * supposedly one bit per endpoint, controlling whether it 1453 * double buffers or not; ACM/AREN bits fit into the holes. 1454 * zero bits (like USIR0_IRx) disable double buffering. 1455 */ 1456 udc_set_reg(dev, UDC_RES1, 0x00); 1457 udc_set_reg(dev, UDC_RES2, 0x00); 1458 } 1459 1460 /* enable suspend/resume and reset irqs */ 1461 udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM); 1462 1463 /* enable ep0 irqs */ 1464 udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0); 1465 1466 /* if hardware supports it, pullup D+ and wait for reset */ 1467 pullup_on(); 1468 } 1469 1470 1471 /* when a driver is successfully registered, it will receive 1472 * control requests including set_configuration(), which enables 1473 * non-control requests. then usb traffic follows until a 1474 * disconnect is reported. then a host may connect again, or 1475 * the driver might get unbound. 1476 */ 1477 static int pxa25x_udc_start(struct usb_gadget *g, 1478 struct usb_gadget_driver *driver) 1479 { 1480 struct pxa25x_udc *dev = to_pxa25x(g); 1481 int retval; 1482 1483 /* first hook up the driver ... */ 1484 dev->driver = driver; 1485 dev->pullup = 1; 1486 1487 /* ... then enable host detection and ep0; and we're ready 1488 * for set_configuration as well as eventual disconnect. 
1489 */ 1490 /* connect to bus through transceiver */ 1491 if (!IS_ERR_OR_NULL(dev->transceiver)) { 1492 retval = otg_set_peripheral(dev->transceiver->otg, 1493 &dev->gadget); 1494 if (retval) 1495 goto bind_fail; 1496 } 1497 1498 dump_state(dev); 1499 return 0; 1500 bind_fail: 1501 return retval; 1502 } 1503 1504 static void 1505 reset_gadget(struct pxa25x_udc *dev, struct usb_gadget_driver *driver) 1506 { 1507 int i; 1508 1509 /* don't disconnect drivers more than once */ 1510 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1511 driver = NULL; 1512 dev->gadget.speed = USB_SPEED_UNKNOWN; 1513 1514 /* prevent new request submissions, kill any outstanding requests */ 1515 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1516 struct pxa25x_ep *ep = &dev->ep[i]; 1517 1518 ep->stopped = 1; 1519 nuke(ep, -ESHUTDOWN); 1520 } 1521 del_timer_sync(&dev->timer); 1522 1523 /* report reset; the driver is already quiesced */ 1524 if (driver) 1525 usb_gadget_udc_reset(&dev->gadget, driver); 1526 1527 /* re-init driver-visible data structures */ 1528 udc_reinit(dev); 1529 } 1530 1531 static void 1532 stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver) 1533 { 1534 int i; 1535 1536 /* don't disconnect drivers more than once */ 1537 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1538 driver = NULL; 1539 dev->gadget.speed = USB_SPEED_UNKNOWN; 1540 1541 /* prevent new request submissions, kill any outstanding requests */ 1542 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1543 struct pxa25x_ep *ep = &dev->ep[i]; 1544 1545 ep->stopped = 1; 1546 nuke(ep, -ESHUTDOWN); 1547 } 1548 del_timer_sync(&dev->timer); 1549 1550 /* report disconnect; the driver is already quiesced */ 1551 if (driver) 1552 driver->disconnect(&dev->gadget); 1553 1554 /* re-init driver-visible data structures */ 1555 udc_reinit(dev); 1556 } 1557 1558 static int pxa25x_udc_stop(struct usb_gadget*g) 1559 { 1560 struct pxa25x_udc *dev = to_pxa25x(g); 1561 1562 local_irq_disable(); 1563 dev->pullup = 0; 1564 stop_activity(dev, NULL); 1565 local_irq_enable(); 1566 1567 if (!IS_ERR_OR_NULL(dev->transceiver)) 1568 (void) otg_set_peripheral(dev->transceiver->otg, NULL); 1569 1570 dev->driver = NULL; 1571 1572 dump_state(dev); 1573 1574 return 0; 1575 } 1576 1577 /*-------------------------------------------------------------------------*/ 1578 1579 #ifdef CONFIG_ARCH_LUBBOCK 1580 1581 /* Lubbock has separate connect and disconnect irqs. More typical designs 1582 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup. 1583 */ 1584 1585 static irqreturn_t 1586 lubbock_vbus_irq(int irq, void *_dev) 1587 { 1588 struct pxa25x_udc *dev = _dev; 1589 int vbus; 1590 1591 dev->stats.irqs++; 1592 switch (irq) { 1593 case LUBBOCK_USB_IRQ: 1594 vbus = 1; 1595 disable_irq(LUBBOCK_USB_IRQ); 1596 enable_irq(LUBBOCK_USB_DISC_IRQ); 1597 break; 1598 case LUBBOCK_USB_DISC_IRQ: 1599 vbus = 0; 1600 disable_irq(LUBBOCK_USB_DISC_IRQ); 1601 enable_irq(LUBBOCK_USB_IRQ); 1602 break; 1603 default: 1604 return IRQ_NONE; 1605 } 1606 1607 pxa25x_udc_vbus_session(&dev->gadget, vbus); 1608 return IRQ_HANDLED; 1609 } 1610 1611 #endif 1612 1613 1614 /*-------------------------------------------------------------------------*/ 1615 1616 static inline void clear_ep_state (struct pxa25x_udc *dev) 1617 { 1618 unsigned i; 1619 1620 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint 1621 * fifos, and pending transactions mustn't be continued in any case. 
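	 * note that ep0 is deliberately skipped: the loop below starts at 1
	 * and aborts whatever is queued on the data endpoints with
	 * -ECONNABORTED.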
1622 */ 1623 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) 1624 nuke(&dev->ep[i], -ECONNABORTED); 1625 } 1626 1627 static void udc_watchdog(struct timer_list *t) 1628 { 1629 struct pxa25x_udc *dev = from_timer(dev, t, timer); 1630 1631 local_irq_disable(); 1632 if (dev->ep0state == EP0_STALL 1633 && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0 1634 && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) { 1635 udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF); 1636 DBG(DBG_VERBOSE, "ep0 re-stall\n"); 1637 start_watchdog(dev); 1638 } 1639 local_irq_enable(); 1640 } 1641 1642 static void handle_ep0 (struct pxa25x_udc *dev) 1643 { 1644 u32 udccs0 = udc_ep0_get_UDCCS(dev); 1645 struct pxa25x_ep *ep = &dev->ep [0]; 1646 struct pxa25x_request *req; 1647 union { 1648 struct usb_ctrlrequest r; 1649 u8 raw [8]; 1650 u32 word [2]; 1651 } u; 1652 1653 if (list_empty(&ep->queue)) 1654 req = NULL; 1655 else 1656 req = list_entry(ep->queue.next, struct pxa25x_request, queue); 1657 1658 /* clear stall status */ 1659 if (udccs0 & UDCCS0_SST) { 1660 nuke(ep, -EPIPE); 1661 udc_ep0_set_UDCCS(dev, UDCCS0_SST); 1662 del_timer(&dev->timer); 1663 ep0_idle(dev); 1664 } 1665 1666 /* previous request unfinished? non-error iff back-to-back ... */ 1667 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) { 1668 nuke(ep, 0); 1669 del_timer(&dev->timer); 1670 ep0_idle(dev); 1671 } 1672 1673 switch (dev->ep0state) { 1674 case EP0_IDLE: 1675 /* late-breaking status? */ 1676 udccs0 = udc_ep0_get_UDCCS(dev); 1677 1678 /* start control request? */ 1679 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE)) 1680 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) { 1681 int i; 1682 1683 nuke (ep, -EPROTO); 1684 1685 /* read SETUP packet */ 1686 for (i = 0; i < 8; i++) { 1687 if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) { 1688 bad_setup: 1689 DMSG("SETUP %d!\n", i); 1690 goto stall; 1691 } 1692 u.raw [i] = (u8) UDDR0; 1693 } 1694 if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0)) 1695 goto bad_setup; 1696 1697 got_setup: 1698 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1699 u.r.bRequestType, u.r.bRequest, 1700 le16_to_cpu(u.r.wValue), 1701 le16_to_cpu(u.r.wIndex), 1702 le16_to_cpu(u.r.wLength)); 1703 1704 /* cope with automagic for some standard requests. */ 1705 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK) 1706 == USB_TYPE_STANDARD; 1707 dev->req_config = 0; 1708 dev->req_pending = 1; 1709 switch (u.r.bRequest) { 1710 /* hardware restricts gadget drivers here! */ 1711 case USB_REQ_SET_CONFIGURATION: 1712 if (u.r.bRequestType == USB_RECIP_DEVICE) { 1713 /* reflect hardware's automagic 1714 * up to the gadget driver. 1715 */ 1716 config_change: 1717 dev->req_config = 1; 1718 clear_ep_state(dev); 1719 /* if !has_cfr, there's no synch 1720 * else use AREN (later) not SA|OPR 1721 * USIR0_IR0 acts edge sensitive 1722 */ 1723 } 1724 break; 1725 /* ... and here, even more ... */ 1726 case USB_REQ_SET_INTERFACE: 1727 if (u.r.bRequestType == USB_RECIP_INTERFACE) { 1728 /* udc hardware is broken by design: 1729 * - altsetting may only be zero; 1730 * - hw resets all interfaces' eps; 1731 * - ep reset doesn't include halt(?). 
					 */
					DMSG("broken set_interface (%d/%d)\n",
						le16_to_cpu(u.r.wIndex),
						le16_to_cpu(u.r.wValue));
					goto config_change;
				}
				break;
			/* hardware was supposed to hide this */
			case USB_REQ_SET_ADDRESS:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					ep0start(dev, 0, "address");
					return;
				}
				break;
			}

			if (u.r.bRequestType & USB_DIR_IN)
				dev->ep0state = EP0_IN_DATA_PHASE;
			else
				dev->ep0state = EP0_OUT_DATA_PHASE;

			i = dev->driver->setup(&dev->gadget, &u.r);
			if (i < 0) {
				/* hardware automagic preventing STALL... */
				if (dev->req_config) {
					/* hardware sometimes neglects to
					 * tell us about config change events,
					 * so later ones may fail...
					 */
					WARNING("config change %02x fail %d?\n",
						u.r.bRequest, i);
					return;
					/* TODO experiment: if has_cfr,
					 * hardware didn't ACK; maybe we
					 * could actually STALL!
					 */
				}
				DBG(DBG_VERBOSE, "protocol STALL, "
					"%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
stall:
				/* the watchdog timer helps deal with cases
				 * where udc seems to clear FST wrongly, and
				 * then NAKs instead of STALLing.
				 */
				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
				start_watchdog(dev);
				dev->ep0state = EP0_STALL;

				/* deferred i/o == no response yet */
			} else if (dev->req_pending) {
				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
						|| dev->req_std || u.r.wLength))
					ep0start(dev, 0, "defer");
				else
					ep0start(dev, UDCCS0_IPR, "defer/IPR");
			}

			/* expect at least one data or status stage irq */
			return;

		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
				== (UDCCS0_OPR|UDCCS0_SA))) {
			unsigned i;

			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
			 * still observed on a pxa255 a0.
1798 */ 1799 DBG(DBG_VERBOSE, "e131\n"); 1800 nuke(ep, -EPROTO); 1801 1802 /* read SETUP data, but don't trust it too much */ 1803 for (i = 0; i < 8; i++) 1804 u.raw [i] = (u8) UDDR0; 1805 if ((u.r.bRequestType & USB_RECIP_MASK) 1806 > USB_RECIP_OTHER) 1807 goto stall; 1808 if (u.word [0] == 0 && u.word [1] == 0) 1809 goto stall; 1810 goto got_setup; 1811 } else { 1812 /* some random early IRQ: 1813 * - we acked FST 1814 * - IPR cleared 1815 * - OPR got set, without SA (likely status stage) 1816 */ 1817 udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR)); 1818 } 1819 break; 1820 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */ 1821 if (udccs0 & UDCCS0_OPR) { 1822 udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF); 1823 DBG(DBG_VERBOSE, "ep0in premature status\n"); 1824 if (req) 1825 done(ep, req, 0); 1826 ep0_idle(dev); 1827 } else /* irq was IPR clearing */ { 1828 if (req) { 1829 /* this IN packet might finish the request */ 1830 (void) write_ep0_fifo(ep, req); 1831 } /* else IN token before response was written */ 1832 } 1833 break; 1834 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */ 1835 if (udccs0 & UDCCS0_OPR) { 1836 if (req) { 1837 /* this OUT packet might finish the request */ 1838 if (read_ep0_fifo(ep, req)) 1839 done(ep, req, 0); 1840 /* else more OUT packets expected */ 1841 } /* else OUT token before read was issued */ 1842 } else /* irq was IPR clearing */ { 1843 DBG(DBG_VERBOSE, "ep0out premature status\n"); 1844 if (req) 1845 done(ep, req, 0); 1846 ep0_idle(dev); 1847 } 1848 break; 1849 case EP0_END_XFER: 1850 if (req) 1851 done(ep, req, 0); 1852 /* ack control-IN status (maybe in-zlp was skipped) 1853 * also appears after some config change events. 1854 */ 1855 if (udccs0 & UDCCS0_OPR) 1856 udc_ep0_set_UDCCS(dev, UDCCS0_OPR); 1857 ep0_idle(dev); 1858 break; 1859 case EP0_STALL: 1860 udc_ep0_set_UDCCS(dev, UDCCS0_FST); 1861 break; 1862 } 1863 udc_set_reg(dev, USIR0, USIR0_IR0); 1864 } 1865 1866 static void handle_ep(struct pxa25x_ep *ep) 1867 { 1868 struct pxa25x_request *req; 1869 int is_in = ep->bEndpointAddress & USB_DIR_IN; 1870 int completed; 1871 u32 udccs, tmp; 1872 1873 do { 1874 completed = 0; 1875 if (likely (!list_empty(&ep->queue))) 1876 req = list_entry(ep->queue.next, 1877 struct pxa25x_request, queue); 1878 else 1879 req = NULL; 1880 1881 // TODO check FST handling 1882 1883 udccs = udc_ep_get_UDCCS(ep); 1884 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */ 1885 tmp = UDCCS_BI_TUR; 1886 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) 1887 tmp |= UDCCS_BI_SST; 1888 tmp &= udccs; 1889 if (likely (tmp)) 1890 udc_ep_set_UDCCS(ep, tmp); 1891 if (req && likely ((udccs & UDCCS_BI_TFS) != 0)) 1892 completed = write_fifo(ep, req); 1893 1894 } else { /* irq from RPC (or for ISO, ROF) */ 1895 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) 1896 tmp = UDCCS_BO_SST | UDCCS_BO_DME; 1897 else 1898 tmp = UDCCS_IO_ROF | UDCCS_IO_DME; 1899 tmp &= udccs; 1900 if (likely(tmp)) 1901 udc_ep_set_UDCCS(ep, tmp); 1902 1903 /* fifos can hold packets, ready for reading... */ 1904 if (likely(req)) { 1905 completed = read_fifo(ep, req); 1906 } else 1907 pio_irq_disable(ep); 1908 } 1909 ep->pio_irqs++; 1910 } while (completed); 1911 } 1912 1913 /* 1914 * pxa25x_udc_irq - interrupt handler 1915 * 1916 * avoid delays in ep0 processing. the control handshaking isn't always 1917 * under software control (pxa250c0 and the pxa255 are better), and delays 1918 * could cause usb protocol errors. 
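 *
 * the handler below loops until nothing is pending: the UDCCR
 * reset/suspend/resume bits are acked first, then any unmasked USIR0/USIR1
 * endpoint bits are dispatched to handle_ep0() or handle_ep().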
1919 */ 1920 static irqreturn_t 1921 pxa25x_udc_irq(int irq, void *_dev) 1922 { 1923 struct pxa25x_udc *dev = _dev; 1924 int handled; 1925 1926 dev->stats.irqs++; 1927 do { 1928 u32 udccr = udc_get_reg(dev, UDCCR); 1929 1930 handled = 0; 1931 1932 /* SUSpend Interrupt Request */ 1933 if (unlikely(udccr & UDCCR_SUSIR)) { 1934 udc_ack_int_UDCCR(dev, UDCCR_SUSIR); 1935 handled = 1; 1936 DBG(DBG_VERBOSE, "USB suspend\n"); 1937 1938 if (dev->gadget.speed != USB_SPEED_UNKNOWN 1939 && dev->driver 1940 && dev->driver->suspend) 1941 dev->driver->suspend(&dev->gadget); 1942 ep0_idle (dev); 1943 } 1944 1945 /* RESume Interrupt Request */ 1946 if (unlikely(udccr & UDCCR_RESIR)) { 1947 udc_ack_int_UDCCR(dev, UDCCR_RESIR); 1948 handled = 1; 1949 DBG(DBG_VERBOSE, "USB resume\n"); 1950 1951 if (dev->gadget.speed != USB_SPEED_UNKNOWN 1952 && dev->driver 1953 && dev->driver->resume) 1954 dev->driver->resume(&dev->gadget); 1955 } 1956 1957 /* ReSeT Interrupt Request - USB reset */ 1958 if (unlikely(udccr & UDCCR_RSTIR)) { 1959 udc_ack_int_UDCCR(dev, UDCCR_RSTIR); 1960 handled = 1; 1961 1962 if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) { 1963 DBG(DBG_VERBOSE, "USB reset start\n"); 1964 1965 /* reset driver and endpoints, 1966 * in case that's not yet done 1967 */ 1968 reset_gadget(dev, dev->driver); 1969 1970 } else { 1971 DBG(DBG_VERBOSE, "USB reset end\n"); 1972 dev->gadget.speed = USB_SPEED_FULL; 1973 memset(&dev->stats, 0, sizeof dev->stats); 1974 /* driver and endpoints are still reset */ 1975 } 1976 1977 } else { 1978 u32 usir0 = udc_get_reg(dev, USIR0) & 1979 ~udc_get_reg(dev, UICR0); 1980 u32 usir1 = udc_get_reg(dev, USIR1) & 1981 ~udc_get_reg(dev, UICR1); 1982 int i; 1983 1984 if (unlikely (!usir0 && !usir1)) 1985 continue; 1986 1987 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0); 1988 1989 /* control traffic */ 1990 if (usir0 & USIR0_IR0) { 1991 dev->ep[0].pio_irqs++; 1992 handle_ep0(dev); 1993 handled = 1; 1994 } 1995 1996 /* endpoint data transfers */ 1997 for (i = 0; i < 8; i++) { 1998 u32 tmp = 1 << i; 1999 2000 if (i && (usir0 & tmp)) { 2001 handle_ep(&dev->ep[i]); 2002 udc_set_reg(dev, USIR0, 2003 udc_get_reg(dev, USIR0) | tmp); 2004 handled = 1; 2005 } 2006 #ifndef CONFIG_USB_PXA25X_SMALL 2007 if (usir1 & tmp) { 2008 handle_ep(&dev->ep[i+8]); 2009 udc_set_reg(dev, USIR1, 2010 udc_get_reg(dev, USIR1) | tmp); 2011 handled = 1; 2012 } 2013 #endif 2014 } 2015 } 2016 2017 /* we could also ask for 1 msec SOF (SIR) interrupts */ 2018 2019 } while (handled); 2020 return IRQ_HANDLED; 2021 } 2022 2023 /*-------------------------------------------------------------------------*/ 2024 2025 static void nop_release (struct device *dev) 2026 { 2027 DMSG("%s %s\n", __func__, dev_name(dev)); 2028 } 2029 2030 /* this uses load-time allocation and initialization (instead of 2031 * doing it at run-time) to save code, eliminate fault paths, and 2032 * be more obviously correct. 
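 * (with CONFIG_USB_PXA25X_SMALL only ep0 and the ep1/ep2 bulk pair
 * below are populated)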
2033 */ 2034 static struct pxa25x_udc memory = { 2035 .gadget = { 2036 .ops = &pxa25x_udc_ops, 2037 .ep0 = &memory.ep[0].ep, 2038 .name = driver_name, 2039 .dev = { 2040 .init_name = "gadget", 2041 .release = nop_release, 2042 }, 2043 }, 2044 2045 /* control endpoint */ 2046 .ep[0] = { 2047 .ep = { 2048 .name = ep0name, 2049 .ops = &pxa25x_ep_ops, 2050 .maxpacket = EP0_FIFO_SIZE, 2051 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, 2052 USB_EP_CAPS_DIR_ALL), 2053 }, 2054 .dev = &memory, 2055 .regoff_udccs = UDCCS0, 2056 .regoff_uddr = UDDR0, 2057 }, 2058 2059 /* first group of endpoints */ 2060 .ep[1] = { 2061 .ep = { 2062 .name = "ep1in-bulk", 2063 .ops = &pxa25x_ep_ops, 2064 .maxpacket = BULK_FIFO_SIZE, 2065 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2066 USB_EP_CAPS_DIR_IN), 2067 }, 2068 .dev = &memory, 2069 .fifo_size = BULK_FIFO_SIZE, 2070 .bEndpointAddress = USB_DIR_IN | 1, 2071 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2072 .regoff_udccs = UDCCS1, 2073 .regoff_uddr = UDDR1, 2074 }, 2075 .ep[2] = { 2076 .ep = { 2077 .name = "ep2out-bulk", 2078 .ops = &pxa25x_ep_ops, 2079 .maxpacket = BULK_FIFO_SIZE, 2080 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2081 USB_EP_CAPS_DIR_OUT), 2082 }, 2083 .dev = &memory, 2084 .fifo_size = BULK_FIFO_SIZE, 2085 .bEndpointAddress = 2, 2086 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2087 .regoff_udccs = UDCCS2, 2088 .regoff_ubcr = UBCR2, 2089 .regoff_uddr = UDDR2, 2090 }, 2091 #ifndef CONFIG_USB_PXA25X_SMALL 2092 .ep[3] = { 2093 .ep = { 2094 .name = "ep3in-iso", 2095 .ops = &pxa25x_ep_ops, 2096 .maxpacket = ISO_FIFO_SIZE, 2097 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2098 USB_EP_CAPS_DIR_IN), 2099 }, 2100 .dev = &memory, 2101 .fifo_size = ISO_FIFO_SIZE, 2102 .bEndpointAddress = USB_DIR_IN | 3, 2103 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2104 .regoff_udccs = UDCCS3, 2105 .regoff_uddr = UDDR3, 2106 }, 2107 .ep[4] = { 2108 .ep = { 2109 .name = "ep4out-iso", 2110 .ops = &pxa25x_ep_ops, 2111 .maxpacket = ISO_FIFO_SIZE, 2112 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2113 USB_EP_CAPS_DIR_OUT), 2114 }, 2115 .dev = &memory, 2116 .fifo_size = ISO_FIFO_SIZE, 2117 .bEndpointAddress = 4, 2118 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2119 .regoff_udccs = UDCCS4, 2120 .regoff_ubcr = UBCR4, 2121 .regoff_uddr = UDDR4, 2122 }, 2123 .ep[5] = { 2124 .ep = { 2125 .name = "ep5in-int", 2126 .ops = &pxa25x_ep_ops, 2127 .maxpacket = INT_FIFO_SIZE, 2128 .caps = USB_EP_CAPS(0, 0), 2129 }, 2130 .dev = &memory, 2131 .fifo_size = INT_FIFO_SIZE, 2132 .bEndpointAddress = USB_DIR_IN | 5, 2133 .bmAttributes = USB_ENDPOINT_XFER_INT, 2134 .regoff_udccs = UDCCS5, 2135 .regoff_uddr = UDDR5, 2136 }, 2137 2138 /* second group of endpoints */ 2139 .ep[6] = { 2140 .ep = { 2141 .name = "ep6in-bulk", 2142 .ops = &pxa25x_ep_ops, 2143 .maxpacket = BULK_FIFO_SIZE, 2144 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2145 USB_EP_CAPS_DIR_IN), 2146 }, 2147 .dev = &memory, 2148 .fifo_size = BULK_FIFO_SIZE, 2149 .bEndpointAddress = USB_DIR_IN | 6, 2150 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2151 .regoff_udccs = UDCCS6, 2152 .regoff_uddr = UDDR6, 2153 }, 2154 .ep[7] = { 2155 .ep = { 2156 .name = "ep7out-bulk", 2157 .ops = &pxa25x_ep_ops, 2158 .maxpacket = BULK_FIFO_SIZE, 2159 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2160 USB_EP_CAPS_DIR_OUT), 2161 }, 2162 .dev = &memory, 2163 .fifo_size = BULK_FIFO_SIZE, 2164 .bEndpointAddress = 7, 2165 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2166 .regoff_udccs = UDCCS7, 2167 .regoff_ubcr = UBCR7, 2168 .regoff_uddr = UDDR7, 2169 }, 2170 .ep[8] = { 2171 .ep = { 2172 .name = 
"ep8in-iso", 2173 .ops = &pxa25x_ep_ops, 2174 .maxpacket = ISO_FIFO_SIZE, 2175 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2176 USB_EP_CAPS_DIR_IN), 2177 }, 2178 .dev = &memory, 2179 .fifo_size = ISO_FIFO_SIZE, 2180 .bEndpointAddress = USB_DIR_IN | 8, 2181 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2182 .regoff_udccs = UDCCS8, 2183 .regoff_uddr = UDDR8, 2184 }, 2185 .ep[9] = { 2186 .ep = { 2187 .name = "ep9out-iso", 2188 .ops = &pxa25x_ep_ops, 2189 .maxpacket = ISO_FIFO_SIZE, 2190 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2191 USB_EP_CAPS_DIR_OUT), 2192 }, 2193 .dev = &memory, 2194 .fifo_size = ISO_FIFO_SIZE, 2195 .bEndpointAddress = 9, 2196 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2197 .regoff_udccs = UDCCS9, 2198 .regoff_ubcr = UBCR9, 2199 .regoff_uddr = UDDR9, 2200 }, 2201 .ep[10] = { 2202 .ep = { 2203 .name = "ep10in-int", 2204 .ops = &pxa25x_ep_ops, 2205 .maxpacket = INT_FIFO_SIZE, 2206 .caps = USB_EP_CAPS(0, 0), 2207 }, 2208 .dev = &memory, 2209 .fifo_size = INT_FIFO_SIZE, 2210 .bEndpointAddress = USB_DIR_IN | 10, 2211 .bmAttributes = USB_ENDPOINT_XFER_INT, 2212 .regoff_udccs = UDCCS10, 2213 .regoff_uddr = UDDR10, 2214 }, 2215 2216 /* third group of endpoints */ 2217 .ep[11] = { 2218 .ep = { 2219 .name = "ep11in-bulk", 2220 .ops = &pxa25x_ep_ops, 2221 .maxpacket = BULK_FIFO_SIZE, 2222 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2223 USB_EP_CAPS_DIR_IN), 2224 }, 2225 .dev = &memory, 2226 .fifo_size = BULK_FIFO_SIZE, 2227 .bEndpointAddress = USB_DIR_IN | 11, 2228 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2229 .regoff_udccs = UDCCS11, 2230 .regoff_uddr = UDDR11, 2231 }, 2232 .ep[12] = { 2233 .ep = { 2234 .name = "ep12out-bulk", 2235 .ops = &pxa25x_ep_ops, 2236 .maxpacket = BULK_FIFO_SIZE, 2237 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 2238 USB_EP_CAPS_DIR_OUT), 2239 }, 2240 .dev = &memory, 2241 .fifo_size = BULK_FIFO_SIZE, 2242 .bEndpointAddress = 12, 2243 .bmAttributes = USB_ENDPOINT_XFER_BULK, 2244 .regoff_udccs = UDCCS12, 2245 .regoff_ubcr = UBCR12, 2246 .regoff_uddr = UDDR12, 2247 }, 2248 .ep[13] = { 2249 .ep = { 2250 .name = "ep13in-iso", 2251 .ops = &pxa25x_ep_ops, 2252 .maxpacket = ISO_FIFO_SIZE, 2253 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2254 USB_EP_CAPS_DIR_IN), 2255 }, 2256 .dev = &memory, 2257 .fifo_size = ISO_FIFO_SIZE, 2258 .bEndpointAddress = USB_DIR_IN | 13, 2259 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2260 .regoff_udccs = UDCCS13, 2261 .regoff_uddr = UDDR13, 2262 }, 2263 .ep[14] = { 2264 .ep = { 2265 .name = "ep14out-iso", 2266 .ops = &pxa25x_ep_ops, 2267 .maxpacket = ISO_FIFO_SIZE, 2268 .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, 2269 USB_EP_CAPS_DIR_OUT), 2270 }, 2271 .dev = &memory, 2272 .fifo_size = ISO_FIFO_SIZE, 2273 .bEndpointAddress = 14, 2274 .bmAttributes = USB_ENDPOINT_XFER_ISOC, 2275 .regoff_udccs = UDCCS14, 2276 .regoff_ubcr = UBCR14, 2277 .regoff_uddr = UDDR14, 2278 }, 2279 .ep[15] = { 2280 .ep = { 2281 .name = "ep15in-int", 2282 .ops = &pxa25x_ep_ops, 2283 .maxpacket = INT_FIFO_SIZE, 2284 .caps = USB_EP_CAPS(0, 0), 2285 }, 2286 .dev = &memory, 2287 .fifo_size = INT_FIFO_SIZE, 2288 .bEndpointAddress = USB_DIR_IN | 15, 2289 .bmAttributes = USB_ENDPOINT_XFER_INT, 2290 .regoff_udccs = UDCCS15, 2291 .regoff_uddr = UDDR15, 2292 }, 2293 #endif /* !CONFIG_USB_PXA25X_SMALL */ 2294 }; 2295 2296 #define CP15R0_VENDOR_MASK 0xffffe000 2297 2298 #if defined(CONFIG_ARCH_PXA) 2299 #define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */ 2300 2301 #elif defined(CONFIG_ARCH_IXP4XX) 2302 #define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */ 2303 2304 #endif 2305 2306 
#define CP15R0_PROD_MASK 0x000003f0 2307 #define PXA25x 0x00000100 /* and PXA26x */ 2308 #define PXA210 0x00000120 2309 2310 #define CP15R0_REV_MASK 0x0000000f 2311 2312 #define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK) 2313 2314 #define PXA255_A0 0x00000106 /* or PXA260_B1 */ 2315 #define PXA250_C0 0x00000105 /* or PXA26x_B0 */ 2316 #define PXA250_B2 0x00000104 2317 #define PXA250_B1 0x00000103 /* or PXA260_A0 */ 2318 #define PXA250_B0 0x00000102 2319 #define PXA250_A1 0x00000101 2320 #define PXA250_A0 0x00000100 2321 2322 #define PXA210_C0 0x00000125 2323 #define PXA210_B2 0x00000124 2324 #define PXA210_B1 0x00000123 2325 #define PXA210_B0 0x00000122 2326 #define IXP425_A0 0x000001c1 2327 #define IXP425_B0 0x000001f1 2328 #define IXP465_AD 0x00000200 2329 2330 /* 2331 * probe - binds to the platform device 2332 */ 2333 static int pxa25x_udc_probe(struct platform_device *pdev) 2334 { 2335 struct pxa25x_udc *dev = &memory; 2336 int retval, irq; 2337 u32 chiprev; 2338 struct resource *res; 2339 2340 pr_info("%s: version %s\n", driver_name, DRIVER_VERSION); 2341 2342 /* insist on Intel/ARM/XScale */ 2343 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev)); 2344 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) { 2345 pr_err("%s: not XScale!\n", driver_name); 2346 return -ENODEV; 2347 } 2348 2349 /* trigger chiprev-specific logic */ 2350 switch (chiprev & CP15R0_PRODREV_MASK) { 2351 #if defined(CONFIG_ARCH_PXA) 2352 case PXA255_A0: 2353 dev->has_cfr = 1; 2354 break; 2355 case PXA250_A0: 2356 case PXA250_A1: 2357 /* A0/A1 "not released"; ep 13, 15 unusable */ 2358 /* fall through */ 2359 case PXA250_B2: case PXA210_B2: 2360 case PXA250_B1: case PXA210_B1: 2361 case PXA250_B0: case PXA210_B0: 2362 /* OUT-DMA is broken ... */ 2363 /* fall through */ 2364 case PXA250_C0: case PXA210_C0: 2365 break; 2366 #elif defined(CONFIG_ARCH_IXP4XX) 2367 case IXP425_A0: 2368 case IXP425_B0: 2369 case IXP465_AD: 2370 dev->has_cfr = 1; 2371 break; 2372 #endif 2373 default: 2374 pr_err("%s: unrecognized processor: %08x\n", 2375 driver_name, chiprev); 2376 /* iop3xx, ixp4xx, ... */ 2377 return -ENODEV; 2378 } 2379 2380 irq = platform_get_irq(pdev, 0); 2381 if (irq < 0) 2382 return -ENODEV; 2383 2384 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2385 dev->regs = devm_ioremap_resource(&pdev->dev, res); 2386 if (IS_ERR(dev->regs)) 2387 return PTR_ERR(dev->regs); 2388 2389 dev->clk = devm_clk_get(&pdev->dev, NULL); 2390 if (IS_ERR(dev->clk)) 2391 return PTR_ERR(dev->clk); 2392 2393 pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, 2394 dev->has_cfr ? 
"" : " (!cfr)", 2395 SIZE_STR "(pio)" 2396 ); 2397 2398 /* other non-static parts of init */ 2399 dev->dev = &pdev->dev; 2400 dev->mach = dev_get_platdata(&pdev->dev); 2401 2402 dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); 2403 2404 if (gpio_is_valid(dev->mach->gpio_pullup)) { 2405 retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup, 2406 "pca25x_udc GPIO PULLUP"); 2407 if (retval) { 2408 dev_dbg(&pdev->dev, 2409 "can't get pullup gpio %d, err: %d\n", 2410 dev->mach->gpio_pullup, retval); 2411 goto err; 2412 } 2413 gpio_direction_output(dev->mach->gpio_pullup, 0); 2414 } 2415 2416 timer_setup(&dev->timer, udc_watchdog, 0); 2417 2418 the_controller = dev; 2419 platform_set_drvdata(pdev, dev); 2420 2421 udc_disable(dev); 2422 udc_reinit(dev); 2423 2424 dev->vbus = 0; 2425 2426 /* irq setup after old hardware state is cleaned up */ 2427 retval = devm_request_irq(&pdev->dev, irq, pxa25x_udc_irq, 0, 2428 driver_name, dev); 2429 if (retval != 0) { 2430 pr_err("%s: can't get irq %d, err %d\n", 2431 driver_name, irq, retval); 2432 goto err; 2433 } 2434 dev->got_irq = 1; 2435 2436 #ifdef CONFIG_ARCH_LUBBOCK 2437 if (machine_is_lubbock()) { 2438 retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_DISC_IRQ, 2439 lubbock_vbus_irq, 0, driver_name, 2440 dev); 2441 if (retval != 0) { 2442 pr_err("%s: can't get irq %i, err %d\n", 2443 driver_name, LUBBOCK_USB_DISC_IRQ, retval); 2444 goto err; 2445 } 2446 retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_IRQ, 2447 lubbock_vbus_irq, 0, driver_name, 2448 dev); 2449 if (retval != 0) { 2450 pr_err("%s: can't get irq %i, err %d\n", 2451 driver_name, LUBBOCK_USB_IRQ, retval); 2452 goto err; 2453 } 2454 } else 2455 #endif 2456 create_debug_files(dev); 2457 2458 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget); 2459 if (!retval) 2460 return retval; 2461 2462 remove_debug_files(dev); 2463 err: 2464 if (!IS_ERR_OR_NULL(dev->transceiver)) 2465 dev->transceiver = NULL; 2466 return retval; 2467 } 2468 2469 static void pxa25x_udc_shutdown(struct platform_device *_dev) 2470 { 2471 pullup_off(); 2472 } 2473 2474 static int pxa25x_udc_remove(struct platform_device *pdev) 2475 { 2476 struct pxa25x_udc *dev = platform_get_drvdata(pdev); 2477 2478 if (dev->driver) 2479 return -EBUSY; 2480 2481 usb_del_gadget_udc(&dev->gadget); 2482 dev->pullup = 0; 2483 pullup(dev); 2484 2485 remove_debug_files(dev); 2486 2487 if (!IS_ERR_OR_NULL(dev->transceiver)) 2488 dev->transceiver = NULL; 2489 2490 the_controller = NULL; 2491 return 0; 2492 } 2493 2494 /*-------------------------------------------------------------------------*/ 2495 2496 #ifdef CONFIG_PM 2497 2498 /* USB suspend (controlled by the host) and system suspend (controlled 2499 * by the PXA) don't necessarily work well together. If USB is active, 2500 * the 48 MHz clock is required; so the system can't enter 33 MHz idle 2501 * mode, or any deeper PM saving state. 2502 * 2503 * For now, we punt and forcibly disconnect from the USB host when PXA 2504 * enters any suspend state. While we're disconnected, we always disable 2505 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states. 2506 * Boards without software pullup control shouldn't use those states. 2507 * VBUS IRQs should probably be ignored so that the PXA device just acts 2508 * "dead" to USB hosts until system resume. 
2509 */ 2510 static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state) 2511 { 2512 struct pxa25x_udc *udc = platform_get_drvdata(dev); 2513 unsigned long flags; 2514 2515 if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command) 2516 WARNING("USB host won't detect disconnect!\n"); 2517 udc->suspended = 1; 2518 2519 local_irq_save(flags); 2520 pullup(udc); 2521 local_irq_restore(flags); 2522 2523 return 0; 2524 } 2525 2526 static int pxa25x_udc_resume(struct platform_device *dev) 2527 { 2528 struct pxa25x_udc *udc = platform_get_drvdata(dev); 2529 unsigned long flags; 2530 2531 udc->suspended = 0; 2532 local_irq_save(flags); 2533 pullup(udc); 2534 local_irq_restore(flags); 2535 2536 return 0; 2537 } 2538 2539 #else 2540 #define pxa25x_udc_suspend NULL 2541 #define pxa25x_udc_resume NULL 2542 #endif 2543 2544 /*-------------------------------------------------------------------------*/ 2545 2546 static struct platform_driver udc_driver = { 2547 .shutdown = pxa25x_udc_shutdown, 2548 .probe = pxa25x_udc_probe, 2549 .remove = pxa25x_udc_remove, 2550 .suspend = pxa25x_udc_suspend, 2551 .resume = pxa25x_udc_resume, 2552 .driver = { 2553 .name = "pxa25x-udc", 2554 }, 2555 }; 2556 2557 module_platform_driver(udc_driver); 2558 2559 MODULE_DESCRIPTION(DRIVER_DESC); 2560 MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); 2561 MODULE_LICENSE("GPL"); 2562 MODULE_ALIAS("platform:pxa25x-udc"); 2563