// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

/* Static name/capability table for the five endpoints exposed to the
 * gadget layer; indexed by USB endpoint number. */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");

#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

/* Note: the hardware encodes high speed as 0 and full speed as 1. */
#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

/* Keep in sync with enum bcm63xx_ep0_state (indexed by state value). */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int			ep_num;
	int			n_bds;
	int			ep_type;
	int			dir;
	int			n_fifo_slots;
	int			max_pkt_hs;
	int			max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	   idx      |  n_bds     |         dir |  max_pkt_hs  |
	    |       |    |       |           | |      |       |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int		ch_idx;
	int			ep_num;
	bool			enabled;
	int			max_pkt;
	bool			is_tx;
	struct bcm63xx_ep	*bep;
	struct bcm63xx_udc	*udc;

	struct bcm_enet_desc	*read_bd;
	struct bcm_enet_desc	*write_bd;
	struct bcm_enet_desc	*end_bd;
	int			n_bds_used;

	struct bcm_enet_desc	*bd_ring;
	dma_addr_t		bd_ring_dma;
	unsigned int		n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int		ep_num;
	struct iudma_ch		*iudma;
	struct usb_ep		ep;
	struct bcm63xx_udc	*udc;
	struct list_head	queue;
	unsigned		halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head	queue;		/* ep's requests */
	struct usb_request	req;
	unsigned int		offset;
	unsigned int		bd_bytes;
	struct iudma_ch		*iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;

	struct dentry			*debugfs_root;
	struct dentry			*debugfs_usbd;
	struct dentry			*debugfs_iudma;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

/* Recover our private context from the generic gadget/ep/request structs. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

/* Accessor for the USBD register block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

/* Accessors for the global IUDMA register block. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

/* Accessors for the per-channel IUDMA control (DMAC) registers. */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

/* Accessors for the per-channel IUDMA state (DMAS) registers. */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

/* Gate both the USBH and USBD clocks together; the short delay after
 * enabling gives the blocks time to come up before register access. */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* discarded read back; presumably flushes the posted writes */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	/* discarded read back; presumably flushes the posted write */
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		/* skip the dedicated ep0 RX channel (ep_num == -1) */
		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* ZLP needed when the transfer is a nonzero multiple of max_pkt */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length BD: length field of 1 + USB_ZERO flag */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		/* barrier: address must be visible before ownership flips */
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* hardware still owns this BD; transfer not complete */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		/* halfway through the wait, escalate to a forced halt */
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* ack any pending channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channels are TX, even channels are RX (see iudma_defaults) */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	/* device-managed allocation; freed automatically on driver detach */
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	/* unmask IRQs for all BCM63XX_NUM_IUDMA channels */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	/* ack any events that were already pending */
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
932 */ 933 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc) 934 { 935 set_clocks(udc, true); 936 iudma_uninit(udc); 937 set_clocks(udc, false); 938 939 clk_put(udc->usbd_clk); 940 clk_put(udc->usbh_clk); 941 } 942 943 /** 944 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures. 945 * @udc: Reference to the device controller. 946 */ 947 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc) 948 { 949 int i, rc = 0; 950 u32 val; 951 952 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT, 953 GFP_KERNEL); 954 if (!udc->ep0_ctrl_buf) 955 return -ENOMEM; 956 957 INIT_LIST_HEAD(&udc->gadget.ep_list); 958 for (i = 0; i < BCM63XX_NUM_EP; i++) { 959 struct bcm63xx_ep *bep = &udc->bep[i]; 960 961 bep->ep.name = bcm63xx_ep_info[i].name; 962 bep->ep.caps = bcm63xx_ep_info[i].caps; 963 bep->ep_num = i; 964 bep->ep.ops = &bcm63xx_udc_ep_ops; 965 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list); 966 bep->halted = 0; 967 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT); 968 bep->udc = udc; 969 bep->ep.desc = NULL; 970 INIT_LIST_HEAD(&bep->queue); 971 } 972 973 udc->gadget.ep0 = &udc->bep[0].ep; 974 list_del(&udc->bep[0].ep.ep_list); 975 976 udc->gadget.speed = USB_SPEED_UNKNOWN; 977 udc->ep0state = EP0_SHUTDOWN; 978 979 udc->usbh_clk = clk_get(udc->dev, "usbh"); 980 if (IS_ERR(udc->usbh_clk)) 981 return -EIO; 982 983 udc->usbd_clk = clk_get(udc->dev, "usbd"); 984 if (IS_ERR(udc->usbd_clk)) { 985 clk_put(udc->usbh_clk); 986 return -EIO; 987 } 988 989 set_clocks(udc, true); 990 991 val = USBD_CONTROL_AUTO_CSRS_MASK | 992 USBD_CONTROL_DONE_CSRS_MASK | 993 (irq_coalesce ? 
USBD_CONTROL_RXZSCFG_MASK : 0); 994 usbd_writel(udc, val, USBD_CONTROL_REG); 995 996 val = USBD_STRAPS_APP_SELF_PWR_MASK | 997 USBD_STRAPS_APP_RAM_IF_MASK | 998 USBD_STRAPS_APP_CSRPRGSUP_MASK | 999 USBD_STRAPS_APP_8BITPHY_MASK | 1000 USBD_STRAPS_APP_RMTWKUP_MASK; 1001 1002 if (udc->gadget.max_speed == USB_SPEED_HIGH) 1003 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT); 1004 else 1005 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT); 1006 usbd_writel(udc, val, USBD_STRAPS_REG); 1007 1008 bcm63xx_set_ctrl_irqs(udc, false); 1009 1010 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG); 1011 1012 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) | 1013 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS); 1014 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG); 1015 1016 rc = iudma_init(udc); 1017 set_clocks(udc, false); 1018 if (rc) 1019 bcm63xx_uninit_udc_hw(udc); 1020 1021 return 0; 1022 } 1023 1024 /*********************************************************************** 1025 * Standard EP gadget operations 1026 ***********************************************************************/ 1027 1028 /** 1029 * bcm63xx_ep_enable - Enable one endpoint. 1030 * @ep: Endpoint to enable. 1031 * @desc: Contains max packet, direction, etc. 1032 * 1033 * Most of the endpoint parameters are fixed in this controller, so there 1034 * isn't much for this function to do. 
1035 */ 1036 static int bcm63xx_ep_enable(struct usb_ep *ep, 1037 const struct usb_endpoint_descriptor *desc) 1038 { 1039 struct bcm63xx_ep *bep = our_ep(ep); 1040 struct bcm63xx_udc *udc = bep->udc; 1041 struct iudma_ch *iudma = bep->iudma; 1042 unsigned long flags; 1043 1044 if (!ep || !desc || ep->name == bcm63xx_ep0name) 1045 return -EINVAL; 1046 1047 if (!udc->driver) 1048 return -ESHUTDOWN; 1049 1050 spin_lock_irqsave(&udc->lock, flags); 1051 if (iudma->enabled) { 1052 spin_unlock_irqrestore(&udc->lock, flags); 1053 return -EINVAL; 1054 } 1055 1056 iudma->enabled = true; 1057 BUG_ON(!list_empty(&bep->queue)); 1058 1059 iudma_reset_channel(udc, iudma); 1060 1061 bep->halted = 0; 1062 bcm63xx_set_stall(udc, bep, false); 1063 clear_bit(bep->ep_num, &udc->wedgemap); 1064 1065 ep->desc = desc; 1066 ep->maxpacket = usb_endpoint_maxp(desc); 1067 1068 spin_unlock_irqrestore(&udc->lock, flags); 1069 return 0; 1070 } 1071 1072 /** 1073 * bcm63xx_ep_disable - Disable one endpoint. 1074 * @ep: Endpoint to disable. 
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		/* endpoint was never enabled (or already disabled) */
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	/* stop the DMA channel before tearing down the pending requests */
	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		/* fail every queued request back to the gadget driver */
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			/*
			 * Drop the lock around the completion callback;
			 * the gadget driver may requeue or otherwise call
			 * back into this driver from ->complete().
			 */
			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	/* the usb_request is embedded in our bcm63xx_req wrapper */
	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
1147 * 1148 * If the queue is empty, start this request immediately. Otherwise, add 1149 * it to the list. 1150 * 1151 * ep0 replies are sent through this function from the gadget driver, but 1152 * they are treated differently because they need to be handled by the ep0 1153 * state machine. (Sometimes they are replies to control requests that 1154 * were spoofed by this driver, and so they shouldn't be transmitted at all.) 1155 */ 1156 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req, 1157 gfp_t mem_flags) 1158 { 1159 struct bcm63xx_ep *bep = our_ep(ep); 1160 struct bcm63xx_udc *udc = bep->udc; 1161 struct bcm63xx_req *breq = our_req(req); 1162 unsigned long flags; 1163 int rc = 0; 1164 1165 if (unlikely(!req || !req->complete || !req->buf || !ep)) 1166 return -EINVAL; 1167 1168 req->actual = 0; 1169 req->status = 0; 1170 breq->offset = 0; 1171 1172 if (bep == &udc->bep[0]) { 1173 /* only one reply per request, please */ 1174 if (udc->ep0_reply) 1175 return -EINVAL; 1176 1177 udc->ep0_reply = req; 1178 schedule_work(&udc->ep0_wq); 1179 return 0; 1180 } 1181 1182 spin_lock_irqsave(&udc->lock, flags); 1183 if (!bep->iudma->enabled) { 1184 rc = -ESHUTDOWN; 1185 goto out; 1186 } 1187 1188 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx); 1189 if (rc == 0) { 1190 list_add_tail(&breq->queue, &bep->queue); 1191 if (list_is_singular(&bep->queue)) 1192 iudma_write(udc, bep->iudma, breq); 1193 } 1194 1195 out: 1196 spin_unlock_irqrestore(&udc->lock, flags); 1197 return rc; 1198 } 1199 1200 /** 1201 * bcm63xx_udc_dequeue - Remove a pending request from the queue. 1202 * @ep: Endpoint associated with the request. 1203 * @req: Request to remove. 1204 * 1205 * If the request is not at the head of the queue, this is easy - just nuke 1206 * it. If the request is at the head of the queue, we'll need to stop the 1207 * DMA transaction and then queue up the successor. 
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* head of queue: the HW may be mid-transfer on it */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		/* restart DMA on the next pending request, if any */
		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		/* not in flight yet: just unlink it */
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * Completion runs unconditionally (even on -EINVAL) and outside
	 * the lock, since ->complete() may call back into this driver.
	 */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* record the wedge first so a SETUP IRQ re-asserts the stall */
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/* per-endpoint operations exported to the gadget core */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	/* the gadget driver's ->setup() may sleep or requeue */
	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration. If this happens, we'll have to be
 * content with printing a warning.
1336 */ 1337 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc) 1338 { 1339 struct usb_ctrlrequest ctrl; 1340 int rc; 1341 1342 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE; 1343 ctrl.bRequest = USB_REQ_SET_CONFIGURATION; 1344 ctrl.wValue = cpu_to_le16(udc->cfg); 1345 ctrl.wIndex = 0; 1346 ctrl.wLength = 0; 1347 1348 rc = bcm63xx_ep0_setup_callback(udc, &ctrl); 1349 if (rc < 0) { 1350 dev_warn_ratelimited(udc->dev, 1351 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n", 1352 udc->cfg); 1353 } 1354 return rc; 1355 } 1356 1357 /** 1358 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request. 1359 * @udc: Reference to the device controller. 1360 */ 1361 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc) 1362 { 1363 struct usb_ctrlrequest ctrl; 1364 int rc; 1365 1366 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE; 1367 ctrl.bRequest = USB_REQ_SET_INTERFACE; 1368 ctrl.wValue = cpu_to_le16(udc->alt_iface); 1369 ctrl.wIndex = cpu_to_le16(udc->iface); 1370 ctrl.wLength = 0; 1371 1372 rc = bcm63xx_ep0_setup_callback(udc, &ctrl); 1373 if (rc < 0) { 1374 dev_warn_ratelimited(udc->dev, 1375 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n", 1376 udc->iface, udc->alt_iface); 1377 } 1378 return rc; 1379 } 1380 1381 /** 1382 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request. 1383 * @udc: Reference to the device controller. 1384 * @ch_idx: IUDMA channel number. 1385 * @req: USB gadget layer representation of the request. 
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	/* only one ep0 request may be in flight at a time */
	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		/* callback may sleep / re-enter the driver: drop the lock */
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	/* if the reply was already submitted to the HW, forget about it */
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker. This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
 * for the next packet. Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	/* pick the next state based on direction and presence of a data stage */
	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host. This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly. When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply. Just
		 * REQUEUE->IDLE. The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission. Don't bother
		 * calling the completion, because it originated from this
		 * function anyway. Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply. Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	/* no state change means there was nothing to do this round */
	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
1731 * 1732 * The worker function will continue iterating around the state machine 1733 * until there is nothing left to do. Usually "nothing left to do" means 1734 * that we're waiting for a new event from the hardware. 1735 */ 1736 static void bcm63xx_ep0_process(struct work_struct *w) 1737 { 1738 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq); 1739 spin_lock_irq(&udc->lock); 1740 while (bcm63xx_ep0_one_round(udc) == 0) 1741 ; 1742 spin_unlock_irq(&udc->lock); 1743 } 1744 1745 /*********************************************************************** 1746 * Standard UDC gadget operations 1747 ***********************************************************************/ 1748 1749 /** 1750 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW. 1751 * @gadget: USB slave device. 1752 */ 1753 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget) 1754 { 1755 struct bcm63xx_udc *udc = gadget_to_udc(gadget); 1756 1757 return (usbd_readl(udc, USBD_STATUS_REG) & 1758 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT; 1759 } 1760 1761 /** 1762 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line. 1763 * @gadget: USB slave device. 1764 * @is_on: 0 to disable pullup, 1 to enable. 1765 * 1766 * See notes in bcm63xx_select_pullup(). 
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* bring the controller out of shutdown */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* forget all previously wedged/stalled endpoints */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		/*
		 * Ask the ep0 worker to shut down, then poll (lockless)
		 * until bcm63xx_ep0_do_idle() reaches EP0_SHUTDOWN; see
		 * the mb() there that pairs with this wait.
		 */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	/* this controller requires a high-speed-capable gadget driver */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	udc->driver = NULL;

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 *
	 * NOTE(review): msleep() here runs while udc->lock is held via
	 * spin_lock_irqsave() - sleeping in atomic context. Looks wrong;
	 * confirm whether the delay should be moved outside the lock.
	 */
	msleep(100);

	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/* controller-level operations exported to the gadget core */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
1914 */ 1915 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc) 1916 { 1917 u32 reg = usbd_readl(udc, USBD_STATUS_REG); 1918 enum usb_device_speed oldspeed = udc->gadget.speed; 1919 1920 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) { 1921 case BCM63XX_SPD_HIGH: 1922 udc->gadget.speed = USB_SPEED_HIGH; 1923 break; 1924 case BCM63XX_SPD_FULL: 1925 udc->gadget.speed = USB_SPEED_FULL; 1926 break; 1927 default: 1928 /* this should never happen */ 1929 udc->gadget.speed = USB_SPEED_UNKNOWN; 1930 dev_err(udc->dev, 1931 "received SETUP packet with invalid link speed\n"); 1932 return 0; 1933 } 1934 1935 if (udc->gadget.speed != oldspeed) { 1936 dev_info(udc->dev, "link up, %s-speed mode\n", 1937 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full"); 1938 return 1; 1939 } else { 1940 return 0; 1941 } 1942 } 1943 1944 /** 1945 * bcm63xx_update_wedge - Iterate through wedged endpoints. 1946 * @udc: Reference to the device controller. 1947 * @new_status: true to "refresh" wedge status; false to clear it. 1948 * 1949 * On a SETUP interrupt, we need to manually "refresh" the wedge status 1950 * because the controller hardware is designed to automatically clear 1951 * stalls in response to a CLEAR_FEATURE request from the host. 1952 * 1953 * On a RESET interrupt, we do want to restore all wedged endpoints. 1954 */ 1955 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status) 1956 { 1957 int i; 1958 1959 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) { 1960 bcm63xx_set_stall(udc, &udc->bep[i], new_status); 1961 if (!new_status) 1962 clear_bit(i, &udc->wedgemap); 1963 } 1964 } 1965 1966 /** 1967 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD). 1968 * @irq: IRQ number (unused). 1969 * @dev_id: Reference to the device controller. 1970 * 1971 * This is where we handle link (VBUS) down, USB reset, speed changes, 1972 * SET_CONFIGURATION, and SET_INTERFACE events. 
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	/* only consider events that are both raised and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* ack the events we are about to handle */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* a bus reset clears all wedges */
		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		/* re-assert stalls that the HW may have auto-cleared */
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* notify the gadget driver outside of udc->lock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}

/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread. For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transactions).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* ack the BUFDONE interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			/* short read (bd_bytes > rc) also ends the request */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* completion callback runs outside udc->lock */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}

/***********************************************************************
 * Debug filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_printf(s, "regs:\n");
	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, "  events: %08x; stall: %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);

/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	/* nothing meaningful to report until a gadget driver is bound */
	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_printf(s, "control");
			break;
		case BCMEP_BULK:
			seq_printf(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_printf(s, "interrupt");
			break;
		}
		/* odd channel indices are TX, even are RX */
		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		/* count the requests queued on the associated endpoint */
		if (iudma->bep) {
			i = 0;
			list_for_each(pos, &iudma->bep->queue)
				i++;
			seq_printf(s, "; %d queued\n", i);
		} else {
			seq_printf(s, "\n");
		}

		/* dump the BD ring, marking the read/write cursor positions */
		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   i * sizeof(*d), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_printf(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_printf(s, "   <<WR");
			seq_printf(s, "\n");
		}

		seq_printf(s, "\n");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 *
 * On any failure the partially created entries are removed and a single
 * error is logged; debugfs is strictly best-effort for this driver.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root, *usbd, *iudma;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;

	usbd = debugfs_create_file("usbd", 0400, root, udc,
				   &bcm63xx_usbd_dbg_fops);
	if (!usbd)
		goto err_usbd;
	iudma = debugfs_create_file("iudma", 0400, root, udc,
				    &bcm63xx_iudma_dbg_fops);
	if (!iudma)
		goto err_iudma;

	/* cache the dentries so cleanup can remove them later */
	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	return;
err_iudma:
	debugfs_remove(usbd);
err_usbd:
	debugfs_remove(root);
err_root:
	dev_err(udc->dev, "debugfs is not available\n");
}

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove(udc->debugfs_iudma);
	debugfs_remove(udc->debugfs_usbd);
	debugfs_remove(udc->debugfs_root);
	/* clear the cached dentries so a repeated cleanup is a no-op */
	udc->debugfs_iudma = NULL;
	udc->debugfs_usbd = NULL;
	udc->debugfs_root = NULL;
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	/* MEM resource #0: USBD core registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	/* MEM resource #1: IUDMA registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	/* full speed may be forced by platform data or module parameter */
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0)
		goto report_request_failure;

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0)
			goto report_request_failure;
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	/* registration failed: unwind debugfs and hardware init */
	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;

report_request_failure:
	dev_err(dev, "error requesting IRQ #%d\n", irq);
	goto out_uninit;
}

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* usb_del_gadget_udc() must have unbound any gadget driver */
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);